# repo: mtsolmn/lantz-drivers
# path: lantz/drivers/sacher/Sacher_EPOS.py
# head: f48caf9000ddd08f2abb837d832e341410af4788
# sacher_epos.py, python wrapper for sacher epos motor
# David Christle <[email protected]>, August 2014
#

"""
Possibly Maxon EPOS now
"""

"""
This is the actual version that works,
but only in the lab32 virtual environment.
"""

# from instrument import Instrument
# import qt
import ctypes
import ctypes.wintypes
import logging
import time
# from instrument import Instrument
from ctypes.wintypes import DWORD, WORD

import numpy as np

"""
okay so we import a bunch of random stuff;
ctypes is the foreign-function layer that talks to the EPOS DLL loaded below
"""

# The commented-out block below was an earlier attempt to reach the DLL by
# delegating to a helper process in the 32-bit environment:
# from subprocess import Popen, PIPE
# from multiprocessing.managers import BaseManager
# import atexit
# import os
# python32_dir = "C:\\Users\\Alex\\Miniconda3\\envs\\lab32"
# assert os.path.isdir(python32_dir)
# os.chdir(python32_dir)
# derp = "C:\\Users\\Alex\\Documents\\wow_such_code"
# assert os.path.isdir(derp)
# os.chdir(derp)
# p = Popen([python32_dir + "\\python.exe", derp + "\\delegate.py"], stdout=PIPE, cwd=derp)
# atexit.register(p.terminate)
# port = int(p.stdout.readline())
# authkey = p.stdout.read()
# print(port, authkey)
# m = BaseManager(address=("localhost", port), authkey=authkey)
# m.connect()
# # tell manager to expect an attribute called LibC
# m.register("SacherLasaTeknique")
# # access and use libc
# libc = m.SacherLasaTeknique()
# print(libc.vcs())

# eposlib = ctypes.windll.eposcmd
eposlib = ctypes.windll.LoadLibrary('C:\\Users\\Carbro\\Desktop\\Charmander\\EposCmd.dll')
DeviceName = b'EPOS'
ProtocolStackName = b'MAXON_RS232'
InterfaceName = b'RS232'

"""
Max on, Max off.
Anyway, ctypes is the thing that's talking to the EPOS DLL.
"""

HISTCHAN = 65536
TTREADMAX = 131072
RANGES = 8

MODE_HIST = 0
MODE_T2 = 2
MODE_T3 = 3

FLAG_OVERFLOW = 0x0040
FLAG_FIFOFULL = 0x0003

# in mV
ZCMIN = 0
ZCMAX = 20
DISCRMIN = 0
DISCRMAX = 800

# in ps
OFFSETMIN = 0
OFFSETMAX = 1000000000

# in ms
ACQTMIN = 1
ACQTMAX = 10 * 60 * 60 * 1000

# in mV
PHR800LVMIN = -1600
PHR800LVMAX = 2400

"""
a bunch of unexplained constants; none of them are actually used below, and
they look like limits copied over from a photon-counter driver rather than
anything the Sacher motor needs
"""


class Sacher_EPOS():
    """
    ok, before diving into this giant Sacher class, here is a list of all the
    functions defined in it (check(self) before wreck(self), but actually):

    __init__(self, name, address, reset=False)
    __del__(self)
    get_bit(self, byteval, idx)
    _u32todouble(self, uinput)
    open(self)
    close(self)
    get_offset(self)
    fine_tuning_steps(self, steps)
    set_new_offset(self, new_offset)
    get_motor_position(self)
    set_target_position(self, target, absolute, immediately)
    do_get_wavelength(self)
    do_set_wavelength(self, wavelength)
    is_open(self)
    clear_fault(self)
    initialize(self)

    The last one is really long, and there are 16 of them; they are commented
    as they come up.
    """

    def __init__(self, name, address, reset=False):
        # Instrument.__init__(self, name, tags=['physical'])
        # self._port_name = str(address)
        self._port_name = address
        self._is_open = False
        self._HPM = True

        # self.add_parameter('wavelength',
        #                    flags=Instrument.FLAG_GETSET,
        #                    type=types.FloatType,
        #                    units='nm',
        #                    minval=1070.0, maxval=1180.0)
        # self.add_function('open')
        # self.add_function('close')
        # self.add_function('fine_tuning_steps')
        # self.add_function('get_motor_position')
        # self.add_function('set_target_position')

        # try:
        self.open()
        self.initialize()
        # except:
        #     logging.error('Error loading Sacher EPOS motor. In use?')
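    # Typical construction, as in the __main__ block at the bottom of this file:
    #     epos = Sacher_EPOS(None, b'COM3')
    # Note the port name is a bytes object, since open() hands it straight to
    # the DLL; __init__ also runs initialize() itself, so there is no need to
    # call that again by hand.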
    """
    this really is the de facto initialization: it opens the port and then
    hands off to initialize(), which does the heavy lifting
    """

    def __del__(self):
        # execute disconnect
        self.close()
        return

    """
    this might be the only self-explanatory one:
    it disconnects
    """

    @staticmethod
    def get_bit(byteval, idx):
        # def get_bit(self, byteval, idx):
        # True if bit `idx` of `byteval` is set
        return ((byteval & (1 << idx)) != 0)

    """
    you get the bits, and then you use them:
    this is the helper the U32 conversions below lean on
    """

    @staticmethod
    def _u32todouble(uinput):
        # def _u32todouble(self, uinput):
        # this function implements the really weird/non-standard U32 to
        # floating point conversion in the sacher VIs
        # get sign of number
        sign = Sacher_EPOS.get_bit(uinput, 31)
        if sign == False:
            mantissa_sign = 1
        elif sign == True:
            mantissa_sign = -1
        exp_mask = 0b111111
        # print 'uin u is %d' % uinput
        # print 'type uin %s' % type(uinput)
        # print 'binary input is %s' % bin(long(uinput))
        # get sign of exponent
        if Sacher_EPOS.get_bit(uinput, 7) == False:
            exp_sign = 1
        elif Sacher_EPOS.get_bit(uinput, 7) == True:
            exp_sign = -1
        # print 'exp extract %s' % bin(int(uinput & exp_mask))
        # print 'exp conv %s' % (exp_sign * int(uinput & exp_mask))
        # print 'sign of exponent %s' % self.get_bit(uinput, 7)
        # print 'binary constant is %s' % bin(int(0b10000000000000000000000000000000))
        mantissa_mask = 0b01111111111111111111111100000000
        # mantissa_mask = 0b0111111111111111111111110000000
        # print 'mantissa extract is %s' % bin((uinput & mantissa_mask) >> 8)
        mantissa = 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask) >> 8)
        # print 'mantissa is %.12f' % mantissa
        # print(1 if Sacher_EPOS.get_bit(uinput, 31) else 0, mantissa,
        #       1 if Sacher_EPOS.get_bit(uinput, 7) else 0, uinput & exp_mask)
        output = mantissa * 2.0 ** (float(exp_sign) * float(int(uinput & exp_mask)))
        # print 'output is %s' % output
        return output

    """
    a U32 is just an unsigned 32-bit integer; the "really weird/non-standard"
    encoding the Sacher VIs use, as read off the masks above, is:
      bit 31      sign of the mantissa
      bits 30..8  mantissa, stored in millionths
      bit 7       sign of the exponent
      bits 5..0   exponent (a power of two; bit 6 appears unused)
    so the value is +/- (mantissa / 1e6) * 2 ** (+/- exponent)
    """

    @staticmethod
    def _doubletou32(dinput):
        # inverse of _u32todouble: pack a float into the Sacher U32 format
        # (note: this divides by abs(dinput), so it fails for dinput == 0)
        mantissa_bit = 0 if int(dinput / abs(dinput)) > 0 else 1
        exp_bit = 1 if -1 < dinput < 1 else 0
        b = np.ceil(np.log10(abs(dinput)))
        a = dinput / 10 ** b
        if dinput < 0:
            a = -a
        # print('a:\t{}\tb:\t{}'.format(a, b))
        d = np.log2(10) * b
        d_ = np.ceil(d)
        c = a * 2 ** (d - d_)
        # print('c:\t{}\td_:{}\toriginal:\t{}'.format(c, d_, c * 2 ** d_))
        return (int(mantissa_bit) << 31) + (int(c * 1e6) << 8) + (int(exp_bit) << 7) + int(abs(d_))
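    # A worked round trip of the codec above (editor's illustration, not from
    # the original code). Encoding 3.0 with _doubletou32:
    #   b = ceil(log10(3)) = 1, a = 0.3, d = log2(10) * 1 ~ 3.3219, d_ = 4,
    #   c = 0.3 * 2 ** (d - d_) = 3 / 2 ** 4 = 0.1875,
    # giving (187500 << 8) + 4 = 48000004. Decoding with _u32todouble:
    #   mantissa = 187500 / 1e6 = 0.1875, exponent = +4,
    #   0.1875 * 2 ** 4 = 3.0 exactly.
    # Because the mantissa is stored in millionths, the resolution is
    # 2 ** exponent / 1e6, so large values round-trip only approximately.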
    def open(self):
        eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
                                           ctypes.POINTER(DWORD)]
        eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE
        buf = ctypes.pointer(DWORD(0))
        ret = ctypes.wintypes.HANDLE()
        # print 'types are all %s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName), type(InterfaceName), type(self._port_name), type(buf))
        ret = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf)
        self._keyhandle = ret
        # print 'keyhandle is %s' % self._keyhandle
        # print 'open device ret %s' % buf
        # print buf.contents.value
        if int(buf.contents.value) >= 0:
            self._is_open = True
            self._keyhandle = ret
        return

    """
    opens the RS232 connection to the EPOS controller and stashes the key
    handle that every other DLL call needs; the counterpart of close()
    """

    def close(self):
        print('closing EPOS motor.')
        eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)]
        eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL
        buf = ctypes.pointer(DWORD(0))
        ret = ctypes.wintypes.BOOL()
        ret = eposlib.VCS_CloseDevice(self._keyhandle, buf)
        # print 'close device returned %s' % buf
        if int(buf.contents.value) >= 0:
            self._is_open = False
        else:
            logging.error(__name__ + ' did not close Sacher EPOS motor correctly.')
        return

    """
    closes the RS232 connection opened by open(); "opening" and "closing" the
    motor really just means opening and closing that connection
    """

    def get_motor_current(self):
        nodeID = ctypes.wintypes.WORD(0)
        eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                             ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL
        motorCurrent = ctypes.c_uint8(0)
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf))
        return motorCurrent.value

    """
    reads the instantaneous motor current via VCS_GetCurrentIs
    """

    def find_home(self):
        nodeID = ctypes.wintypes.WORD(0)
        eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_uint8,
                                         ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf))
        print('Homing: {}'.format(ret))
        return ret

    """
    starts a homing move using homing method 35
    """

    def restore(self):
        nodeID = ctypes.wintypes.WORD(0)
        # (the original set argtypes/restype on VCS_FindHome here, which looks
        # like a copy-paste slip; they belong on VCS_Restore, which is what
        # actually gets called)
        eposlib.VCS_Restore.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                        ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_Restore.restype = ctypes.wintypes.BOOL
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf))
        print('Restore: {}'.format(ret))
        return ret

    """
    restores the controller's default parameters via VCS_Restore
    """

    def get_offset(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD),
                                          ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        # These are hardcoded values I got from the LabVIEW program -- I don't think
        # any documentation exists on particular object indices
        StoredPositionObject = ctypes.wintypes.WORD(8321)
        StoredPositionObjectSubindex = ctypes.c_uint8(0)
        StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
        ObjectData = ctypes.c_void_p()
        ObjectDataArray = (ctypes.c_uint32 * 1)()
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32))
        StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
                                    ctypes.byref(buf))
        # Cast the object data to int32
        CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32))
        if ret == 0:
            logging.error(__name__ + ' Could not read stored position from Sacher EPOS motor')
        return CastedObjectData[0]

    """
    reads the stored home/offset position out of object 8321 on the controller
    """
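    # (Editor's note: VCS_GetObject/VCS_SetObject read and write entries of the
    # controller's CANopen-style object dictionary by index and subindex.
    # 8321 is 0x2081, and 8204 -- used below for the calibration coefficients --
    # is 0x200C; both sit in the manufacturer-specific region above 0x2000,
    # which is presumably why no public documentation lists them. The read
    # above amounts to: index 0x2081, subindex 0, 4 bytes -> stored home
    # position as an int32.)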
    def fine_tuning_steps(self, steps):
        current_motor_pos = self.get_motor_position()
        self._offset = self.get_offset()
        self.set_target_position(steps, False, True)
        new_motor_pos = self.get_motor_position()
        # print('New motor position is %s' % new_motor_pos)
        # print 'new offset is %s' % (new_motor_pos - current_motor_pos + self._offset)
        self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)

    """
    nudges the motor by `steps` relative steps, then updates the stored offset
    so the wavelength bookkeeping stays consistent
    """

    def set_new_offset(self, new_offset):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD),
                                          ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
                                          ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
        # print 'setting new offset'
        StoredPositionObject = ctypes.wintypes.WORD(8321)
        StoredPositionObjectSubindex = ctypes.c_uint8(0)
        StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
        ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset)
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
                                    ctypes.byref(buf))
        if ret == 0:
            logging.error(__name__ + ' Could not write stored position to Sacher EPOS motor')
        return

    """
    writes a new offset into object 8321; the counterpart of get_offset()
    """

    def set_coeffs(self, a, b, c, min_wl, max_wl):
        print('')
        print("setting coefficients...")
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD),
                                          ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
                                          ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
        # pack the wavelength bounds into one 32-bit word: min in the high
        # half, max in the low half
        d = (min_wl << 16) + max_wl
        StoredPositionObject = ctypes.wintypes.WORD(8204)
        # subindices 1..3 hold the quadratic coefficients a, b, c
        for subidx, coeff in enumerate([a, b, c]):
            print(subidx, coeff)
            StoredPositionObjectSubindex = ctypes.c_uint8(subidx + 1)
            StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
            ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff))
            ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
            StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
            ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                        ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
                                        ctypes.byref(buf))
        # subindex 4 holds the packed wavelength bounds
        StoredPositionObjectSubindex = ctypes.c_uint8(4)
        StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
        ObjectDataArray = (ctypes.c_uint32 * 1)(d)
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
                                    ctypes.byref(buf))
        # note: this prints the coefficients read back at initialize() time,
        # not the ones just written
        print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
        if ret == 0:
            logging.error(__name__ + ' Could not write calibration coefficients to Sacher EPOS motor')
        return

    """
    writes the wavelength calibration (quadratic coefficients a, b, c, plus
    the min/max wavelength bounds) into object 8204
    """
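    # Worked example of the subindex-4 packing (editor's illustration, using
    # the values from the commented-out set_coeffs call in __main__ below):
    #   min_wl = 10840, max_wl = 11860  (Angstroms)
    #   d = (10840 << 16) + 11860 = 710422100
    # and initialize() undoes it with
    #   d >> 16     -> 10840 -> 1084.0 nm
    #   d & 0xffff  -> 11860 -> 1186.0 nm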
    def get_motor_position(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        pPosition = ctypes.pointer(ctypes.c_long())
        eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                              ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL
        ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf))
        # print 'get motor position ret %s' % ret
        # print 'get motor position buf %s' % buf.value
        # print 'get motor position value %s' % pPosition.contents.value
        return pPosition.contents.value
        # print('getting motor position...')
        # print(ret)
        # return print(pPosition.contents.value)

    """
    returns the current motor position via VCS_GetPositionIs
    """
    def set_target_position(self, target, absolute, immediately):
        # print('check #1')
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        # First, set enabled state
        # print('#5 Motor current: {}'.format(self.get_motor_current()))
        ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf))
        # print('Enable state ret %s buf %s' % (ret, buf.value))
        # print('#6 Motor current: {}'.format(self.get_motor_current()))
        pTarget = ctypes.c_long(target)
        pAbsolute = ctypes.wintypes.BOOL(absolute)
        pImmediately = ctypes.wintypes.BOOL(immediately)
        eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,
                                               ctypes.wintypes.BOOL, ctypes.wintypes.BOOL,
                                               ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL
        # print('check #2')
        # print('About to set motor position')
        # print('Current motor position is %d' % (self.get_motor_position()))
        ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf))
        # print('#7 Motor current: {}'.format(self.get_motor_current()))
        # print('set motor position ret %s' % ret)
        # print('set motor position buf %s' % buf.value)
        steps_per_second = 14494.0  # hardcoded, estimated roughly, unused now
        nchecks = 0
        # print('check #3')
        while nchecks < 1000:
            # get the movement state; a movement state of 1 indicates the
            # motor is done moving
            # print('check #4')
            # print('Motor current: {}'.format(self.get_motor_current()))
            print('Motor position: {}'.format(self.get_motor_position()))
            # print('Motor offset: {}'.format(self.get_offset()))
            self._offset = self.get_offset()
            # print('Motor offset is %s' % self._offset)
            pMovementState = ctypes.pointer(ctypes.wintypes.BOOL())
            # print(pMovementState.contents.value)
            eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                     ctypes.POINTER(ctypes.wintypes.BOOL),
                                                     ctypes.POINTER(ctypes.wintypes.DWORD)]
            eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL
            # print('Getting movement state')
            ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
            # print('Movement state is %s' % pMovementState.contents.value)
            if pMovementState.contents.value == 1:
                break
            nchecks = nchecks + 1
            # print('Current motor position is %d' % self.get_motor_position())
            # print('check #5')
            # print(nchecks)
            time.sleep(0.01)
        # Now set disabled state
        ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
        # print('check #6')
        # print('Disable state ret %s buf %s' % (ret, buf.value))
        # print('Final motor position is %d' % (self.get_motor_position()))
        # print('check #7')
        return ret

    """
    enables the drive, issues a profile-position move to `target` (relative or
    absolute, depending on the flag), polls until the controller reports the
    move finished, then disables the drive again
    """
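    # (Editor's note on the loop above: 1000 checks at 10 ms apiece means
    # set_target_position gives up after roughly 10 seconds and disables the
    # drive whether or not the move actually finished.)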
    def fuck_my_life(self, wavelength):
        print('goddamn this piece of shit')
        print('')
        print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
        # print('#3 Motor current: {}'.format(self.get_motor_current()))
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        # Step 1: Get the actual motor position
        # print('Getting motor position')
        current_motor_pos = self.get_motor_position()
        # Step 2: Get the motor offset
        self._offset = self.get_offset()
        # print('Motor offset is %s' % self._offset)
        # Step 3: Convert the desired wavelength into a position
        # Check the sign of the position-to-wavelength map
        pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
        pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
        # logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
        # If that's OK, use the quadratic formula to calculate the roots
        b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
        sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
        # print('wut da fuuuu')
        # print(b2a)
        # print(sqrtarg)
        # print(pos0)
        # print(pos5000)
        if sqrtarg < 0.0:
            logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
        if pos0 > pos5000:
            # Take the - square root solution (wavelength falls with position)
            x = b2a - np.sqrt(sqrtarg)
        elif pos0 < pos5000:
            # Take the + square root solution
            x = b2a + np.sqrt(sqrtarg)
        print(b2a)
        print(np.sqrt(sqrtarg))
        # print('Position is %s' % x)
        wavelength_to_pos = int(round(x))
        # Step 4: Calculate difference between the output position and the stored offset
        # print('Step 4...')
        diff_wavelength_offset = wavelength_to_pos - int(self._offset)
        print('wavelength_to_pos: {}'.format(wavelength_to_pos))
        print('diff_wavelength_offset: {}'.format(diff_wavelength_offset))
        print('self._offset: {}'.format(int(self._offset)))

    """
    a debugging cousin of do_set_wavelength(): it runs the same
    wavelength-to-position conversion, prints the intermediate values, and
    stops short of actually moving the motor
    """
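    # (Editor's note on the conversion above and in do_set_wavelength below:
    # the calibration stored on the controller maps motor position x to
    # wavelength via
    #     wavelength = A * x**2 + B * x + C,
    # so the target position is a root of A*x**2 + B*x + (C - wavelength) = 0:
    #     x = -B/(2*A) +/- sqrt(B**2/(4*A**2) - (C - wavelength)/A),
    # i.e. b2a +/- sqrt(sqrtarg) in the code. The pos0/pos5000 comparison
    # checks whether wavelength rises or falls with position over the 0..5000
    # step range, which picks the branch.)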
    def do_get_wavelength(self):
        self._offset = self.get_offset()
        # self._currentwl = self._doubleA * (self._offset) ** 2.0 + self._doubleB * self._offset + self._doubleC
        self._currentwl = self._doubleA * (self.get_motor_position()) ** 2.0 \
                          + self._doubleB * self.get_motor_position() + self._doubleC
        print('Current wavelength: %.3f nm' % self._currentwl)
        return self._currentwl

    """
    evaluates the calibration polynomial at the current motor position to get
    the current wavelength in nm
    """

    def do_set_wavelength(self, wavelength):
        print('setting wavelength...')
        print('')
        # print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
        # print('#3 Motor current: {}'.format(self.get_motor_current()))
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        # Step 1: Get the actual motor position
        # print('Getting motor position')
        current_motor_pos = self.get_motor_position()
        # Step 2: Get the motor offset
        self._offset = self.get_offset()
        # print('Motor offset is %s' % self._offset)
        # Step 3: Convert the desired wavelength into a position
        # Check the sign of the position-to-wavelength map
        pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
        pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
        # logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
        # If that's OK, use the quadratic formula to calculate the roots
        b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
        sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
        # print(b2a)
        # print(sqrtarg)
        # print(pos0)
        # print(pos5000)
        if sqrtarg < 0.0:
            logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
        if pos0 > pos5000:
            # Take the - square root solution
            x = b2a - np.sqrt(sqrtarg)
        elif pos0 < pos5000:
            # Take the + square root solution
            x = b2a + np.sqrt(sqrtarg)
        # x is what the motor position should be
        # (note: x stays unbound if pos0 == pos5000, which would crash below)
        # print('Position is %s' % x)
        wavelength_to_pos = int(round(x))
        # Step 4: Calculate difference between the output position and the stored offset
        # print('Step 4...')
        diff_wavelength_offset = wavelength_to_pos - int(self._offset)
        # print('Diff wavelength offset %s' % diff_wavelength_offset)
        # Step 5: If HPM is activated and the wavelength position is lower, overshoot
        # the movement by 10,000 steps
        # print('Step 5...')
        # print('#4 Motor current: {}'.format(self.get_motor_current()))
        if 1 == 2:  # dead branch: the HPM overshoot logic below is switched off
            print('uh-oh')
            # if self._HPM and diff_wavelength_offset < 0:
            #     # print('Overshooting by 10000')
            #     self.set_target_position(diff_wavelength_offset - 10000, False, True)
            #     # Step 6: Set the real target position
            #     """
            #     HEY LOOK EVERYONE RIGHT ABOVE HERE THIS IS THE STUPID THING THAT'S NOT WORKING!
            #     """
            #     # print('Step 6a... diff wavelength')
            #     self.set_target_position(10000, False, True)
        else:
            # print('Step 6b... diff wavelength')
            # self.set_target_position(diff_wavelength_offset, False, True)   # WRONG
            # this is the real shit right here: the absolute flag needs to be
            # True, so the move goes to wavelength_to_pos directly
            self.set_target_position(wavelength_to_pos, True, True)
        # self.set_target_position(10000, False, True)
        # Step 7: Get the actual motor position
        new_motor_pos = self.get_motor_position()
        # print('New motor position is %s' % new_motor_pos)
        # print('new offset is %s' % (new_motor_pos - current_motor_pos + self._offset))
        self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
        # Step 8: get and print current wavelength
        # print('Current wavelength is %.3f' % self.do_get_wavelength())
        # print('setting wavelength done')
        return

    """
    converts the requested wavelength to a motor position with the stored
    calibration, does an absolute move there, then updates the stored offset
    """

    def is_open(self):
        return self._is_open

    """
    reports whether the RS232 connection is currently open
    """

    def clear_fault(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
        print('clear fault buf %s, ret %s' % (buf, ret))
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)

    """
    clears a fault state on the controller, raising with the DLL's error text
    if that fails
    """
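    # (Editor's note: clear_fault and initialize share the driver's error
    # idiom -- a VCS_* call returning 0 means failure, the error code lands in
    # `buf`, and VCS_GetErrorInfo is asked for the matching text:
    #     errbuf = ctypes.create_string_buffer(64)
    #     eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
    #     raise ValueError(errbuf.value)
    # though, as noted inside initialize() below, some DLL versions lack
    # VCS_GetErrorInfo entirely.)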
    def initialize(self):
        nodeID = ctypes.wintypes.WORD(0)
        buf = ctypes.wintypes.DWORD(0)
        BaudRate = DWORD(38400)
        Timeout = DWORD(100)
        ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf))
        # print 'set protocol buf %s ret %s' % (buf, ret)
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            # eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)

        buf = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
        # print 'clear fault buf %s, ret %s' % (buf, ret)
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)

        buf = ctypes.wintypes.DWORD(0)
        plsenabled = ctypes.wintypes.DWORD(0)
        ret = eposlib.VCS_GetEnableState(self._keyhandle, nodeID, ctypes.byref(plsenabled), ctypes.byref(buf))
        # print 'get enable state buf %s ret %s and en %s' % (buf, ret, plsenabled)
        if ret == 0:
            errbuf = ctypes.create_string_buffer(64)
            eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
            raise ValueError(errbuf.value)
        if int(plsenabled.value) != 0:
            logging.warning(__name__ + ' EPOS motor enabled, disabling before proceeding.')
            ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
            if int(ret) != 0:
                logging.warning(__name__ + ' EPOS motor successfully disabled, proceeding')
            else:
                logging.error(__name__ + ' EPOS motor was not successfully disabled!')

        buf = ctypes.wintypes.DWORD(0)
        Counts = WORD(512)  # incremental encoder counts in pulses per turn
        PositionSensorType = WORD(4)
        ret = eposlib.VCS_SetEncoderParameter(self._keyhandle, nodeID, Counts, PositionSensorType, ctypes.byref(buf))
        ## if ret == int(0):
        ##     print 'errr'
        ##     errbuf = ctypes.create_string_buffer(64)
        ##     print 'sending'
        ##     eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL
        ##     print 'boolerrorinfo'
        ##     eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD]
        ##     print 'arg'
        ##     ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64))
        ##     print 'err'
        ##     raise ValueError(errbuf.value)
        # For some reason, it appears normal in the LabVIEW code that this
        # function actually returns an error, i.e. the return value is zero
        # and the buffer has a non-zero error code in it; the LabVIEW code
        # doesn't check it.
        # Also, it appears that in the 2005 version of this DLL, the function
        # VCS_GetErrorInfo doesn't exist!

        # Get operation mode; check if it's 1 -- this is "profile position mode"
        buf = ctypes.wintypes.DWORD(0)
        pMode = ctypes.pointer(ctypes.c_int8())
        eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                 ctypes.POINTER(ctypes.c_int8),
                                                 ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL
        ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf))
        # if mode is not 1, make it 1
        if pMode.contents.value != 1:
            eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8,
                                                     ctypes.POINTER(ctypes.wintypes.DWORD)]
            eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL
            pMode_setting = ctypes.c_int8(1)
            ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf))
        eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                   ctypes.POINTER(ctypes.wintypes.DWORD),
                                                   ctypes.POINTER(ctypes.wintypes.DWORD),
                                                   ctypes.POINTER(ctypes.wintypes.DWORD),
                                                   ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL
        pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD())
        pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD())
        pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD())
        ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
                                             pProfileDeceleration, ctypes.byref(buf))
        print(pProfileVelocity.contents.value, pProfileAcceleration.contents.value,
              pProfileDeceleration.contents.value)
        if (int(pProfileVelocity.contents.value) > int(11400)
                or int(pProfileAcceleration.contents.value) > int(60000)
                or int(pProfileDeceleration.contents.value) > int(60000)):
            # (the original set these argtypes on VCS_GetPositionProfile, but
            # the call below is VCS_SetPositionProfile, so they are set there)
            eposlib.VCS_SetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                       ctypes.wintypes.DWORD, ctypes.wintypes.DWORD,
                                                       ctypes.wintypes.DWORD,
                                                       ctypes.POINTER(ctypes.wintypes.DWORD)]
            eposlib.VCS_SetPositionProfile.restype = ctypes.wintypes.BOOL
            pProfileVelocity = ctypes.wintypes.DWORD(429)
            pProfileAcceleration = ctypes.wintypes.DWORD(429)
            pProfileDeceleration = ctypes.wintypes.DWORD(429)
            logging.warning(__name__ + ' GetPositionProfile out of bounds, resetting...')
            ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
                                                 pProfileDeceleration, ctypes.byref(buf))
        # Now get the motor position (stored position offset)
        # from the device's "homposition" object
        self._offset = self.get_offset()
        # Now read the stored 'calculation parameters'
        eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD),
                                          ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        # More hardcoded values: coefficient A lives at object 8204, subindex 1
        StoredPositionObject = ctypes.wintypes.WORD(8204)
        StoredPositionObjectSubindex = ctypes.c_uint8(1)
        StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
        ObjectData = ctypes.c_void_p()
        ObjectDataArray = (ctypes.c_uint32 * 1)()
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
                                    ctypes.byref(buf))
        # Cast the object data to uint32
        CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
        self._coefA = CastedObjectData[0]

        # Get coefficient B (subindex 2)
        eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD),
                                          ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        StoredPositionObject = ctypes.wintypes.WORD(8204)
        StoredPositionObjectSubindex = ctypes.c_uint8(2)
        StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
        ObjectData = ctypes.c_void_p()
        ObjectDataArray = (ctypes.c_uint32 * 1)()
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
                                    ctypes.byref(buf))
        # Cast the object data to uint32
        CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
        self._coefB = CastedObjectData[0]

        # Get coefficient C (subindex 3)
        # These are hardcoded values I got from the LabVIEW program -- I don't think
        # any documentation exists on particular object indices
        eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD),
                                          ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        StoredPositionObject = ctypes.wintypes.WORD(8204)
        StoredPositionObjectSubindex = ctypes.c_uint8(3)
        StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
        ObjectData = ctypes.c_void_p()
        ObjectDataArray = (ctypes.c_uint32 * 1)()
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
                                    ctypes.byref(buf))
        # Cast the object data to uint32
        CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
        self._coefC = CastedObjectData[0]

        # Get coefficient D (subindex 4)
        eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
                                          ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
                                          ctypes.POINTER(ctypes.wintypes.DWORD),
                                          ctypes.POINTER(ctypes.wintypes.DWORD)]
        eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        StoredPositionObject = ctypes.wintypes.WORD(8204)
        StoredPositionObjectSubindex = ctypes.c_uint8(4)
        StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
        ObjectData = ctypes.c_void_p()
        ObjectDataArray = (ctypes.c_uint32 * 1)()
        ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
        StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
        ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
                                    ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
                                    ctypes.byref(buf))
        # Cast the object data to uint32
        CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
        self._coefD = CastedObjectData[0]
        # print 'coefficients are %s %s %s %s' % (self._coefA, self._coefB, self._coefC, self._coefD)
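        # (Editor's note: the four reads above fetch object 8204 (0x200C)
        # subindices 1..4 -- A, B, C in the custom U32 float format, then the
        # packed wavelength bounds (min << 16 | max, in Angstroms). They are
        # near-verbatim repeats and could be folded into a single helper or
        # loop, mirroring the loop set_coeffs() already uses for writing.)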
        self._doubleA = self._u32todouble(self._coefA)
        self._doubleB = self._u32todouble(self._coefB)
        self._doubleC = self._u32todouble(self._coefC)
        firstHalf = np.int16(self._coefD >> 16)
        secondHalf = np.int16(self._coefD & 0xffff)
        # Set the minimum and maximum wavelengths for the motor
        self._minwl = float(firstHalf) / 10.0
        self._maxwl = float(secondHalf) / 10.0
        # print 'first %s second %s' % (firstHalf, secondHalf)
        # This returns '10871' and '11859' for the Sacher, which are the correct
        # wavelength ranges in Angstroms
        # print 'Now calculate the current wavelength position:'
        self._currentwl = self._doubleA * (self._offset) ** 2.0 + self._doubleB * self._offset + self._doubleC
        print('Current wavelength: %.3f nm' % self._currentwl)
        print('initializing done')
        return True

    """
    initialize() sets the protocol stack (38400 baud), clears faults, makes
    sure the drive starts out disabled, configures the encoder, forces
    profile position mode, sanity-checks the position profile, then reads the
    stored offset and calibration coefficients off the controller
    """

    """
    Also, we're done with the Sacher_EPOS() class at this point
    """


if __name__ == '__main__':
    epos = Sacher_EPOS(None, b'COM3')
    # epos.set_coeffs(8.34529e-12, 8.49218e-5, 1081.92, 10840, 11860)
    # epos.do_get_wavelength()
    # print('#1 Motor current: {}'.format(epos.get_motor_current()))
    # epos.do_get_wavelength()
    # print('motor position is...')
    # current_pos = epos.get_motor_position()
    # print('current position is {}'.format(current_pos))
    # new_pos = current_pos + 10000
    # epos.set_target_position(new_pos, True, True)
    # print(epos.get_motor_position())
    # print('#2 Motor current: {}'.format(epos.get_motor_current()))
    # epos.find_home()
    # epos.restore()
    # time.sleep(7)
    epos.do_set_wavelength(1151.5)
    # epos.do_get_wavelength()
    print('Motor current: {}'.format(epos.get_motor_current()))
    print('Motor position: {}'.format(epos.get_motor_position()))

"""
OTHER MISC. NOTES:

increasing wavelength:
- causes the square to rotate left
- causes the base to move to the left when the square is stuck in
- causes the screw to loosen
- causes the large gold base to tighten

decreasing wavelength:
- there's an overshoot when lowering the wavelength
- causes the square to rotate right
- causes the base to move to the right when the square is stuck in
- causes the screw to tighten
- causes the large gold base to loosen, and also unplug the motor

Also, you don't need to explicitly run epos.initialize(), because __init__
already calls epos.initialize() itself.
"""

# womp the end
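# (Editor's addition, not part of the original driver: a hardware-free sanity
# check of the U32 float codec defined above. The test values are arbitrary;
# zero is excluded because _doubletou32 divides by abs(dinput).)
if __name__ == '__main__':
    for val in (3.0, 0.5, -2.25, 8.49218e-5, 1081.92):
        packed = Sacher_EPOS._doubletou32(val)
        print('%g -> 0x%08x -> %g' % (val, packed, Sacher_EPOS._u32todouble(packed)))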
haotianliu001/HRNet-Lesion
tools/generate_lst.py
9dae108879456e084b2200e39d7e58c1c08c2b16
"""Generate .lst index files mapping images to segmentation labels.

Walks the expected dataset layout (image/<split>[_crop] and
label/<split>/annotations[_crop]) and writes one '<image> <label>' pair
per line into <split>.lst / <split>_crop.lst at the dataset root.
"""
import argparse
import os

splits = ['train', 'val', 'test']

image_dirs = [
    'image/{}',
    'image/{}_crop'
]

label_dirs = [
    'label/{}/annotations',
    'label/{}/annotations_crop',
]


def generate(root):
    assert len(image_dirs) == len(label_dirs)

    for split in splits:
        for image_path, label_path in zip(image_dirs, label_dirs):
            image_path = image_path.format(split)
            label_path = label_path.format(split)

            # cropped val/test images reuse the uncropped annotations
            if split != 'train' and image_path.endswith('_crop'):
                label_path = label_path.replace('_crop', '')

            if not os.path.exists(os.path.join(root, label_path)):
                continue

            lines = []
            for label in os.listdir(os.path.join(root, label_path)):
                image = label.replace('.png', '.jpg')
                if os.path.exists(os.path.join(root, image_path, image)):
                    lines.append('{} {}\n'.format(os.path.join(image_path, image), os.path.join(label_path, label)))
                else:
                    print('not found: {}'.format(os.path.join(root, image_path, image)))

            print(image_path, label_path, len(lines))

            output_file = '{}.lst'.format(image_path.split('/')[1])
            with open(os.path.join(root, output_file), 'w') as f:
                f.writelines(lines)

            print(f'Save to {os.path.join(root, output_file)}\n')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('root', type=str, help='path of dataset root')
    args = parser.parse_args()

    generate(args.root)
[((50, 13, 50, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((32, 36, 32, 66), 'os.path.join', 'os.path.join', ({(32, 49, 32, 53): 'root', (32, 55, 32, 65): 'label_path'}, {}), '(root, label_path)', False, 'import os\n'), ((28, 34, 28, 64), 'os.path.join', 'os.path.join', ({(28, 47, 28, 51): 'root', (28, 53, 28, 63): 'label_path'}, {}), '(root, label_path)', False, 'import os\n'), ((35, 34, 35, 71), 'os.path.join', 'os.path.join', ({(35, 47, 35, 51): 'root', (35, 53, 35, 63): 'image_path', (35, 65, 35, 70): 'image'}, {}), '(root, image_path, image)', False, 'import os\n'), ((43, 22, 43, 53), 'os.path.join', 'os.path.join', ({(43, 35, 43, 39): 'root', (43, 41, 43, 52): 'output_file'}, {}), '(root, output_file)', False, 'import os\n'), ((46, 29, 46, 60), 'os.path.join', 'os.path.join', ({(46, 42, 46, 46): 'root', (46, 48, 46, 59): 'output_file'}, {}), '(root, output_file)', False, 'import os\n'), ((36, 50, 36, 81), 'os.path.join', 'os.path.join', ({(36, 63, 36, 73): 'image_path', (36, 75, 36, 80): 'image'}, {}), '(image_path, image)', False, 'import os\n'), ((36, 83, 36, 114), 'os.path.join', 'os.path.join', ({(36, 96, 36, 106): 'label_path', (36, 108, 36, 113): 'label'}, {}), '(label_path, label)', False, 'import os\n'), ((38, 49, 38, 86), 'os.path.join', 'os.path.join', ({(38, 62, 38, 66): 'root', (38, 68, 38, 78): 'image_path', (38, 80, 38, 85): 'image'}, {}), '(root, image_path, image)', False, 'import os\n')]
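The script above emits one `<image> <label>` pair per line into files such as `train.lst` at the dataset root. A minimal sketch of a consumer that parses those pairs back (the `train.lst` name and the dataset root are illustrative assumptions):

import os


def read_lst(root, lst_name='train.lst'):
    """Parse an .lst index back into (image_path, label_path) tuples."""
    pairs = []
    with open(os.path.join(root, lst_name)) as f:
        for line in f:
            image_path, label_path = line.split()
            pairs.append((image_path, label_path))
    return pairs


# pairs = read_lst('/path/to/dataset')  # hypothetical dataset root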
f-dangel/unfoldNd
examples/example.py
63e9abc4867d8678c2ac00da567dc106e9f6f2c7
"""How to use ``unfoldNd``. A comparison with ``torch.nn.Unfold``.""" # imports, make this example deterministic import torch import unfoldNd torch.manual_seed(0) # random batched RGB 32x32 image-shaped input tensor of batch size 64 inputs = torch.randn((64, 3, 32, 32)) # module hyperparameters kernel_size = 3 dilation = 1 padding = 1 stride = 2 # both modules accept the same arguments and perform the same operation torch_module = torch.nn.Unfold( kernel_size, dilation=dilation, padding=padding, stride=stride ) lib_module = unfoldNd.UnfoldNd( kernel_size, dilation=dilation, padding=padding, stride=stride ) # forward pass torch_outputs = torch_module(inputs) lib_outputs = lib_module(inputs) # check if torch.allclose(torch_outputs, lib_outputs): print("✔ Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.") else: raise AssertionError("❌ Outputs don't match")
[((8, 0, 8, 20), 'torch.manual_seed', 'torch.manual_seed', ({(8, 18, 8, 19): '(0)'}, {}), '(0)', False, 'import torch\n'), ((11, 9, 11, 37), 'torch.randn', 'torch.randn', ({(11, 21, 11, 36): '(64, 3, 32, 32)'}, {}), '((64, 3, 32, 32))', False, 'import torch\n'), ((20, 15, 22, 1), 'torch.nn.Unfold', 'torch.nn.Unfold', (), '', False, 'import torch\n'), ((23, 13, 25, 1), 'unfoldNd.UnfoldNd', 'unfoldNd.UnfoldNd', (), '', False, 'import unfoldNd\n'), ((32, 3, 32, 45), 'torch.allclose', 'torch.allclose', ({(32, 18, 32, 31): 'torch_outputs', (32, 33, 32, 44): 'lib_outputs'}, {}), '(torch_outputs, lib_outputs)', False, 'import torch\n')]
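The example above sticks to 4d input, which torch.nn.Unfold also handles; the point of unfoldNd is that the same interface extends to other dimensions. A minimal sketch of the volumetric (5d input) case, assuming UnfoldNd infers the dimensionality from the input as the library advertises:

import torch

import unfoldNd

torch.manual_seed(0)

# batched volumetric input: (batch, channels, depth, height, width)
volume = torch.randn((2, 3, 8, 8, 8))

# same hyperparameter interface as in the 2d example, applied in 3d
unfold3d = unfoldNd.UnfoldNd(2, dilation=1, padding=0, stride=2)
patches = unfold3d(volume)

# each column holds one 2x2x2 block per channel: 3 * 2**3 = 24 rows,
# and (8 / 2)**3 = 64 blocks per example
print(patches.shape)  # expected: torch.Size([2, 24, 64])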
NicsTr/pretix
src/pretix/helpers/escapejson.py
e6d2380d9ed1836cc64a688b2be20d00a8500eab
from django.utils.encoding import force_str
from django.utils.functional import keep_lazy
from django.utils.safestring import SafeText, mark_safe

_json_escapes = {
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
}

_json_escapes_attr = {
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('"'): '&#34;',
    ord("'"): '&#39;',
    ord("="): '&#61;',
}


@keep_lazy(str, SafeText)
def escapejson(value):
    """Escape characters for safe use inside an application/json script block."""
    return mark_safe(force_str(value).translate(_json_escapes))


@keep_lazy(str, SafeText)
def escapejson_attr(value):
    """Escape characters for safe use inside an HTML attribute."""
    return mark_safe(force_str(value).translate(_json_escapes_attr))
[((21, 1, 21, 25), 'django.utils.functional.keep_lazy', 'keep_lazy', ({(21, 11, 21, 14): 'str', (21, 16, 21, 24): 'SafeText'}, {}), '(str, SafeText)', False, 'from django.utils.functional import keep_lazy\n'), ((27, 1, 27, 25), 'django.utils.functional.keep_lazy', 'keep_lazy', ({(27, 11, 27, 14): 'str', (27, 16, 27, 24): 'SafeText'}, {}), '(str, SafeText)', False, 'from django.utils.functional import keep_lazy\n'), ((24, 21, 24, 37), 'django.utils.encoding.force_str', 'force_str', ({(24, 31, 24, 36): 'value'}, {}), '(value)', False, 'from django.utils.encoding import force_str\n'), ((30, 21, 30, 37), 'django.utils.encoding.force_str', 'force_str', ({(30, 31, 30, 36): 'value'}, {}), '(value)', False, 'from django.utils.encoding import force_str\n')]
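For reference, a short sketch of what the two helpers produce, with made-up inputs (running it requires Django on the path):

from pretix.helpers.escapejson import escapejson, escapejson_attr  # module above

# HTML-sensitive characters become JSON unicode escapes, so a JSON payload
# can sit inside a <script> block without terminating it early
print(escapejson('</script> & <b>'))
# \u003C/script\u003E \u0026 \u003Cb\u003E

# the attribute variant additionally entity-encodes quotes, apostrophes and '='
print(escapejson_attr('x="1"'))
# x&#61;&#34;1&#34;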
snowind/pyxley
pyxley/charts/plotly/base.py
cff9e50b8d80b9794c6907355e541f166959cd6c
from ..charts import Chart from flask import jsonify, request _BASE_CONFIG = { "showLink": False, "displaylogo": False, "modeBarButtonsToRemove": ["sendDataToCloud"] } class PlotlyAPI(Chart): """ Base class for Plotly.js API This class is used to create charts using the plotly.js api To keep this general, this chart does not have a default method of transmitting data. Instead the user must supply a route_func method. """ def __init__(self, chart_id, url, route_func, init_params={}): options = { "chartid": chart_id, "url": url, "params": init_params } super(PlotlyAPI, self).__init__("PlotlyAPI", options, route_func) @staticmethod def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG): """ basic line plot dataframe to json for a line plot Args: df (pandas.DataFrame): input dataframe xypairs (list): list of tuples containing column names mode (str): plotly.js mode (e.g. lines) layout (dict): layout parameters config (dict): config parameters """ if df.empty: return { "x": [], "y": [], "mode": mode } _data = [] for x, y in xypairs: if (x in df.columns) and (y in df.columns): _data.append( { "x": df[x].values.tolist(), "y": df[y].values.tolist(), "mode": mode } ) return { "data": _data, "layout": layout, "config": config }
[]
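Because line_plot is a plain staticmethod, the payload it builds can be inspected without wiring up a Flask route. A small sketch (pandas assumed available; the import path is an assumption):

import pandas as pd

from pyxley.charts.plotly.base import PlotlyAPI

df = pd.DataFrame({"day": [1, 2, 3], "sales": [10.0, 12.5, 9.0]})
payload = PlotlyAPI.line_plot(df, xypairs=[("day", "sales")], mode="lines")

print(payload["data"])
# [{'x': [1, 2, 3], 'y': [10.0, 12.5, 9.0], 'mode': 'lines'}]
print(payload["config"])  # the shared _BASE_CONFIG defaults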
CospanDesign/python
pyqt/getting_started/close_window.py
9f911509aae7abd9237c14a4635294c7719c9129
#!/usr/bin/python

import sys
from PyQt4 import QtGui
from PyQt4 import QtCore


class Example(QtGui.QWidget):

  def __init__(self):
    super(Example, self).__init__()
    self.initUI()

  def initUI(self):
    # Wire the button's 'clicked' signal to the application-wide quit slot,
    # so pressing it terminates the event loop and closes the window.
    qbtn = QtGui.QPushButton('Quit', self)
    qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)
    # sizeHint() gives the recommended size for the widget's contents.
    qbtn.resize(qbtn.sizeHint())

    self.setGeometry(300, 300, 250, 150)
    self.setWindowTitle('Quit Button')
    self.show()


def main():
  app = QtGui.QApplication(sys.argv)
  ex = Example()
  sys.exit(app.exec_())


if __name__ == "__main__":
  main()
[((22, 8, 22, 36), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', ({(22, 27, 22, 35): 'sys.argv'}, {}), '(sys.argv)', False, 'from PyQt4 import QtGui\n'), ((13, 11, 13, 42), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', ({(13, 29, 13, 35): '"""Quit"""', (13, 37, 13, 41): 'self'}, {}), "('Quit', self)", False, 'from PyQt4 import QtGui\n'), ((14, 25, 14, 59), 'PyQt4.QtCore.QCoreApplication.instance', 'QtCore.QCoreApplication.instance', ({}, {}), '()', False, 'from PyQt4 import QtCore\n')]
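The clicked signal accepts any Python callable, not just the built-in quit slot; a minimal variation with a hypothetical handler that leaves room for cleanup:

import sys
from PyQt4 import QtGui, QtCore


def on_quit_clicked():
    # hypothetical handler: save state or close resources before exiting
    print('shutting down')
    QtCore.QCoreApplication.instance().quit()


app = QtGui.QApplication(sys.argv)
btn = QtGui.QPushButton('Quit')
btn.clicked.connect(on_quit_clicked)  # any callable works as a slot
btn.show()
sys.exit(app.exec_())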
bdecost/gpytorch
test/means/test_zero_mean.py
a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import torch import unittest from gpytorch.means import ZeroMean class TestZeroMean(unittest.TestCase): def setUp(self): self.mean = ZeroMean() def test_forward(self): a = torch.Tensor([[1, 2], [2, 4]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2,)) self.assertTrue(res.eq(0).all()) def test_forward_batch(self): a = torch.Tensor([[[1, 2], [1, 2], [2, 4]], [[2, 3], [2, 3], [1, 3]]]) res = self.mean(a) self.assertEqual(tuple(res.size()), (2, 3)) self.assertTrue(res.eq(0).all())
[((13, 20, 13, 30), 'gpytorch.means.ZeroMean', 'ZeroMean', ({}, {}), '()', False, 'from gpytorch.means import ZeroMean\n'), ((16, 12, 16, 42), 'torch.Tensor', 'torch.Tensor', ({(16, 25, 16, 41): '[[1, 2], [2, 4]]'}, {}), '([[1, 2], [2, 4]])', False, 'import torch\n'), ((22, 12, 22, 78), 'torch.Tensor', 'torch.Tensor', ({(22, 25, 22, 77): '[[[1, 2], [1, 2], [2, 4]], [[2, 3], [2, 3], [1, 3]]]'}, {}), '([[[1, 2], [1, 2], [2, 4]], [[2, 3], [2, 3], [1, 3]]])', False, 'import torch\n')]
rizzak/python_training
generator/contact.py
38bbe5d7e38892e8dcc28caeae1481b98cce7356
import jsonpickle
import random
import string
from model.contact import Contact
import os.path
import getopt
import sys

try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # report the parsing error before exiting (getopt has no usage() helper)
    print(err)
    sys.exit(2)

n = 5
f = "data/contacts.json"

for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a


def random_string(prefix, maxlen):
    symbols = string.ascii_letters + string.digits + " "*10
    return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])


# one deliberately empty contact plus n randomly filled ones
testdata = [Contact(first_name="", middle_name="", last_name="", nickname="", title="", company="", address="",
                    home_tel="", mobile_tel="", work_tel="", fax="", email="", homepage="", birthday="",
                    anniversary="", secondary_address="", secondary_tel="", notes="")] + [
    Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10),
            last_name=random_string('last_name', 10), nickname=random_string('nickname', 10),
            title=random_string('random_string', 10), company=random_string('company', 10),
            address=random_string('address', 10), home_tel=random_string('home_tel', 10),
            mobile_tel=random_string('mobile_tel', 10), work_tel=random_string('work_tel', 10),
            fax=random_string('fax', 10), email=random_string('email', 10),
            homepage=random_string('homepage', 10), birthday=random_string('birthday', 10),
            anniversary=random_string('anniversary', 10), secondary_address=random_string('secondary_address', 10),
            secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10))
    for i in range(n)  # honour the -n option
]

file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)

with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
[((11, 17, 11, 84), 'getopt.getopt', 'getopt.getopt', ({(11, 31, 11, 43): 'sys.argv[1:]', (11, 45, 11, 51): '"""n:f:"""', (11, 53, 11, 83): "['number of contacts', 'file']"}, {}), "(sys.argv[1:], 'n:f:', ['number of contacts', 'file'])", False, 'import getopt\n'), ((47, 4, 47, 52), 'jsonpickle.set_encoder_options', 'jsonpickle.set_encoder_options', (), '', False, 'import jsonpickle\n'), ((13, 4, 13, 18), 'getopt.usage', 'getopt.usage', ({}, {}), '()', False, 'import getopt\n'), ((14, 4, 14, 15), 'sys.exit', 'sys.exit', ({(14, 13, 14, 14): '(2)'}, {}), '(2)', False, 'import sys\n'), ((31, 12, 33, 85), 'model.contact.Contact', 'Contact', (), '', False, 'from model.contact import Contact\n'), ((48, 14, 48, 41), 'jsonpickle.encode', 'jsonpickle.encode', ({(48, 32, 48, 40): 'testdata'}, {}), '(testdata)', False, 'import jsonpickle\n'), ((28, 29, 28, 51), 'random.choice', 'random.choice', ({(28, 43, 28, 50): 'symbols'}, {}), '(symbols)', False, 'import random\n'), ((28, 67, 28, 91), 'random.randrange', 'random.randrange', ({(28, 84, 28, 90): 'maxlen'}, {}), '(maxlen)', False, 'import random\n')]
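The generated file round-trips through jsonpickle.decode, which restores Contact objects rather than plain dicts; a minimal sketch, assuming the default output path above:

import jsonpickle

with open("data/contacts.json") as src:
    contacts = jsonpickle.decode(src.read())

print(len(contacts))           # n + 1: one empty contact plus n random ones
print(contacts[1].first_name)  # e.g. 'first_nameq3 kD8'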
arvindm95/unladen-swallow
Lib/test/test_runpy.py
8175e37eaea7ca66ed03283b46bc1d2db0d3f9c3
# Test the runpy module
import unittest
import os
import os.path
import sys
import tempfile
from test.test_support import verbose, run_unittest, forget
from runpy import _run_code, _run_module_code, run_module

# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path

# Set up the test code and expected results

class RunModuleCodeTest(unittest.TestCase):

    expected_result = ["Top level assignment", "Lower level reference"]
    test_source = (
        "# Check basic code execution\n"
        "result = ['Top level assignment']\n"
        "def f():\n"
        "    result.append('Lower level reference')\n"
        "f()\n"
        "# Check the sys module\n"
        "import sys\n"
        "run_argv0 = sys.argv[0]\n"
        "run_name_in_sys_modules = __name__ in sys.modules\n"
        "if run_name_in_sys_modules:\n"
        "   module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
        "# Check nested operation\n"
        "import runpy\n"
        "nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
    )

    def test_run_code(self):
        saved_argv0 = sys.argv[0]
        d = _run_code(self.test_source, {})
        self.failUnless(d["result"] == self.expected_result)
        self.failUnless(d["__name__"] is None)
        self.failUnless(d["__file__"] is None)
        self.failUnless(d["__loader__"] is None)
        self.failUnless(d["__package__"] is None)
        self.failUnless(d["run_argv0"] is saved_argv0)
        self.failUnless("run_name" not in d)
        self.failUnless(sys.argv[0] is saved_argv0)

    def test_run_module_code(self):
        initial = object()
        name = "<Nonsense>"
        file = "Some other nonsense"
        loader = "Now you're just being silly"
        package = ''  # Treat as a top level module
        d1 = dict(initial=initial)
        saved_argv0 = sys.argv[0]
        d2 = _run_module_code(self.test_source, d1, name, file, loader, package)
        self.failUnless("result" not in d1)
        self.failUnless(d2["initial"] is initial)
        self.failUnless(d2["result"] == self.expected_result)
        self.failUnless(d2["nested"]["x"] == 1)
        self.failUnless(d2["__name__"] is name)
        self.failUnless(d2["run_name_in_sys_modules"])
        self.failUnless(d2["module_in_sys_modules"])
        self.failUnless(d2["__file__"] is file)
        self.failUnless(d2["run_argv0"] is file)
        self.failUnless(d2["__loader__"] is loader)
        self.failUnless(d2["__package__"] is package)
        self.failUnless(sys.argv[0] is saved_argv0)
        self.failUnless(name not in sys.modules)


class RunModuleTest(unittest.TestCase):

    def expect_import_error(self, mod_name):
        try:
            run_module(mod_name)
        except ImportError:
            pass
        else:
            self.fail("Expected import error for " + mod_name)

    def test_invalid_names(self):
        # Builtin module
        self.expect_import_error("sys")
        # Non-existent modules
        self.expect_import_error("sys.imp.eric")
        self.expect_import_error("os.path.half")
        self.expect_import_error("a.bee")
        self.expect_import_error(".howard")
        self.expect_import_error("..eaten")
        # Package
        self.expect_import_error("logging")

    def test_library_module(self):
        run_module("runpy")

    def _add_pkg_dir(self, pkg_dir):
        os.mkdir(pkg_dir)
        pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
        pkg_file = open(pkg_fname, "w")
        pkg_file.close()
        return pkg_fname

    def _make_pkg(self, source, depth):
        pkg_name = "__runpy_pkg__"
        test_fname = "runpy_test"+os.extsep+"py"
        pkg_dir = sub_dir = tempfile.mkdtemp()
        if verbose: print "  Package tree in:", sub_dir
        sys.path.insert(0, pkg_dir)
        if verbose: print "  Updated sys.path:", sys.path[0]
        for i in range(depth):
            sub_dir = os.path.join(sub_dir, pkg_name)
            pkg_fname = self._add_pkg_dir(sub_dir)
            if verbose: print "  Next level in:", sub_dir
            if verbose: print "  Created:", pkg_fname
        mod_fname = os.path.join(sub_dir, test_fname)
        mod_file = open(mod_fname, "w")
        mod_file.write(source)
        mod_file.close()
        if verbose: print "  Created:", mod_fname
        mod_name = (pkg_name+".")*depth + "runpy_test"
        return pkg_dir, mod_fname, mod_name

    def _del_pkg(self, top, depth, mod_name):
        for entry in list(sys.modules):
            if entry.startswith("__runpy_pkg__"):
                del sys.modules[entry]
        if verbose: print "  Removed sys.modules entries"
        del sys.path[0]
        if verbose: print "  Removed sys.path entry"
        for root, dirs, files in os.walk(top, topdown=False):
            for name in files:
                try:
                    os.remove(os.path.join(root, name))
                except OSError, ex:
                    if verbose: print ex # Persist with cleaning up
            for name in dirs:
                fullname = os.path.join(root, name)
                try:
                    os.rmdir(fullname)
                except OSError, ex:
                    if verbose: print ex # Persist with cleaning up
        try:
            os.rmdir(top)
            if verbose: print "  Removed package tree"
        except OSError, ex:
            if verbose: print ex # Persist with cleaning up

    def _check_module(self, depth):
        pkg_dir, mod_fname, mod_name = (
               self._make_pkg("x=1\n", depth))
        forget(mod_name)
        try:
            if verbose: print "Running from source:", mod_name
            d1 = run_module(mod_name) # Read from source
            self.failUnless("x" in d1)
            self.failUnless(d1["x"] == 1)
            del d1 # Ensure __loader__ entry doesn't keep file open
            __import__(mod_name)
            os.remove(mod_fname)
            if verbose: print "Running from compiled:", mod_name
            d2 = run_module(mod_name) # Read from bytecode
            self.failUnless("x" in d2)
            self.failUnless(d2["x"] == 1)
            del d2 # Ensure __loader__ entry doesn't keep file open
        finally:
            self._del_pkg(pkg_dir, depth, mod_name)
        if verbose: print "Module executed successfully"

    def _add_relative_modules(self, base_dir, source, depth):
        if depth <= 1:
            raise ValueError("Relative module test needs depth > 1")
        pkg_name = "__runpy_pkg__"
        module_dir = base_dir
        for i in range(depth):
            parent_dir = module_dir
            module_dir = os.path.join(module_dir, pkg_name)
        # Add sibling module
        sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
        sibling_file = open(sibling_fname, "w")
        sibling_file.close()
        if verbose: print "  Added sibling module:", sibling_fname
        # Add nephew module
        uncle_dir = os.path.join(parent_dir, "uncle")
        self._add_pkg_dir(uncle_dir)
        if verbose: print "  Added uncle package:", uncle_dir
        cousin_dir = os.path.join(uncle_dir, "cousin")
        self._add_pkg_dir(cousin_dir)
        if verbose: print "  Added cousin package:", cousin_dir
        nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
        nephew_file = open(nephew_fname, "w")
        nephew_file.close()
        if verbose: print "  Added nephew module:", nephew_fname

    def _check_relative_imports(self, depth, run_name=None):
        contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
        pkg_dir, mod_fname, mod_name = (
               self._make_pkg(contents, depth))
        try:
            self._add_relative_modules(pkg_dir, contents, depth)
            pkg_name = mod_name.rpartition('.')[0]
            if verbose: print "Running from source:", mod_name
            d1 = run_module(mod_name, run_name=run_name) # Read from source
            self.failUnless("__package__" in d1)
            self.failUnless(d1["__package__"] == pkg_name)
            self.failUnless("sibling" in d1)
            self.failUnless("nephew" in d1)
            del d1 # Ensure __loader__ entry doesn't keep file open
            __import__(mod_name)
            os.remove(mod_fname)
            if verbose: print "Running from compiled:", mod_name
            d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
            self.failUnless("__package__" in d2)
            self.failUnless(d2["__package__"] == pkg_name)
            self.failUnless("sibling" in d2)
            self.failUnless("nephew" in d2)
            del d2 # Ensure __loader__ entry doesn't keep file open
        finally:
            self._del_pkg(pkg_dir, depth, mod_name)
        if verbose: print "Module executed successfully"

    def test_run_module(self):
        for depth in range(4):
            if verbose: print "Testing package depth:", depth
            self._check_module(depth)

    def test_explicit_relative_import(self):
        for depth in range(2, 5):
            if verbose: print "Testing relative imports at depth:", depth
            self._check_relative_imports(depth)

    def test_main_relative_import(self):
        for depth in range(2, 5):
            if verbose: print "Testing main relative imports at depth:", depth
            self._check_relative_imports(depth, "__main__")


def test_main():
    run_unittest(RunModuleCodeTest)
    run_unittest(RunModuleTest)

if __name__ == "__main__":
    test_main()
[]
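Outside the test harness, the supported entry point these tests pin down is runpy.run_module; a tiny sketch (the module choice is arbitrary):

import runpy

# execute a module by name, the way 'python -m' would, and get its globals back
globs = runpy.run_module("platform")
print(sorted(globs)[:3])

# passing run_name="__main__" triggers the target module's main guard,
# which is what test_main_relative_import above exercises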
RedisAI/benchmarks
experiments/_pytorch/_grpc_server/protofiles/imagedata_pb2.py
65b8509b81795da73f25f51941c61fbd9765914c
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: imagedata.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()




DESCRIPTOR = _descriptor.FileDescriptor(
  name='imagedata.proto',
  package='',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x0fimagedata.proto\"H\n\tImageData\x12\r\n\x05image\x18\x01 \x01(\x0c\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\r\n\x05\x64type\x18\x04 \x01(\t\"!\n\x0fPredictionClass\x12\x0e\n\x06output\x18\x01 \x03(\x02\x32<\n\tPredictor\x12/\n\rGetPrediction\x12\n.ImageData\x1a\x10.PredictionClass\"\x00\x62\x06proto3')
)




_IMAGEDATA = _descriptor.Descriptor(
  name='ImageData',
  full_name='ImageData',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='image', full_name='ImageData.image', index=0,
      number=1, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='height', full_name='ImageData.height', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='width', full_name='ImageData.width', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='dtype', full_name='ImageData.dtype', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=19,
  serialized_end=91,
)


_PREDICTIONCLASS = _descriptor.Descriptor(
  name='PredictionClass',
  full_name='PredictionClass',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='output', full_name='PredictionClass.output', index=0,
      number=1, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=93,
  serialized_end=126,
)

DESCRIPTOR.message_types_by_name['ImageData'] = _IMAGEDATA
DESCRIPTOR.message_types_by_name['PredictionClass'] = _PREDICTIONCLASS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

ImageData = _reflection.GeneratedProtocolMessageType('ImageData', (_message.Message,), dict(
  DESCRIPTOR = _IMAGEDATA,
  __module__ = 'imagedata_pb2'
  # @@protoc_insertion_point(class_scope:ImageData)
  ))
_sym_db.RegisterMessage(ImageData)

PredictionClass = _reflection.GeneratedProtocolMessageType('PredictionClass', (_message.Message,), dict(
  DESCRIPTOR = _PREDICTIONCLASS,
  __module__ = 'imagedata_pb2'
  # @@protoc_insertion_point(class_scope:PredictionClass)
  ))
_sym_db.RegisterMessage(PredictionClass)



_PREDICTOR = _descriptor.ServiceDescriptor(
  name='Predictor',
  full_name='Predictor',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  serialized_start=128,
  serialized_end=188,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetPrediction',
    full_name='Predictor.GetPrediction',
    index=0,
    containing_service=None,
    input_type=_IMAGEDATA,
    output_type=_PREDICTIONCLASS,
    serialized_options=None,
  ),
])
_sym_db.RegisterServiceDescriptor(_PREDICTOR)

DESCRIPTOR.services_by_name['Predictor'] = _PREDICTOR

# @@protoc_insertion_point(module_scope)
[((13, 10, 13, 36), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ({}, {}), '()', True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((43, 4, 49, 47), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((50, 4, 56, 47), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((88, 4, 94, 47), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n'), ((140, 2, 148, 3), 'google.protobuf.descriptor.MethodDescriptor', '_descriptor.MethodDescriptor', (), '', True, 'from google.protobuf import descriptor as _descriptor\n')]
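The generated classes behave like ordinary protobuf messages, so a serialization round trip needs no gRPC machinery; a short sketch with made-up pixel data (the 3-channel layout is an assumption):

import numpy as np

from imagedata_pb2 import ImageData

img = np.zeros((4, 4, 3), dtype=np.uint8)
msg = ImageData(image=img.tobytes(), height=4, width=4, dtype=str(img.dtype))

wire = msg.SerializeToString()  # bytes as they would travel in the gRPC call
decoded = ImageData.FromString(wire)
restored = np.frombuffer(decoded.image, dtype=decoded.dtype).reshape(
    decoded.height, decoded.width, 3)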
akashtalole/python-flask-restful-api
app/api/admin_sales/discounted.py
475d8fd7be1724183716a197aac4257f8fbbeac4
from sqlalchemy import func from flask_rest_jsonapi import ResourceList from marshmallow_jsonapi import fields from marshmallow_jsonapi.flask import Schema from app.api.helpers.utilities import dasherize from app.api.bootstrap import api from app.models import db from app.models.discount_code import DiscountCode from app.models.event import Event from app.models.order import Order, OrderTicket from app.models.user import User def sales_per_marketer_and_discount_by_status(status): return db.session.query(Event.id.label('event_id'), DiscountCode.id.label('discount_code_id'), User.id.label('marketer_id'), func.sum(Order.amount).label(status + '_sales'), func.sum(OrderTicket.quantity).label(status + '_tickets')) \ .filter(Event.id == Order.event_id) \ .filter(Order.marketer_id == User.id) \ .filter(Order.discount_code_id == DiscountCode.id) \ .filter(Order.status == status) \ .group_by(Event) \ .group_by(DiscountCode) \ .group_by(User) \ .group_by(Order.status) \ .cte() class AdminSalesDiscountedSchema(Schema): """ Discounted sales by event Provides Event name, discount code, marketer mail, count of tickets and total sales for orders grouped by status """ class Meta: type_ = 'admin-sales-discounted' self_view = 'v1.admin_sales_discounted' inflect = dasherize id = fields.String() code = fields.String() email = fields.String() event_name = fields.String() payment_currency = fields.String() sales = fields.Method('calc_sales') @staticmethod def calc_sales(obj): """ Returns sales (dictionary with total sales and ticket count) for placed, completed and pending orders """ res = {'placed': {}, 'completed': {}, 'pending': {}} res['placed']['sales_total'] = obj.placed_sales or 0 res['placed']['ticket_count'] = obj.placed_tickets or 0 res['completed']['sales_total'] = obj.completed_sales or 0 res['completed']['ticket_count'] = obj.completed_tickets or 0 res['pending']['sales_total'] = obj.pending_sales or 0 res['pending']['ticket_count'] = obj.pending_tickets or 0 return res class AdminSalesDiscountedList(ResourceList): """ Resource for sales by marketer. Joins event marketer and orders and subsequently accumulates sales by status """ def query(self, _): pending = sales_per_marketer_and_discount_by_status('pending') completed = sales_per_marketer_and_discount_by_status('completed') placed = sales_per_marketer_and_discount_by_status('placed') discounts = self.session.query(Event.id.label('event_id'), Event.name.label('event_name'), DiscountCode.id.label('discount_code_id'), DiscountCode.code.label('code'), User.id.label('marketer_id'), User.email.label('email')) \ .filter(Event.id == Order.event_id) \ .filter(Order.marketer_id == User.id) \ .filter(Order.discount_code_id == DiscountCode.id) \ .cte() return self.session.query(discounts, pending, completed, placed) \ .outerjoin(pending, (pending.c.event_id == discounts.c.event_id) & (pending.c.discount_code_id == discounts.c.discount_code_id) & (pending.c.marketer_id == discounts.c.marketer_id)) \ .outerjoin(completed, (completed.c.event_id == discounts.c.event_id) & (completed.c.discount_code_id == discounts.c.discount_code_id) & (completed.c.marketer_id == discounts.c.marketer_id)) \ .outerjoin(placed, (placed.c.event_id == discounts.c.event_id) & (placed.c.discount_code_id == discounts.c.discount_code_id) & (placed.c.marketer_id == discounts.c.marketer_id)) methods = ['GET'] decorators = (api.has_permission('is_admin'), ) schema = AdminSalesDiscountedSchema data_layer = { 'model': Event, 'session': db.session, 'methods': { 'query': query } }
[((48, 9, 48, 24), 'marshmallow_jsonapi.fields.String', 'fields.String', ({}, {}), '()', False, 'from marshmallow_jsonapi import fields\n'), ((49, 11, 49, 26), 'marshmallow_jsonapi.fields.String', 'fields.String', ({}, {}), '()', False, 'from marshmallow_jsonapi import fields\n'), ((50, 12, 50, 27), 'marshmallow_jsonapi.fields.String', 'fields.String', ({}, {}), '()', False, 'from marshmallow_jsonapi import fields\n'), ((51, 17, 51, 32), 'marshmallow_jsonapi.fields.String', 'fields.String', ({}, {}), '()', False, 'from marshmallow_jsonapi import fields\n'), ((52, 23, 52, 38), 'marshmallow_jsonapi.fields.String', 'fields.String', ({}, {}), '()', False, 'from marshmallow_jsonapi import fields\n'), ((53, 12, 53, 39), 'marshmallow_jsonapi.fields.Method', 'fields.Method', ({(53, 26, 53, 38): '"""calc_sales"""'}, {}), "('calc_sales')", False, 'from marshmallow_jsonapi import fields\n'), ((109, 18, 109, 48), 'app.api.bootstrap.api.has_permission', 'api.has_permission', ({(109, 37, 109, 47): '"""is_admin"""'}, {}), "('is_admin')", False, 'from app.api.bootstrap import api\n'), ((83, 39, 83, 65), 'app.models.event.Event.id.label', 'Event.id.label', ({(83, 54, 83, 64): '"""event_id"""'}, {}), "('event_id')", False, 'from app.models.event import Event\n'), ((84, 39, 84, 69), 'app.models.event.Event.name.label', 'Event.name.label', ({(84, 56, 84, 68): '"""event_name"""'}, {}), "('event_name')", False, 'from app.models.event import Event\n'), ((85, 39, 85, 80), 'app.models.discount_code.DiscountCode.id.label', 'DiscountCode.id.label', ({(85, 61, 85, 79): '"""discount_code_id"""'}, {}), "('discount_code_id')", False, 'from app.models.discount_code import DiscountCode\n'), ((86, 39, 86, 70), 'app.models.discount_code.DiscountCode.code.label', 'DiscountCode.code.label', ({(86, 63, 86, 69): '"""code"""'}, {}), "('code')", False, 'from app.models.discount_code import DiscountCode\n'), ((87, 39, 87, 67), 'app.models.user.User.id.label', 'User.id.label', ({(87, 53, 87, 66): '"""marketer_id"""'}, {}), "('marketer_id')", False, 'from app.models.user import User\n'), ((88, 39, 88, 64), 'app.models.user.User.email.label', 'User.email.label', ({(88, 56, 88, 63): '"""email"""'}, {}), "('email')", False, 'from app.models.user import User\n'), ((16, 28, 16, 54), 'app.models.event.Event.id.label', 'Event.id.label', ({(16, 43, 16, 53): '"""event_id"""'}, {}), "('event_id')", False, 'from app.models.event import Event\n'), ((17, 28, 17, 69), 'app.models.discount_code.DiscountCode.id.label', 'DiscountCode.id.label', ({(17, 50, 17, 68): '"""discount_code_id"""'}, {}), "('discount_code_id')", False, 'from app.models.discount_code import DiscountCode\n'), ((18, 28, 18, 56), 'app.models.user.User.id.label', 'User.id.label', ({(18, 42, 18, 55): '"""marketer_id"""'}, {}), "('marketer_id')", False, 'from app.models.user import User\n'), ((19, 28, 19, 50), 'sqlalchemy.func.sum', 'func.sum', ({(19, 37, 19, 49): 'Order.amount'}, {}), '(Order.amount)', False, 'from sqlalchemy import func\n'), ((20, 28, 20, 58), 'sqlalchemy.func.sum', 'func.sum', ({(20, 37, 20, 57): 'OrderTicket.quantity'}, {}), '(OrderTicket.quantity)', False, 'from sqlalchemy import func\n')]
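calc_sales only reads six attributes off the row object, so its fallback-to-zero logic can be checked with a stub, sketched here as if the schema class were importable in isolation (in the real app it sits behind the Flask setup):

from types import SimpleNamespace

row = SimpleNamespace(placed_sales=120, placed_tickets=3,
                      completed_sales=None, completed_tickets=None,
                      pending_sales=40, pending_tickets=1)

print(AdminSalesDiscountedSchema.calc_sales(row))
# {'placed': {'sales_total': 120, 'ticket_count': 3},
#  'completed': {'sales_total': 0, 'ticket_count': 0},
#  'pending': {'sales_total': 40, 'ticket_count': 1}}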
g4brielvs/spaCy
spacy/lang/sr/__init__.py
cca8651fc8133172ebaa9d9fc438ed1fbf34fb33
from .stop_words import STOP_WORDS from .tokenizer_exceptions import TOKENIZER_EXCEPTIONS from .lex_attrs import LEX_ATTRS from ...language import Language class SerbianDefaults(Language.Defaults): tokenizer_exceptions = TOKENIZER_EXCEPTIONS lex_attr_getters = LEX_ATTRS stop_words = STOP_WORDS class Serbian(Language): lang = "sr" Defaults = SerbianDefaults __all__ = ["Serbian"]
[]
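A blank pipeline built from this class already tokenizes and applies the stop words and lexical attributes registered above; a minimal sketch:

from spacy.lang.sr import Serbian

nlp = Serbian()
doc = nlp("Ovo je jedna kratka rečenica.")

print([token.text for token in doc])
print([token.is_stop for token in doc])  # driven by STOP_WORDS above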
TJUsym/TJU_Advanced_CV_Homework
mmdet/ops/dcn/__init__.py
2d85943390e9ba53b80988e0ab8d50aef0cd17da
from .functions.deform_conv import deform_conv, modulated_deform_conv from .functions.deform_pool import deform_roi_pooling from .modules.deform_conv import (DeformConv, ModulatedDeformConv, DeformConvPack, ModulatedDeformConvPack) from .modules.deform_pool import (DeformRoIPooling, DeformRoIPoolingPack, ModulatedDeformRoIPoolingPack) __all__ = [ 'DeformConv', 'DeformConvPack', 'ModulatedDeformConv', 'ModulatedDeformConvPack', 'DeformRoIPooling', 'DeformRoIPoolingPack', 'ModulatedDeformRoIPoolingPack', 'deform_conv', 'modulated_deform_conv', 'deform_roi_pooling' ]
[]
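DeformConvPack is intended as a drop-in replacement for a regular convolution that learns its own offsets. A construction-only sketch; the keyword names follow the torch.nn.Conv2d convention and, along with the need for the compiled CUDA extension at forward time, are assumptions:

import torch

from mmdet.ops.dcn import DeformConvPack

# hypothetical 3x3 deformable conv; the offset branch is created internally
dconv = DeformConvPack(in_channels=16, out_channels=32, kernel_size=3, padding=1)

feat = torch.randn(1, 16, 32, 32)
out = dconv.cuda()(feat.cuda())  # the op only runs on GPU; shape (1, 32, 32, 32)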
zaubermaerchen/imas_cg_api
api/skill/serializer.py
45ebdde8c47ff4fabbf58b75721721f142afb46b
# coding: utf-8 from rest_framework import serializers from data.models import Skill, SkillValue class ListSerializer(serializers.ModelSerializer): skill_value_list = serializers.SerializerMethodField(read_only=True) class Meta: model = Skill fields = [ 'skill_id', 'target_unit', 'target_member', 'target_type', 'target_num', 'target_param', 'skill_value_id', 'skill_value_list', 'comment' ] @staticmethod def get_skill_value_list(obj): return SkillValue.get_value_list(obj.skill_value_id) class Costar(object): def __init__(self, name, count): self.name = name self.count = count class CostarSerializer(serializers.Serializer): name = serializers.CharField(max_length=255) count = serializers.IntegerField() def create(self, validated_data): return Costar(**validated_data) def update(self, instance, validated_data): instance.name = validated_data.get('name', instance.name) instance.count = validated_data.get('count', instance.count) return instance
[((7, 23, 7, 72), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', (), '', False, 'from rest_framework import serializers\n'), ((35, 11, 35, 48), 'rest_framework.serializers.CharField', 'serializers.CharField', (), '', False, 'from rest_framework import serializers\n'), ((36, 12, 36, 38), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((25, 15, 25, 60), 'data.models.SkillValue.get_value_list', 'SkillValue.get_value_list', ({(25, 41, 25, 59): 'obj.skill_value_id'}, {}), '(obj.skill_value_id)', False, 'from data.models import Skill, SkillValue\n')]
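Inside a configured Django project, the Costar pair round-trips through its serializer like any non-model DRF serializer; a small sketch:

serializer = CostarSerializer(data={"name": "uzuki", "count": 3})
print(serializer.is_valid())      # True
costar = serializer.save()        # dispatches to create() above
print(costar.name, costar.count)  # uzuki 3

# passing an existing instance routes save() through update() instead
serializer = CostarSerializer(costar, data={"name": "uzuki", "count": 4})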
sichkar-valentyn/Image_processing_in_Python
Codes/Converting_RGB_to_GreyScale.py
43d7c979bcd742cc202a28c2dea6ea5bc87562a2
# File: Converting_RGB_to_GreyScale.py # Description: Opening RGB image as array, converting to GreyScale and saving result into new file # Environment: PyCharm and Anaconda environment # # MIT License # Copyright (c) 2018 Valentyn N Sichkar # github.com/sichkar-valentyn # # Reference to: # Valentyn N Sichkar. Image processing in Python // GitHub platform. DOI: 10.5281/zenodo.1343603 # Opening RGB image as array, converting to GreyScale and saving result into new file # Importing needed libraries import numpy as np from PIL import Image import matplotlib.pyplot as plt from skimage import color from skimage import io import scipy.misc # Creating an array from image data image_RGB = Image.open("images/eagle.jpg") image_np = np.array(image_RGB) # Checking the type of the array print(type(image_np)) # <class 'numpy.ndarray'> # Checking the shape of the array print(image_np.shape) # Showing image with every channel separately channel_R = image_np[:, :, 0] channel_G = image_np[:, :, 1] channel_B = image_np[:, :, 2] # Creating a figure with subplots f, ax = plt.subplots(nrows=2, ncols=2) # ax is (2, 2) np array and to make it easier to read we use 'flatten' function # Or we can call each time ax[0, 0] ax0, ax1, ax2, ax3 = ax.flatten() # Adjusting first subplot ax0.imshow(channel_R, cmap='Reds') ax0.set_xlabel('') ax0.set_ylabel('') ax0.set_title('Red channel') # Adjusting second subplot ax1.imshow(channel_G, cmap='Greens') ax1.set_xlabel('') ax1.set_ylabel('') ax1.set_title('Green channel') # Adjusting third subplot ax2.imshow(channel_B, cmap='Blues') ax2.set_xlabel('') ax2.set_ylabel('') ax2.set_title('Blue channel') # Adjusting fourth subplot ax3.imshow(image_np) ax3.set_xlabel('') ax3.set_ylabel('') ax3.set_title('Original image') # Function to make distance between figures plt.tight_layout() # Giving the name to the window with figure f.canvas.set_window_title('Eagle image in three channels R, G and B') # Showing the plots plt.show() # Converting RGB image into GrayScale image # Using formula: # Y' = 0.299 R + 0.587 G + 0.114 B image_RGB = Image.open("images/eagle.jpg") image_np = np.array(image_RGB) image_GreyScale = image_np[:, :, 0] * 0.299 + image_np[:, :, 1] * 0.587 + image_np[:, :, 2] * 0.114 # Checking the type of the array print(type(image_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape of the array print(image_GreyScale.shape) # Giving the name to the window with figure plt.figure('GreyScaled image from RGB') # Showing the image by using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Preparing array for saving - creating three channels with the same data in each # Firstly, creating array with zero elements # And by 'image_GreyScale.shape + tuple([3])' we add one more element '3' to the tuple # Now the shape will be (1080, 1920, 3) - which is tuple type image_GreyScale_with_3_channels = np.zeros(image_GreyScale.shape + tuple([3])) # Secondly, reshaping GreyScale image from 2D to 3D x = image_GreyScale.reshape((1080, 1920, 1)) # Finally, writing all data in three channels image_GreyScale_with_3_channels[:, :, 0] = x[:, :, 0] image_GreyScale_with_3_channels[:, :, 1] = x[:, :, 0] image_GreyScale_with_3_channels[:, :, 2] = x[:, :, 0] # Saving image into a file from obtained 3D array scipy.misc.imsave("images/result_1.jpg", image_GreyScale_with_3_channels) # Checking that image was written with three channels and they are identical result_1 = Image.open("images/result_1.jpg") result_1_np = np.array(result_1) print(result_1_np.shape) 
print(np.array_equal(result_1_np[:, :, 0], result_1_np[:, :, 1])) print(np.array_equal(result_1_np[:, :, 1], result_1_np[:, :, 2])) # Showing saved resulted image # Giving the name to the window with figure plt.figure('GreyScaled image from RGB') # Here we don't need to specify the map like cmap='Greys' plt.imshow(result_1_np) plt.show() # Another way to convert RGB image into GreyScale image image_RGB = io.imread("images/eagle.jpg") image_GreyScale = color.rgb2gray(image_RGB) # Checking the type of the array print(type(image_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape of the array print(image_GreyScale.shape) # Giving the name to the window with figure plt.figure('GreyScaled image from RGB') # Showing the image by using obtained array plt.imshow(image_GreyScale, cmap='Greys') plt.show() # Saving converted image into a file from processed array scipy.misc.imsave("images/result_2.jpg", image_GreyScale) # One more way for converting image_RGB_as_GreyScale = io.imread("images/eagle.jpg", as_gray=True) # Checking the type of the array print(type(image_RGB_as_GreyScale)) # <class 'numpy.ndarray'> # Checking the shape of the array print(image_RGB_as_GreyScale.shape) # Giving the name to the window with figure plt.figure('GreyScaled image from RGB') # Showing the image by using obtained array plt.imshow(image_RGB_as_GreyScale, cmap='Greys') plt.show() # Saving converted image into a file from processed array scipy.misc.imsave("images/result_3.jpg", image_RGB_as_GreyScale)
[((27, 12, 27, 42), 'PIL.Image.open', 'Image.open', ({(27, 23, 27, 41): '"""images/eagle.jpg"""'}, {}), "('images/eagle.jpg')", False, 'from PIL import Image\n'), ((28, 11, 28, 30), 'numpy.array', 'np.array', ({(28, 20, 28, 29): 'image_RGB'}, {}), '(image_RGB)', True, 'import numpy as np\n'), ((41, 8, 41, 38), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((71, 0, 71, 18), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((75, 0, 75, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((81, 12, 81, 42), 'PIL.Image.open', 'Image.open', ({(81, 23, 81, 41): '"""images/eagle.jpg"""'}, {}), "('images/eagle.jpg')", False, 'from PIL import Image\n'), ((82, 11, 82, 30), 'numpy.array', 'np.array', ({(82, 20, 82, 29): 'image_RGB'}, {}), '(image_RGB)', True, 'import numpy as np\n'), ((89, 0, 89, 39), 'matplotlib.pyplot.figure', 'plt.figure', ({(89, 11, 89, 38): '"""GreyScaled image from RGB"""'}, {}), "('GreyScaled image from RGB')", True, 'import matplotlib.pyplot as plt\n'), ((91, 0, 91, 41), 'matplotlib.pyplot.imshow', 'plt.imshow', (), '', True, 'import matplotlib.pyplot as plt\n'), ((92, 0, 92, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((107, 11, 107, 44), 'PIL.Image.open', 'Image.open', ({(107, 22, 107, 43): '"""images/result_1.jpg"""'}, {}), "('images/result_1.jpg')", False, 'from PIL import Image\n'), ((108, 14, 108, 32), 'numpy.array', 'np.array', ({(108, 23, 108, 31): 'result_1'}, {}), '(result_1)', True, 'import numpy as np\n'), ((114, 0, 114, 39), 'matplotlib.pyplot.figure', 'plt.figure', ({(114, 11, 114, 38): '"""GreyScaled image from RGB"""'}, {}), "('GreyScaled image from RGB')", True, 'import matplotlib.pyplot as plt\n'), ((116, 0, 116, 23), 'matplotlib.pyplot.imshow', 'plt.imshow', ({(116, 11, 116, 22): 'result_1_np'}, {}), '(result_1_np)', True, 'import matplotlib.pyplot as plt\n'), ((117, 0, 117, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((121, 12, 121, 41), 'skimage.io.imread', 'io.imread', ({(121, 22, 121, 40): '"""images/eagle.jpg"""'}, {}), "('images/eagle.jpg')", False, 'from skimage import io\n'), ((122, 18, 122, 43), 'skimage.color.rgb2gray', 'color.rgb2gray', ({(122, 33, 122, 42): 'image_RGB'}, {}), '(image_RGB)', False, 'from skimage import color\n'), ((128, 0, 128, 39), 'matplotlib.pyplot.figure', 'plt.figure', ({(128, 11, 128, 38): '"""GreyScaled image from RGB"""'}, {}), "('GreyScaled image from RGB')", True, 'import matplotlib.pyplot as plt\n'), ((130, 0, 130, 41), 'matplotlib.pyplot.imshow', 'plt.imshow', (), '', True, 'import matplotlib.pyplot as plt\n'), ((131, 0, 131, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((137, 25, 137, 68), 'skimage.io.imread', 'io.imread', (), '', False, 'from skimage import io\n'), ((143, 0, 143, 39), 'matplotlib.pyplot.figure', 'plt.figure', ({(143, 11, 143, 38): '"""GreyScaled image from RGB"""'}, {}), "('GreyScaled image from RGB')", True, 'import matplotlib.pyplot as plt\n'), ((145, 0, 145, 48), 'matplotlib.pyplot.imshow', 'plt.imshow', (), '', True, 'import matplotlib.pyplot as plt\n'), ((146, 0, 146, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((110, 6, 110, 64), 'numpy.array_equal', 'np.array_equal', ({(110, 21, 110, 41): 
'result_1_np[:, :, (0)]', (110, 43, 110, 63): 'result_1_np[:, :, (1)]'}, {}), '(result_1_np[:, :, (0)], result_1_np[:, :, (1)])', True, 'import numpy as np\n'), ((111, 6, 111, 64), 'numpy.array_equal', 'np.array_equal', ({(111, 21, 111, 41): 'result_1_np[:, :, (1)]', (111, 43, 111, 63): 'result_1_np[:, :, (2)]'}, {}), '(result_1_np[:, :, (1)], result_1_np[:, :, (2)])', True, 'import numpy as np\n')]
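The per-channel weighting used throughout the script is just a dot product along the last axis, so the whole conversion collapses to one expression; a sketch equivalent to the manual per-channel sum:

import numpy as np
from PIL import Image

image_np = np.array(Image.open("images/eagle.jpg"))

# Y' = 0.299 R + 0.587 G + 0.114 B as a dot product over the channel axis
image_GreyScale = image_np @ np.array([0.299, 0.587, 0.114])

print(image_GreyScale.shape)  # (height, width)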
hamza-gheggad/gcp-iam-collector
template_renderer.py
02b46453b9ec23af07a0d81f7250f1de61e0ee23
import colorsys
import json

from jinja2 import Environment, PackageLoader

import graph


def create_html(formatted_nodes, formatted_edges, role_color_map, output_name):
    env = Environment(loader=PackageLoader('visualisation', '.'))
    template = env.get_template('visualisation.template')
    default_filters = list(graph.type_properties.keys())
    all_roles = list(role_color_map.keys())
    print(all_roles)
    html = template.render(formatted_nodes=formatted_nodes,
                           formatted_edges=formatted_edges,
                           type_properties=graph.type_properties,
                           default_filters=default_filters,
                           all_roles=all_roles)
    with open(output_name, "w+") as resource_file:
        resource_file.write(html)


def get_description(node):
    desc = node.get_type_name() + "<br/>"
    if node.title:
        desc = desc + node.title + "<br/>"
    if node.properties:
        for k, v in node.properties.items():
            desc = desc + k + ": " + str(v) + "<br/>"
    return desc


def render(nodes, edges, output_name):
    color_map = roles_to_color_map(edges=edges)
    formatted_nodes, formatted_edges = format_graph(nodes, edges, color_map)
    create_html(formatted_nodes, formatted_edges, color_map, output_name)


def color_for_role(role, all_roles):
    hue = float(all_roles.index(role)) / len(all_roles)
    # scale each 0-1 float channel to 0-255 before truncating to an int
    return '#%02x%02x%02x' % tuple(int(c * 255) for c in colorsys.hsv_to_rgb(hue, 1, 0.85))


def sanitise_role(role):
    return str(role).replace('roles/', '') \
        .lower() \
        .replace('writer', 'editor') \
        .replace('reader', 'viewer')


def roles_to_color_map(edges):
    all_roles = list({sanitise_role(e.role) for e in edges if e.role})
    role_map = {}
    for role in all_roles:
        role_map[role] = color_for_role(role, all_roles)
    role_map['other'] = '#00c0ff'
    return role_map


def format_graph(nodes, edges, role_color_map):
    nodes_list = []
    node_ids = {}
    for counter, node in enumerate(nodes):
        node_ids[node.id] = counter
        value = {
            'id': counter,
            'shape': 'icon',
            'label': node.name,
            'type': node.node_type,
            'icon': {
                'face': 'Font Awesome 5 Free',
                'code': node.get_font_code(),
                'size': node.get_size(),
                'color': node.get_color(),
                'weight': 'bold'
            }
        }
        description = get_description(node)
        if description:
            value['title'] = description
        nodes_list.append(json.dumps(value).replace("\\\\", "\\"))

    edges_list = []
    for edge in edges:
        value = {
            'from': node_ids[edge.node_from.id],
            'to': node_ids[edge.node_to.id],
            'arrows': 'to',
        }
        if edge.label:
            value['label'] = edge.label
        if edge.title:
            value['title'] = edge.title

        value['role'] = sanitise_role(edge.role) if edge.role else 'other'
        value['color'] = role_color_map[value['role']]
        edges_list.append(json.dumps(value))

    return nodes_list, edges_list
[((11, 27, 11, 55), 'graph.type_properties.keys', 'graph.type_properties.keys', ({}, {}), '()', False, 'import graph\n'), ((9, 29, 9, 64), 'jinja2.PackageLoader', 'PackageLoader', ({(9, 43, 9, 58): '"""visualisation"""', (9, 60, 9, 63): '"""."""'}, {}), "('visualisation', '.')", False, 'from jinja2 import Environment, PackageLoader\n'), ((101, 26, 101, 43), 'json.dumps', 'json.dumps', ({(101, 37, 101, 42): 'value'}, {}), '(value)', False, 'import json\n'), ((44, 57, 44, 90), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', ({(44, 77, 44, 80): 'hue', (44, 82, 44, 83): '(1)', (44, 85, 44, 89): '(0.85)'}, {}), '(hue, 1, 0.85)', False, 'import colorsys\n'), ((85, 26, 85, 43), 'json.dumps', 'json.dumps', ({(85, 37, 85, 42): 'value'}, {}), '(value)', False, 'import json\n')]
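The role-to-colour mapping can be exercised without any graph data; a short sketch with made-up role names:

roles = ['bigquery.dataviewer', 'storage.objecteditor', 'owner']

print(sanitise_role('roles/bigquery.dataViewer'))  # 'bigquery.dataviewer'
print(color_for_role('owner', roles))              # deterministic '#rrggbb' string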
danglotb/powerapi
powerapi/cli/tools.py
67b2508588bfe1e20d90f9fe6bccda34d3455262
# Copyright (c) 2018, INRIA
# Copyright (c) 2018, University of Lille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.

# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import logging
from functools import reduce

from powerapi.exception import PowerAPIException
from powerapi.cli.parser import MainParser, ComponentSubParser
from powerapi.cli.parser import store_true
from powerapi.cli.parser import BadValueException, MissingValueException
from powerapi.cli.parser import BadTypeException, BadContextException
from powerapi.cli.parser import UnknowArgException
from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel
from powerapi.database import MongoDB, CsvDB, InfluxDB, OpenTSDB
from powerapi.puller import PullerActor
from powerapi.pusher import PusherActor


def enable_log(arg, val, args, acc):
    acc[arg] = logging.DEBUG
    return args, acc


def check_csv_files(files):
    return reduce(lambda acc, f: acc and os.access(f, os.R_OK), files.split(','), True)


def extract_file_names(arg, val, args, acc):
    acc[arg] = val.split(',')
    return args, acc


class CommonCLIParser(MainParser):

    def __init__(self):
        MainParser.__init__(self)

        self.add_argument('v', 'verbose', flag=True, action=enable_log, default=logging.NOTSET,
                          help='enable verbose mode')
        self.add_argument('s', 'stream', flag=True, action=store_true, default=False, help='enable stream mode')

        subparser_mongo_input = ComponentSubParser('mongodb')
        subparser_mongo_input.add_argument('u', 'uri', help='specify MongoDB uri')
        subparser_mongo_input.add_argument('d', 'db', help='specify MongoDB database name')
        subparser_mongo_input.add_argument('c', 'collection', help='specify MongoDB database collection')
        subparser_mongo_input.add_argument('n', 'name', help='specify puller name', default='puller_mongodb')
        subparser_mongo_input.add_argument('m', 'model', help='specify data type that will be stored in the database',
                                           default='HWPCReport')
        self.add_component_subparser('input', subparser_mongo_input,
                                     help_str='specify a database input : --input database_name ARG1 ARG2 ...')

        subparser_csv_input = ComponentSubParser('csv')
        subparser_csv_input.add_argument('f', 'files',
                                         help='specify input csv files with this format : file1,file2,file3',
                                         action=extract_file_names, default=[],
                                         check=check_csv_files,
                                         check_msg='one or more csv files couldn\'t be read')
        subparser_csv_input.add_argument('m', 'model', help='specify data type that will be stored in the database',
                                         default='HWPCReport')
        subparser_csv_input.add_argument('n', 'name', help='specify puller name', default='puller_csv')
        self.add_component_subparser('input', subparser_csv_input,
                                     help_str='specify a database input : --input database_name ARG1 ARG2 ...')

        subparser_mongo_output = ComponentSubParser('mongodb')
        subparser_mongo_output.add_argument('u', 'uri', help='specify MongoDB uri')
        subparser_mongo_output.add_argument('d', 'db', help='specify MongoDB database name')
        subparser_mongo_output.add_argument('c', 'collection', help='specify MongoDB database collection')

        subparser_mongo_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
                                            default='PowerReport')
        subparser_mongo_output.add_argument('n', 'name', help='specify pusher name', default='pusher_mongodb')
        self.add_component_subparser('output', subparser_mongo_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ...')

        subparser_csv_output = ComponentSubParser('csv')
        subparser_csv_output.add_argument('d', 'directory',
                                          help='specify directory where output csv files will be written')
        subparser_csv_output.add_argument('m', 'model', help='specify data type that will be stored in the database',
                                          default='PowerReport')

        subparser_csv_output.add_argument('n', 'name', help='specify pusher name', default='pusher_csv')
        self.add_component_subparser('output', subparser_csv_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ...')

        subparser_influx_output = ComponentSubParser('influxdb')
        subparser_influx_output.add_argument('u', 'uri', help='specify InfluxDB uri')
        subparser_influx_output.add_argument('d', 'db', help='specify InfluxDB database name')
        subparser_influx_output.add_argument('p', 'port', help='specify InfluxDB connection port', type=int)

        subparser_influx_output.add_argument('m', 'model',
                                             help='specify data type that will be stored in the database',
                                             default='PowerReport')
        subparser_influx_output.add_argument('n', 'name', help='specify pusher name', default='pusher_influxdb')
        self.add_component_subparser('output', subparser_influx_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ...')

        subparser_opentsdb_output = ComponentSubParser('opentsdb')
        subparser_opentsdb_output.add_argument('u', 'uri', help='specify openTSDB host')
        subparser_opentsdb_output.add_argument('p', 'port', help='specify openTSDB connection port', type=int)
        subparser_opentsdb_output.add_argument('metric_name', help='specify metric name')

        subparser_opentsdb_output.add_argument('m', 'model',
                                               help='specify data type that will be stored in the database',
                                               default='PowerReport')
        subparser_opentsdb_output.add_argument('n', 'name', help='specify pusher name', default='pusher_opentsdb')
        self.add_component_subparser('output', subparser_opentsdb_output,
                                     help_str='specify a database output : --output database_name ARG1 ARG2 ...')

    def parse_argv(self):
        try:
            return self.parse(sys.argv[1:])
        except BadValueException as exn:
            msg = 'CLI error : argument ' + exn.argument_name + ' : ' + exn.msg
            print(msg, file=sys.stderr)

        except MissingValueException as exn:
            msg = 'CLI error : argument ' + exn.argument_name + ' : expect a value'
            print(msg, file=sys.stderr)

        except BadTypeException as exn:
            msg = 'CLI error : argument ' + exn.argument_name + ' : expect '
            msg += exn.article + ' ' + exn.type_name
            print(msg, file=sys.stderr)

        except UnknowArgException as exn:
            msg = 'CLI error : unknown argument ' + exn.argument_name
            print(msg, file=sys.stderr)

        except BadContextException as exn:
            msg = 'CLI error : argument ' + exn.argument_name
            msg += ' not used in the correct context\nUse it with the following arguments :'
            for main_arg_name, context_name in exn.context_list:
                msg += '\n  --' + main_arg_name + ' ' + context_name
            print(msg, file=sys.stderr)
        sys.exit()


class Generator:

    def __init__(self, component_group_name):
        self.component_group_name = component_group_name

    def generate(self, config):
        if self.component_group_name not in config:
            print('CLI error : no ' + self.component_group_name + ' specified', file=sys.stderr)
            sys.exit()

        actors = {}

        for component_type, components_list in config[self.component_group_name].items():
            for component_name, component_config in components_list.items():
                try:
                    actors[component_name] = self._gen_actor(component_type, component_config, config)
                except KeyError as exn:
                    msg = 'CLI error : argument ' + exn.args[0]
                    msg += ' needed with --' + self.component_group_name + ' ' + component_type
                    print(msg, file=sys.stderr)
                    sys.exit()

        return actors

    def _gen_actor(self, component_name, component_config, main_config):
        raise NotImplementedError()


class ModelNameAlreadyUsed(PowerAPIException):
    """
    Exception raised when attempting to add to a DBActorGenerator a model factory with a name already bound to another
    model factory in the DBActorGenerator
    """


class DatabaseNameAlreadyUsed(PowerAPIException):
    """
    Exception raised when attempting to add to a DBActorGenerator a database factory with a name already bound to
    another database factory in the DBActorGenerator
    """


class DBActorGenerator(Generator):

    def __init__(self, component_group_name):
        Generator.__init__(self, component_group_name)
        self.model_factory = {
            'HWPCReport': HWPCModel(),
            'PowerReport': PowerModel(),
            'FormulaReport': FormulaModel(),
            'ControlReport': ControlModel(),
        }

        self.db_factory = {
            'mongodb': lambda db_config: MongoDB(db_config['uri'], db_config['db'], db_config['collection']),
            'csv': lambda db_config: CsvDB(current_path=os.getcwd() if 'directory' not in db_config else db_config['directory'],
                                           files=[] if 'files' not in db_config else db_config['files']),
            'influxdb': lambda db_config: InfluxDB(db_config['uri'], db_config['port'], db_config['db']),
            'opentsdb': lambda db_config: OpenTSDB(db_config['uri'], db_config['port'], db_config['metric_name']),
        }

    def add_model_factory(self, model_name, model_factory):
        if model_name in self.model_factory:
            raise ModelNameAlreadyUsed()
        self.model_factory[model_name] = model_factory

    def add_db_factory(self, db_name, db_factory):
        if db_name in self.db_factory:
            raise DatabaseNameAlreadyUsed()
        self.db_factory[db_name] = db_factory

    def _generate_db(self, db_name, db_config, main_config):
        return self.db_factory[db_name](db_config)

    def _gen_actor(self, db_name, db_config, main_config):
        db = self._generate_db(db_name, db_config, main_config)
        model = self.model_factory[db_config['model']]
        name = db_config['name']
        return self._actor_factory(name, db, model, 
main_config['stream'], main_config['verbose']) def _actor_factory(self, name, db, model, stream_mode, level_logger): raise NotImplementedError() class PullerGenerator(DBActorGenerator): def __init__(self, report_filter): DBActorGenerator.__init__(self, 'input') self.report_filter = report_filter def _actor_factory(self, name, db, model, stream_mode, level_logger): return PullerActor(name, db, self.report_filter, model, stream_mode, level_logger) class PusherGenerator(DBActorGenerator): def __init__(self): DBActorGenerator.__init__(self, 'output') def _actor_factory(self, name, db, model, stream_mode, level_logger): return PusherActor(name, model, db, level_logger)
[((63, 8, 63, 33), 'powerapi.cli.parser.MainParser.__init__', 'MainParser.__init__', ({(63, 28, 63, 32): 'self'}, {}), '(self)', False, 'from powerapi.cli.parser import MainParser, ComponentSubParser\n'), ((69, 32, 69, 61), 'powerapi.cli.parser.ComponentSubParser', 'ComponentSubParser', ({(69, 51, 69, 60): '"""mongodb"""'}, {}), "('mongodb')", False, 'from powerapi.cli.parser import MainParser, ComponentSubParser\n'), ((79, 30, 79, 55), 'powerapi.cli.parser.ComponentSubParser', 'ComponentSubParser', ({(79, 49, 79, 54): '"""csv"""'}, {}), "('csv')", False, 'from powerapi.cli.parser import MainParser, ComponentSubParser\n'), ((90, 33, 90, 62), 'powerapi.cli.parser.ComponentSubParser', 'ComponentSubParser', ({(90, 52, 90, 61): '"""mongodb"""'}, {}), "('mongodb')", False, 'from powerapi.cli.parser import MainParser, ComponentSubParser\n'), ((101, 31, 101, 56), 'powerapi.cli.parser.ComponentSubParser', 'ComponentSubParser', ({(101, 50, 101, 55): '"""csv"""'}, {}), "('csv')", False, 'from powerapi.cli.parser import MainParser, ComponentSubParser\n'), ((110, 34, 110, 64), 'powerapi.cli.parser.ComponentSubParser', 'ComponentSubParser', ({(110, 53, 110, 63): '"""influxdb"""'}, {}), "('influxdb')", False, 'from powerapi.cli.parser import MainParser, ComponentSubParser\n'), ((120, 36, 120, 66), 'powerapi.cli.parser.ComponentSubParser', 'ComponentSubParser', ({(120, 55, 120, 65): '"""opentsdb"""'}, {}), "('opentsdb')", False, 'from powerapi.cli.parser import MainParser, ComponentSubParser\n'), ((159, 8, 159, 18), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((254, 15, 254, 90), 'powerapi.puller.PullerActor', 'PullerActor', ({(254, 27, 254, 31): 'name', (254, 33, 254, 35): 'db', (254, 37, 254, 55): 'self.report_filter', (254, 57, 254, 62): 'model', (254, 64, 254, 75): 'stream_mode', (254, 77, 254, 89): 'level_logger'}, {}), '(name, db, self.report_filter, model, stream_mode, level_logger)', False, 'from powerapi.puller import PullerActor\n'), ((263, 15, 263, 57), 'powerapi.pusher.PusherActor', 'PusherActor', ({(263, 27, 263, 31): 'name', (263, 33, 263, 38): 'model', (263, 40, 263, 42): 'db', (263, 44, 263, 56): 'level_logger'}, {}), '(name, model, db, level_logger)', False, 'from powerapi.pusher import PusherActor\n'), ((170, 12, 170, 22), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((209, 26, 209, 37), 'powerapi.report_model.HWPCModel', 'HWPCModel', ({}, {}), '()', False, 'from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel\n'), ((210, 27, 210, 39), 'powerapi.report_model.PowerModel', 'PowerModel', ({}, {}), '()', False, 'from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel\n'), ((211, 29, 211, 43), 'powerapi.report_model.FormulaModel', 'FormulaModel', ({}, {}), '()', False, 'from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel\n'), ((212, 29, 212, 43), 'powerapi.report_model.ControlModel', 'ControlModel', ({}, {}), '()', False, 'from powerapi.report_model import HWPCModel, PowerModel, FormulaModel, ControlModel\n'), ((52, 41, 52, 62), 'os.access', 'os.access', ({(52, 51, 52, 52): 'f', (52, 54, 52, 61): 'os.R_OK'}, {}), '(f, os.R_OK)', False, 'import os\n'), ((216, 41, 216, 108), 'powerapi.database.MongoDB', 'MongoDB', ({(216, 49, 216, 65): "db_config['uri']", (216, 67, 216, 82): "db_config['db']", (216, 84, 216, 107): "db_config['collection']"}, {}), "(db_config['uri'], db_config['db'], db_config['collection'])", False, 'from powerapi.database import MongoDB, CsvDB, 
InfluxDB, OpenTSDB\n'), ((219, 42, 219, 104), 'powerapi.database.InfluxDB', 'InfluxDB', ({(219, 51, 219, 67): "db_config['uri']", (219, 69, 219, 86): "db_config['port']", (219, 88, 219, 103): "db_config['db']"}, {}), "(db_config['uri'], db_config['port'], db_config['db'])", False, 'from powerapi.database import MongoDB, CsvDB, InfluxDB, OpenTSDB\n'), ((220, 42, 220, 113), 'powerapi.database.OpenTSDB', 'OpenTSDB', ({(220, 51, 220, 67): "db_config['uri']", (220, 69, 220, 86): "db_config['port']", (220, 88, 220, 112): "db_config['metric_name']"}, {}), "(db_config['uri'], db_config['port'], db_config['metric_name'])", False, 'from powerapi.database import MongoDB, CsvDB, InfluxDB, OpenTSDB\n'), ((182, 20, 182, 30), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((217, 56, 217, 67), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n')]
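The parser above turns argv into a nested config dict, and the generator classes turn its 'input'/'output' sections into puller/pusher actors. A minimal sketch of how the pieces compose, assuming the powerapi modules shown here are importable; `my_report_filter` is a hypothetical stand-in for a report-filter object, whose construction is not part of this file:

config = CommonCLIParser().parse_argv()
pullers = PullerGenerator(my_report_filter).generate(config)  # one PullerActor per --input section
pushers = PusherGenerator().generate(config)                 # one PusherActor per --output section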
PyXRD/pyxrd
pyxrd/mixture/models/insitu_behaviours/insitu_behaviour.py
26bacdf64f3153fa74b8caa62e219b76d91a55c1
# coding=UTF-8 # ex:ts=4:sw=4:et=on # # Copyright (c) 2013, Mathijs Dumon # All rights reserved. # Complete license can be found in the LICENSE file. from mvc.models.properties import StringProperty from pyxrd.generic.io.custom_io import storables, Storable from pyxrd.generic.models.base import DataModel from pyxrd.refinement.refinables.mixins import RefinementGroup @storables.register() class InSituBehaviour(DataModel, RefinementGroup, Storable): """ Interface class for coding in-situ behaviour scripts. Sub-classes should override or implement the methods below. """ # MODEL INTEL: class Meta(DataModel.Meta): store_id = "InSituBehaviour" # Override this so it is a unique string concrete = False # Indicates this cannot be instantiated and added in the UI mixture = property(DataModel.parent.fget, DataModel.parent.fset) # REFINEMENT GROUP IMPLEMENTATION: @property def refine_title(self): return "In-situ behaviour" @property def refine_descriptor_data(self): return dict( phase_name=self.phase.refine_title, component_name="*" ) #: The name of this Behaviour name = StringProperty( default="New Behaviour", text="Name", visible=True, persistent=True, tabular=True ) # ------------------------------------------------------------ # Initialization and other internals # ------------------------------------------------------------ def __init__(self, *args, **kwargs): my_kwargs = self.pop_kwargs(kwargs, *[prop.label for prop in InSituBehaviour.Meta.get_local_persistent_properties()] ) super(InSituBehaviour, self).__init__(*args, **kwargs) kwargs = my_kwargs with self.data_changed.hold(): self.name = self.get_kwarg(kwargs, self.name, "name") pass #end of constructor # ------------------------------------------------------------ # Methods & Functions # ------------------------------------------------------------ def apply(self, phase): assert phase is not None, "Cannot apply on None" assert self.is_compatible_with(phase), "`%r` is not compatible with phase `%r`" % (self, phase) def is_compatible_with(self, phase): return False # sub classes need to override this pass #end of class
[((15, 1, 15, 21), 'pyxrd.generic.io.custom_io.storables.register', 'storables.register', ({}, {}), '()', False, 'from pyxrd.generic.io.custom_io import storables, Storable\n'), ((42, 11, 45, 5), 'mvc.models.properties.StringProperty', 'StringProperty', (), '', False, 'from mvc.models.properties import StringProperty\n')]
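InSituBehaviour is an abstract base: concrete behaviours are expected to subclass it, register with `storables`, and override `is_compatible_with` while extending `apply`. A minimal sketch of that contract — the subclass, its `store_id`, and the phase attributes it would touch are all hypothetical:

@storables.register()
class ConstantBehaviour(InSituBehaviour):
    class Meta(InSituBehaviour.Meta):
        store_id = "ConstantBehaviour"  # must be unique among registered storables
        concrete = True                 # unlike the base, this one could be added in the UI

    def is_compatible_with(self, phase):
        return phase is not None  # a real check would inspect the phase's components

    def apply(self, phase):
        super().apply(phase)  # runs the base-class compatibility assertions
        # ... update the phase's refinable properties for the current step here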
vishalbelsare/SLAPP3
1 plainProgrammingBug/start 1 plainProgrammingBug.py
da187b771831aaaabaee16a26ad341db2e968104
# start 1 plainProgrammingBug.py import random def SimpleBug(): # the environment worldXSize = 80 worldYSize = 80 # the bug xPos = 40 yPos = 40 # the action for i in range(100): xPos += randomMove() yPos += randomMove() xPos = (xPos + worldXSize) % worldXSize yPos = (yPos + worldYSize) % worldYSize print ("I moved to X = ", xPos, " Y = ", yPos) # returns -1, 0, 1 with equal probability def randomMove(): return random.randint(-1, 1) SimpleBug() """ you can eliminate the randomMove() function substituting xPos += randomMove() yPos += randomMove() with xPos += random.randint(-1, 1) yPos += random.randint(-1, 1) but the use of the function allows us to use here a self-explanatory name """
[((27, 11, 27, 32), 'random.randint', 'random.randint', ({(27, 26, 27, 28): '(-1)', (27, 30, 27, 31): '(1)'}, {}), '(-1, 1)', False, 'import random\n')]
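The double-modulo idiom in SimpleBug — adding the world size before taking the remainder — is what makes the wrap-around work when a step goes negative. A quick standalone check of both edges:

worldXSize = 80
print((0 - 1 + worldXSize) % worldXSize)   # 79: stepping left off x=0 wraps to the far edge
print((79 + 1 + worldXSize) % worldXSize)  # 0: stepping right off x=79 wraps back to 0

In Python the `+ worldXSize` is strictly redundant, since `-1 % 80` is already `79`, but it keeps the idiom portable to languages where `%` can return negative values.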
kjco/bioinformatics-algorithms
ba5a-min-coins/money_change.py
3c466157b89c1cbd54749563e39d86a307d7a3f3
money = 8074
#money = 18705
#coin_list = [24,23,21,5,3,1]
coin_list = [24,13,12,7,5,3,1]
#coin_list = list(map(int, open('dataset_71_8.txt').read().split(',')))

# Bottom-up DP: d[m] is the minimum number of coins that sum to m.
d = {0: 0}
for m in range(1, money + 1):
    min_coin = 1000000
    for coin in coin_list:
        if m >= coin and d[m - coin] + 1 < min_coin:
            min_coin = d[m - coin] + 1
    d[m] = min_coin

#print(d)
print(d[money])
[]
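The loop implements the textbook bottom-up recurrence d[m] = 1 + min over coins c <= m of d[m - c], with d[0] = 0. The same idea as a reusable function — a sketch that also handles coin sets that cannot reach every amount:

import math

def min_num_coins(money, coins):
    # d[m] = fewest coins summing to m; math.inf marks unreachable amounts.
    d = [0] + [math.inf] * money
    for m in range(1, money + 1):
        for c in coins:
            if c <= m and d[m - c] + 1 < d[m]:
                d[m] = d[m - c] + 1
    return d[money]

print(min_num_coins(8074, [24, 13, 12, 7, 5, 3, 1]))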
igordejanovic/textx-bibtex
examples/remove_comments.py
b1374a39b96da9c1bc979c367b9ed3feb04f4f01
""" Remove comments from bib file. """ from textx import metamodel_for_language from txbibtex import bibentry_str BIB_FILE = 'references.bib' bibfile = metamodel_for_language('bibtex').model_from_file(BIB_FILE) # Drop line comments. print('\n'.join([bibentry_str(e) for e in bibfile.entries if e.__class__.__name__ != 'BibLineComment']))
[((8, 10, 8, 42), 'textx.metamodel_for_language', 'metamodel_for_language', ({(8, 33, 8, 41): '"""bibtex"""'}, {}), "('bibtex')", False, 'from textx import metamodel_for_language\n'), ((11, 17, 11, 32), 'txbibtex.bibentry_str', 'bibentry_str', ({(11, 30, 11, 31): 'e'}, {}), '(e)', False, 'from txbibtex import bibentry_str\n')]
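The same comprehension that prints the filtered entries can write them back out. A sketch using only the two APIs already imported above; the output path is hypothetical:

kept = [bibentry_str(e) for e in bibfile.entries
        if e.__class__.__name__ != 'BibLineComment']
with open('references.nocomments.bib', 'w') as out:
    out.write('\n'.join(kept) + '\n')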
bopopescu/Social-Lite
google-cloud-sdk/lib/surface/compute/resource_policies/create/group_placement.py
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
# -*- coding: utf-8 -*- # # Copyright 2019 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Create resource policy command.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from googlecloudsdk.api_lib.compute import base_classes from googlecloudsdk.api_lib.compute import utils as compute_api from googlecloudsdk.api_lib.util import apis from googlecloudsdk.calliope import base from googlecloudsdk.command_lib.compute import flags as compute_flags from googlecloudsdk.command_lib.compute.resource_policies import flags from googlecloudsdk.command_lib.compute.resource_policies import util def _CommonArgs(parser, api_version): """A helper function to build args based on different API version.""" messages = apis.GetMessagesModule('compute', api_version) flags.MakeResourcePolicyArg().AddArgument(parser) flags.AddCommonArgs(parser) flags.AddGroupPlacementArgs(parser, messages) parser.display_info.AddCacheUpdater(None) @base.ReleaseTracks(base.ReleaseTrack.ALPHA) class CreateGroupPlacement(base.CreateCommand): """Create a Google Compute Engine Group Placement Resource Policy.""" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_ALPHA_API_VERSION) def Run(self, args): holder = base_classes.ComputeApiHolder(self.ReleaseTrack()) client = holder.client policy_ref = flags.MakeResourcePolicyArg().ResolveAsResource( args, holder.resources, scope_lister=compute_flags.GetDefaultScopeLister(holder.client)) messages = holder.client.messages resource_policy = util.MakeGroupPlacementPolicy(policy_ref, args, messages) create_request = messages.ComputeResourcePoliciesInsertRequest( resourcePolicy=resource_policy, project=policy_ref.project, region=policy_ref.region) service = holder.client.apitools_client.resourcePolicies return client.MakeRequests([(service, 'Insert', create_request)])[0] @base.ReleaseTracks(base.ReleaseTrack.BETA) class CreateGroupPlacementBeta(CreateGroupPlacement): """Create a Google Compute Engine Group Placement Resource Policy.""" @staticmethod def Args(parser): _CommonArgs(parser, api_version=compute_api.COMPUTE_BETA_API_VERSION) CreateGroupPlacement.detailed_help = { 'DESCRIPTION': """\ Create a Google Compute Engine Group Placement Resource Policy. """, 'EXAMPLES': """\ To create a Google Compute Engine Group Placement Resource policy with 2 VMs and 2 availability domains, run: $ {command} my-resource-policy --region=REGION --vm-count=2 --availability-domain-count=2 """ }
[((39, 1, 39, 44), 'googlecloudsdk.calliope.base.ReleaseTracks', 'base.ReleaseTracks', ({(39, 20, 39, 43): 'base.ReleaseTrack.ALPHA'}, {}), '(base.ReleaseTrack.ALPHA)', False, 'from googlecloudsdk.calliope import base\n'), ((67, 1, 67, 43), 'googlecloudsdk.calliope.base.ReleaseTracks', 'base.ReleaseTracks', ({(67, 20, 67, 42): 'base.ReleaseTrack.BETA'}, {}), '(base.ReleaseTrack.BETA)', False, 'from googlecloudsdk.calliope import base\n'), ((32, 13, 32, 59), 'googlecloudsdk.api_lib.util.apis.GetMessagesModule', 'apis.GetMessagesModule', ({(32, 36, 32, 45): '"""compute"""', (32, 47, 32, 58): 'api_version'}, {}), "('compute', api_version)", False, 'from googlecloudsdk.api_lib.util import apis\n'), ((34, 2, 34, 29), 'googlecloudsdk.command_lib.compute.resource_policies.flags.AddCommonArgs', 'flags.AddCommonArgs', ({(34, 22, 34, 28): 'parser'}, {}), '(parser)', False, 'from googlecloudsdk.command_lib.compute.resource_policies import flags\n'), ((35, 2, 35, 47), 'googlecloudsdk.command_lib.compute.resource_policies.flags.AddGroupPlacementArgs', 'flags.AddGroupPlacementArgs', ({(35, 30, 35, 36): 'parser', (35, 38, 35, 46): 'messages'}, {}), '(parser, messages)', False, 'from googlecloudsdk.command_lib.compute.resource_policies import flags\n'), ((57, 22, 57, 79), 'googlecloudsdk.command_lib.compute.resource_policies.util.MakeGroupPlacementPolicy', 'util.MakeGroupPlacementPolicy', ({(57, 52, 57, 62): 'policy_ref', (57, 64, 57, 68): 'args', (57, 70, 57, 78): 'messages'}, {}), '(policy_ref, args, messages)', False, 'from googlecloudsdk.command_lib.compute.resource_policies import util\n'), ((33, 2, 33, 31), 'googlecloudsdk.command_lib.compute.resource_policies.flags.MakeResourcePolicyArg', 'flags.MakeResourcePolicyArg', ({}, {}), '()', False, 'from googlecloudsdk.command_lib.compute.resource_policies import flags\n'), ((51, 17, 51, 46), 'googlecloudsdk.command_lib.compute.resource_policies.flags.MakeResourcePolicyArg', 'flags.MakeResourcePolicyArg', ({}, {}), '()', False, 'from googlecloudsdk.command_lib.compute.resource_policies import flags\n'), ((54, 21, 54, 71), 'googlecloudsdk.command_lib.compute.flags.GetDefaultScopeLister', 'compute_flags.GetDefaultScopeLister', ({(54, 57, 54, 70): 'holder.client'}, {}), '(holder.client)', True, 'from googlecloudsdk.command_lib.compute import flags as compute_flags\n')]
notoraptor/paperoni
paperoni/io.py
acdf2d3d790b98d6a171177ffd9d6342f86bc7ea
import json from .papers import Papers from .researchers import Researchers def ResearchersFile(filename): """Parse a file containing researchers.""" try: with open(filename, "r") as file: data = json.load(file) except FileNotFoundError: data = {} return Researchers(data, filename=filename) def PapersFile(filename, researchers=None): """Parse a file containing papers.""" try: with open(filename, "r") as file: data = json.load(file) except FileNotFoundError: data = {} return Papers(data, filename=filename, researchers=researchers)
[((11, 19, 11, 34), 'json.load', 'json.load', ({(11, 29, 11, 33): 'file'}, {}), '(file)', False, 'import json\n'), ((21, 19, 21, 34), 'json.load', 'json.load', ({(21, 29, 21, 33): 'file'}, {}), '(file)', False, 'import json\n')]
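Both loaders share one pattern: parse the JSON if the file exists, otherwise fall back to an empty dict, so a first run against a missing file still returns a usable collection bound to that filename. A usage sketch with hypothetical file names:

researchers = ResearchersFile('researchers.json')
papers = PapersFile('papers.json', researchers=researchers)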
zachkont/sd2
src/lib/sd2/test_addresses.py
92d8c55a8c7ac51c00ba514be01955aa7162e4ef
############################################################################# # Copyright (c) 2017 SiteWare Corp. All right reserved ############################################################################# import logging import pytest from . import addresses def test_pytest(): assert True def test_object_exists(): assert addresses.cidr_db def test_new_address(): address = addresses.cidr_db.get_address_for_host('test_test_foo') assert address assert address >= addresses.cidr_db.first_address() assert address <= addresses.cidr_db.last_address() addresses.cidr_db.reload() assert addresses.cidr_db.get_address_for_host('test_test_foo') == address assert addresses.cidr_db.has('test_test_foo') addresses.cidr_db.forget('test_test_foo') assert not addresses.cidr_db.has('test_test_foo') addresses.cidr_db.reload() assert not addresses.cidr_db.has('test_test_foo')
[]
Asha-ai/BERT_abstractive_proj
config_model.py
f0e8f659d6b8821cfe0d15f4075e8cb890efdfe9
import texar.tf as tx beam_width = 5 hidden_dim = 768 bert = { 'pretrained_model_name': 'bert-base-uncased' } # See https://texar.readthedocs.io/en/latest/code/modules.html#texar.tf.modules.BERTEncoder.default_hparams bert_encoder = {} # From https://github.com/asyml/texar/blob/413e07f859acbbee979f274b52942edd57b335c1/examples/transformer/config_model.py#L27-L45 # with adjustments for BERT decoder = { 'dim': hidden_dim, 'num_blocks': 6, 'multihead_attention': { 'num_heads': 8, 'output_dim': hidden_dim }, 'initializer': { 'type': 'variance_scaling_initializer', 'kwargs': { 'scale': 1.0, 'mode': 'fan_avg', 'distribution': 'uniform', }, }, 'poswise_feedforward': tx.modules.default_transformer_poswise_net_hparams(output_dim=hidden_dim) } loss_label_confidence = 0.9 opt = { 'optimizer': { 'type': 'AdamOptimizer', 'kwargs': { 'beta1': 0.9, 'beta2': 0.997, 'epsilon': 1e-9 } } } lr = { # The 'learning_rate_schedule' can have the following 3 values: # - 'static' -> A simple static learning rate, specified by 'static_lr' # - 'aiayn' -> The learning rate used in the "Attention is all you need" paper. # - 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' -> The learning rate for Texar's Transformer example 'learning_rate_schedule': 'aiayn', # The learning rate constant used for the 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'lr_constant': 2 * (hidden_dim ** -0.5), # The warmup steps for the 'aiayn' and 'constant.linear_warmup.rsqrt_decay.rsqrt_depth' learning rate 'warmup_steps': 4000, # The static learning rate, when 'static' is used. 'static_lr': 1e-3, # A multiplier that can be applied to the 'aiayn' learning rate. 'aiayn_multiplier': 0.2 }
[((30, 27, 30, 100), 'texar.tf.modules.default_transformer_poswise_net_hparams', 'tx.modules.default_transformer_poswise_net_hparams', (), '', True, 'import texar.tf as tx\n')]
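The 'aiayn' option names the schedule from the "Attention is All You Need" paper: warm up linearly for `warmup_steps`, then decay with the inverse square root of the step. A sketch of that formula with this config's constants — exactly how the training loop folds in 'aiayn_multiplier' is an assumption here:

def aiayn_lr(step, hidden_dim=768, warmup_steps=4000, multiplier=0.2):
    # lrate = multiplier * d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5); step >= 1
    return multiplier * hidden_dim ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)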
e-elson/bd
wishes/migrations/0005_auto_20201029_0904.py
e35c59686e5ec81925c22353e269601f286634db
# Generated by Django 3.1.2 on 2020-10-29 09:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('wishes', '0004_auto_20201029_0857'), ] operations = [ migrations.AlterField( model_name='gallery', name='image', field=models.FilePathField(path='/images'), ), ]
[((16, 18, 16, 54), 'django.db.models.FilePathField', 'models.FilePathField', (), '', False, 'from django.db import migrations, models\n')]
NASA-DEVELOP/dnppy
undeployed/legacy/Landsat/DNtoReflectance.py
8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b
#------------------------------------------------------------------------------- # Name: Landsat Digital Numbers to Radiance/Reflectance # Purpose: To convert landsat 4,5, or 7 pixel values from digital numbers # to Radiance, Reflectance, or Temperature # Author: Quinten Geddes [email protected] # NASA DEVELOP Program # Created: 19/10/2012 #------------------------------------------------------------------------------- import arcpy import math arcpy.CheckOutExtension("Spatial") def DNtoReflectance(Lbands,MetaData,OutputType="Reflectance/Temperature",Save=False,OutputFolder=""): """This function is used to convert Landsat 4,5, or 7 pixel values from digital numbers to Radiance, Reflectance, or Temperature (if using Band 6) -----Inputs------ Lbands: GeoTIFF files containing individual bands of Landsat imagery. These must have the original names as downloaded and must be from a single scene. MetaData: The metadata text file that is downloaded with the Landsat Bands themselves. This may be either the old or new MTL.txt file. OutputType: Choose whether the output should be: "Radiance" "Reflectance/Temperature" - Calculates Reflectance for spectral bands and Temperature in Kelvin for Thermal bands Save: Boolean value that indicates whether the output rasters will be saved permanantly Each band will be saved as an individual GeoTIFF file and be named accoriding to the original filename and the output pixel unit *if this is true, then the OutputFolder variable must also be set OutputFolder: Folder in which to save the output rasters -----Outputs----- A list of arcpy raster objects in a sequence that mirrors that of the input Lbands """ OutList=[] #These lists will be used to parse the meta data text file and locate relevant information #metadata format was changed August 29, 2012. This tool can process either the new or old format newMeta=['LANDSAT_SCENE_ID = "','DATE_ACQUIRED = ',"SUN_ELEVATION = ", "RADIANCE_MAXIMUM_BAND_{0} = ","RADIANCE_MINIMUM_BAND_{0} = ", "QUANTIZE_CAL_MAX_BAND_{0} = ","QUANTIZE_CAL_MIN_BAND_{0} = "] oldMeta=['BAND1_FILE_NAME = "',"ACQUISITION_DATE = ","SUN_ELEVATION = ", "LMAX_BAND{0} = ","LMIN_BAND{0} = ", "QCALMAX_BAND{0} = ","QCALMIN_BAND{0} = "] f=open(MetaData) MText=f.read() #the presence of a PRODUCT_CREATION_TIME category is used to identify old metadata #if this is not present, the meta data is considered new. #Band6length refers to the length of the Band 6 name string. In the new metadata this string is longer if "PRODUCT_CREATION_TIME" in MText: Meta=oldMeta Band6length=2 else: Meta=newMeta Band6length=8 #The tilename is located using the newMeta/oldMeta indixes and the date of capture is recorded if Meta==newMeta: TileName=MText.split(Meta[0])[1].split('"')[0] year=TileName[9:13] jday=TileName[13:16] elif Meta==oldMeta: TileName=MText.split(Meta[0])[1].split('"')[0] year=TileName[13:17] jday=TileName[17:20] date=MText.split(Meta[1])[1].split('\n')[0] #the spacecraft from which the imagery was capture is identified #this info determines the solar exoatmospheric irradiance (ESun) for each band spacecraft=MText.split('SPACECRAFT_ID = "')[1].split('"')[0] ThermBands=["6"] if "7" in spacecraft: ESun=(1969.0,1840.0,1551.0,1044.0,255.700,0. ,82.07,1368.00) ThermBands=["B6_VCID_1","B6_VCID_2"] elif "5" in spacecraft: ESun=(1957.0,1826.0,1554.0,1036.0,215.0 ,0. ,80.67) elif "4" in spacecraft: ESun=(1957.0,1825.0,1557.0,1033.0,214.9 ,0. ,80.72) elif "8" in spacecraft: ESun=(1857.0,1996.0,1812.0,1516.0,983.3 ,251.8,85.24,0.0,389.3,0.,0.) 
ThermBands=["10","11"] else: arcpy.AddError("This tool only works for Landsat 4, 5, 7 or 8 ") raise arcpy.ExecuteError() #determing if year is leap year and setting the Days in year accordingly if float(year) % 4 ==0: DIY=366. else:DIY=365. #using the date to determing the distance from the sun theta =2*math.pi*float(jday)/DIY dSun2 = (1.00011 + 0.034221*math.cos(theta) + 0.001280*math.sin(theta) + 0.000719*math.cos(2*theta)+ 0.000077*math.sin(2*theta) ) SZA=90.-float(MText.split(Meta[2])[1].split("\n")[0]) #Calculating values for each band for pathname in Lbands: try: BandNum=pathname.split("\\")[-1].split("B")[1][0:2] try: int(BandNum) except: BandNum=pathname.split("\\")[-1].split("B")[1][0] except: msg="Error reading Band {0}. Bands must have original names as downloaded.".format(str(inputbandnum)) arcpy.AddError(msg) print msg raise arcpy.ExecuteError #changing Band 6 name to match metadata if BandNum=="6" and spacecraft[8]=="7": BandNum=pathname.split("\\")[-1].split("B")[1][0:Band6length] print "Processing Band {0}".format(BandNum) Oraster=arcpy.Raster(pathname) #using the oldMeta/newMeta indixes to pull the min/max for radiance/Digital numbers LMax= float(MText.split(Meta[3].format(BandNum))[1].split("\n")[0]) LMin= float(MText.split(Meta[4].format(BandNum))[1].split("\n")[0]) QCalMax=float(MText.split(Meta[5].format(BandNum))[1].split("\n")[0]) QCalMin=float(MText.split(Meta[6].format(BandNum))[1].split("\n")[0]) Radraster=(((LMax - LMin)/(QCalMax-QCalMin)) * (Oraster - QCalMin)) +LMin Oraster=0 if OutputType=="Radiance": Radraster.save("{0}\\{1}_B{2}_Radiance.tif".format(OutputFolder,TileName,BandNum)) Radraster=0 elif OutputType=="Reflectance/Temperature": #Calculating temperature for band 6 if present if BandNum in ThermBands: Refraster=1282.71/(arcpy.sa.Ln((666.09/Radraster)+1.0)) BandPath="{0}\\{1}_B{2}_Temperature.tif".format(OutputFolder,TileName,BandNum) arcpy.AddMessage("Proceeded through if") #Otherwise calculate reflectance else: Refraster=( math.pi * Radraster * dSun2) / (ESun[int(BandNum[0])-1] * math.cos(SZA*math.pi/180) ) BandPath="{0}\\{1}_B{2}_TOA_Reflectance.tif".format(OutputFolder,TileName,BandNum) arcpy.AddMessage("Proceeded through else") if Save==True: Refraster.save(BandPath) OutList.append(arcpy.Raster(BandPath)) else: OutList.append(Refraster) del Refraster,Radraster arcpy.AddMessage( "Reflectance Calculated for Band {0}".format(BandNum)) print "Reflectance Calculated for Band {0}".format(BandNum) f.close() return OutList
[]
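Stripped of the arcpy plumbing, the tool applies two formulas per pixel: a linear DN-to-radiance rescale between the metadata min/max pairs, then the top-of-atmosphere reflectance equation. A standalone sketch with hypothetical sample numbers:

import math

def dn_to_radiance(q, lmin, lmax, qcalmin, qcalmax):
    # Linear rescale: L = (Lmax - Lmin) / (Qmax - Qmin) * (Q - Qmin) + Lmin
    return (lmax - lmin) / (qcalmax - qcalmin) * (q - qcalmin) + lmin

def toa_reflectance(radiance, esun, d_sun2, sza_deg):
    # rho = pi * L * d^2 / (ESUN * cos(solar zenith angle))
    return math.pi * radiance * d_sun2 / (esun * math.cos(math.radians(sza_deg)))

rad = dn_to_radiance(q=128, lmin=-6.2, lmax=293.7, qcalmin=1, qcalmax=255)
print(toa_reflectance(rad, esun=1969.0, d_sun2=1.0, sza_deg=35.0))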
termux-one/EasY_HaCk
.modules/.theHarvester/discovery/twittersearch.py
0a8d09ca4b126b027b6842e02fa0c29d8250e090
import requests

import myparser


class search_twitter:
    def __init__(self, word, limit):
        self.word = word.replace(' ', '%20')
        self.results = ""
        self.totalresults = ""
        self.server = "www.google.com"
        self.hostname = "www.google.com"
        self.userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100116 Firefox/3.7"
        self.quantity = "100"
        self.limit = int(limit)
        self.counter = 0

    def do_search(self):
        # Query Google for results restricted to twitter.com profile pages.
        urly = "https://" + self.server + "/search?num=100&start=" + str(self.counter) + "&hl=en&meta=&q=site%3Atwitter.com%20intitle%3A%22on+Twitter%22%20" + self.word
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0'}
        try:
            r = requests.get(urly, headers=headers)
        except Exception as e:
            print(e)
            return
        self.results = r.text
        self.totalresults += self.results

    def get_people(self):
        rawres = myparser.parser(self.totalresults, self.word)
        return rawres.people_twitter()

    def process(self):
        while self.counter < self.limit:
            self.do_search()
            self.counter += 100
            print("\tSearching " + str(self.counter) + " results..")
[]
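Driver code for the class follows theHarvester's usual module pattern — a sketch:

s = search_twitter('john smith', 100)
s.process()
print(s.get_people())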
genaforvena/nn_scrapper
scrap_instagram.py
897766a52202aa056afd657995ed39b2b91e1fe2
import urllib.request import json access_token = "265791501.a4af066.f45a9f44719a4b2cb2d137118524e32b" api_url = "https://api.instagram.com/v1" nn_lat = 56.296504 nn_lng = 43.936059 def request(endpoint, req_params = ""): req = api_url + endpoint + "?access_token=" + access_token + "&" + req_params print(req) raw_response = urllib.request.urlopen(req).read() return json.loads(raw_response.decode('utf8')) locations = request("/locations/search", "lat=" + str(nn_lat) + "&lng=" + str(nn_lng))["data"] print(locations) for location in locations: location_id = location["id"] location_media = request("/locations/" + str(location_id) + "/media/recent") print(location_media)
[]
kajusK/HiddenPlaces
tests/unit/utils/test_validators.py
aa976f611a419bc33f8a65f0314956ec09fe2bfd
"""Unit tests for app.validators. """ from wtforms import ValidationError import flask from pytest import raises from app.utils.validators import password_rules, image_file, allowed_file class DummyField(object): """Dummy field object to emulate wtforms field.""" def __init__(self, data=None, errors=(), raw_data=None): self.data = data self.errors = list(errors) self.raw_data = raw_data def gettext(self, string): return string def ngettext(self, singular, plural, n): return singular class DummyForm(dict): """Dummy form object to emulate wtforms form.""" pass class DummyFile(object): """Dummy file like class to emulate uploaded file handler.""" def __init__(self, filename): self.filename = filename def __repr__(self): return self.filename def _run_validator_check(subtests, validator, valid, invalid): """Runs tests again validator with valid and invalid inputs. Args: subtest: Subtests fixture. validator: Validator instance to run tests against valid: List of valid inputs invalid: List of invalid inputs """ field = DummyField() for item in valid: field.data = item with subtests.test(item=item): validator(DummyForm(), field) for item in invalid: field.data = item with subtests.test(item=item): with raises(ValidationError): validator(DummyForm(), field) def test_allowed_file(subtests, req_context): validator = allowed_file() extensions = ['exe', 'html'] valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo'] invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [DummyFile(x) for x in valid] invalid = [DummyFile(x) for x in invalid] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_multiple(subtests, req_context): validator = allowed_file() extensions = ['exe', 'html'] valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo'] invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_message(req_context): validator = allowed_file(message="custom message") field = DummyField() field.data = DummyFile("blah.foo") flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) == "custom message" def test_image_file(subtests, req_context): validator = image_file() extensions = ['jpg', 'png', 'tiff'] valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [DummyFile(x) for x in valid] invalid = [DummyFile(x) for x in invalid] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_multiple(subtests, req_context): validator = image_file() extensions = ['jpg', 'png', 'tiff'] valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid 
= [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_message(req_context): validator = image_file(message="custom message") field = DummyField() field.data = DummyFile("blah") flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) == "custom message" def test_password_rules_length(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=None) valid = ["as123.21", "abcdef", "sdadadaswasasa", "1234567", "...,.,..,", "AAAAAAA", "AbCdEf"] invalid = ["abc", "123", "....", "aBcDe", "a1.V3"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_upper(subtests): validator = password_rules(length=6, upper=2, lower=None, numeric=None, special=None) valid = ["abcDEf", "HellOO", "ABCDEZ", "A.b#3CZ", "ADSDSA"] invalid = ["abcdEf", "helloo", "A231sdsd"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_lower(subtests): validator = password_rules(length=6, upper=None, lower=3, numeric=None, special=None) valid = ["abcdefg", "axzBAR", "123abcdsa", "AbCdEfGh", "..as..2ds.."] invalid = ["foOBAR", "123ABcdSA", "1a2b.C#"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_numeric(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=2, special=None) valid = ["1bcd4A.d", "123456", "a?9#.0"] invalid = ["2ds.#<", "abcdef", "ABCDEF", "x2U.'Q"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_special(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=3) valid = ["ab.?123!", ".#@dS9", "abcdef123><?"] invalid = ["abcdef", ".23134", "AbCd123,]"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_all(subtests): validator = password_rules(length=6, upper=2, lower=1, numeric=1, special=1) valid = ["ABc1.2", "abcDEF123#%^", "a2B.C?"] invalid = ["helloo", "ABCDEF", "Ab1.?c"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_message(subtests): validator = password_rules(length=100, message="custom message") field = DummyField() field.data = "wrong" with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) == "custom message"
[((60, 16, 60, 30), 'app.utils.validators.allowed_file', 'allowed_file', ({}, {}), '()', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((73, 16, 73, 30), 'app.utils.validators.allowed_file', 'allowed_file', ({}, {}), '()', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((88, 16, 88, 54), 'app.utils.validators.allowed_file', 'allowed_file', (), '', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((101, 16, 101, 28), 'app.utils.validators.image_file', 'image_file', ({}, {}), '()', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((114, 16, 114, 28), 'app.utils.validators.image_file', 'image_file', ({}, {}), '()', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((129, 16, 129, 52), 'app.utils.validators.image_file', 'image_file', (), '', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((142, 16, 143, 44), 'app.utils.validators.password_rules', 'password_rules', (), '', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((151, 16, 152, 44), 'app.utils.validators.password_rules', 'password_rules', (), '', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((159, 16, 160, 44), 'app.utils.validators.password_rules', 'password_rules', (), '', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((167, 16, 168, 44), 'app.utils.validators.password_rules', 'password_rules', (), '', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((175, 16, 176, 41), 'app.utils.validators.password_rules', 'password_rules', (), '', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((183, 16, 184, 41), 'app.utils.validators.password_rules', 'password_rules', (), '', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((191, 16, 191, 68), 'app.utils.validators.password_rules', 'password_rules', (), '', False, 'from app.utils.validators import password_rules, image_file, allowed_file\n'), ((68, 9, 68, 49), 'flask.current_app.test_request_context', 'flask.current_app.test_request_context', ({}, {}), '()', False, 'import flask\n'), ((83, 9, 83, 49), 'flask.current_app.test_request_context', 'flask.current_app.test_request_context', ({}, {}), '()', False, 'import flask\n'), ((94, 9, 94, 49), 'flask.current_app.test_request_context', 'flask.current_app.test_request_context', ({}, {}), '()', False, 'import flask\n'), ((109, 9, 109, 49), 'flask.current_app.test_request_context', 'flask.current_app.test_request_context', ({}, {}), '()', False, 'import flask\n'), ((124, 9, 124, 49), 'flask.current_app.test_request_context', 'flask.current_app.test_request_context', ({}, {}), '()', False, 'import flask\n'), ((135, 9, 135, 49), 'flask.current_app.test_request_context', 'flask.current_app.test_request_context', ({}, {}), '()', False, 'import flask\n'), ((195, 9, 195, 32), 'pytest.raises', 'raises', ({(195, 16, 195, 31): 'ValidationError'}, {}), '(ValidationError)', False, 'from pytest import raises\n'), ((95, 13, 95, 36), 'pytest.raises', 'raises', ({(95, 20, 95, 35): 'ValidationError'}, {}), '(ValidationError)', False, 'from pytest import raises\n'), ((136, 13, 136, 36), 'pytest.raises', 'raises', ({(136, 20, 136, 35): 'ValidationError'}, {}), '(ValidationError)', False, 'from pytest 
import raises\n'), ((55, 17, 55, 40), 'pytest.raises', 'raises', ({(55, 24, 55, 39): 'ValidationError'}, {}), '(ValidationError)', False, 'from pytest import raises\n')]
vshulyak/ts-eval
ts_eval/utils/nans.py
2049b1268cf4272f5fa1471851523f8da14dd84c
import warnings import numpy as np def nans_in_same_positions(*arrays): """ Compares all provided arrays to see if they have NaNs in the same positions. """ if len(arrays) == 0: return True for arr in arrays[1:]: if not (np.isnan(arrays[0]) == np.isnan(arr)).all(): return False return True def nanmeanw(arr, axis=None): """ Computes nanmean without raising a warning in case of NaNs in the dataset """ with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) return np.nanmean(arr, axis=axis)
[((22, 9, 22, 34), 'warnings.catch_warnings', 'warnings.catch_warnings', ({}, {}), '()', False, 'import warnings\n'), ((23, 8, 23, 64), 'warnings.simplefilter', 'warnings.simplefilter', (), '', False, 'import warnings\n'), ((24, 15, 24, 41), 'numpy.nanmean', 'np.nanmean', (), '', True, 'import numpy as np\n'), ((13, 16, 13, 35), 'numpy.isnan', 'np.isnan', ({(13, 25, 13, 34): 'arrays[0]'}, {}), '(arrays[0])', True, 'import numpy as np\n'), ((13, 39, 13, 52), 'numpy.isnan', 'np.isnan', ({(13, 48, 13, 51): 'arr'}, {}), '(arr)', True, 'import numpy as np\n')]
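A usage sketch of the two helpers:

import numpy as np

a = np.array([1.0, np.nan, 3.0])
b = np.array([2.0, np.nan, 4.0])
print(nans_in_same_positions(a, b))  # True: the NaNs line up
print(nanmeanw(a))                   # 2.0; an all-NaN slice would return nan without a RuntimeWarning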
UOC/dlkit
tests/authorization/test_searches.py
a9d265db67e81b9e0f405457464e762e2c03f769
"""Unit tests of authorization searches.""" import pytest from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only from dlkit.abstract_osid.osid import errors from dlkit.primordium.id.primitives import Id from dlkit.primordium.type.primitives import Type from dlkit.runtime import PROXY_SESSION, proxy_example from dlkit.runtime.managers import Runtime REQUEST = proxy_example.SimpleRequest() CONDITION = PROXY_SESSION.get_proxy_condition() CONDITION.set_http_request(REQUEST) PROXY = PROXY_SESSION.get_proxy(CONDITION) DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'}) @pytest.fixture(scope="class", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def authorization_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config) create_form = request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name = 'Test catalog' create_form.description = 'Test catalog description' request.cls.catalog = request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope="function") def authorization_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_authorization_search() @pytest.mark.usefixtures("authorization_search_class_fixture", "authorization_search_test_fixture") class TestAuthorizationSearch(object): """Tests for AuthorizationSearch""" @pytest.mark.skip('unimplemented test') def test_search_among_authorizations(self): """Tests search_among_authorizations""" pass @pytest.mark.skip('unimplemented test') def test_order_authorization_results(self): """Tests order_authorization_results""" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_record(self): """Tests get_authorization_search_record""" pass @pytest.mark.usefixtures("authorization_search_results_class_fixture", "authorization_search_results_test_fixture") class TestAuthorizationSearchResults(object): """Tests for AuthorizationSearchResults""" @pytest.mark.skip('unimplemented test') def test_get_authorizations(self): """Tests get_authorizations""" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_query_inspector(self): """Tests get_authorization_query_inspector""" pass @pytest.mark.skip('unimplemented test') def test_get_authorization_search_results_record(self): """Tests get_authorization_search_results_record""" pass @pytest.fixture(scope="class", params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE']) def vault_search_class_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.service_config = request.param request.cls.svc_mgr = Runtime().get_service_manager( 'AUTHORIZATION', proxy=PROXY, implementation=request.cls.service_config) create_form = request.cls.svc_mgr.get_vault_form_for_create([]) create_form.display_name = 'Test catalog' create_form.description = 'Test catalog description' request.cls.catalog = 
request.cls.svc_mgr.create_vault(create_form) def class_tear_down(): request.cls.svc_mgr.delete_vault(request.cls.catalog.ident) request.addfinalizer(class_tear_down) @pytest.fixture(scope="function") def vault_search_test_fixture(request): # From test_templates/resource.py::ResourceSearch::init_template request.cls.search = request.cls.catalog.get_vault_search() @pytest.mark.usefixtures("vault_search_class_fixture", "vault_search_test_fixture") class TestVaultSearch(object): """Tests for VaultSearch""" @pytest.mark.skip('unimplemented test') def test_search_among_vaults(self): """Tests search_among_vaults""" pass @pytest.mark.skip('unimplemented test') def test_order_vault_results(self): """Tests order_vault_results""" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_record(self): """Tests get_vault_search_record""" pass @pytest.mark.usefixtures("vault_search_results_class_fixture", "vault_search_results_test_fixture") class TestVaultSearchResults(object): """Tests for VaultSearchResults""" @pytest.mark.skip('unimplemented test') def test_get_vaults(self): """Tests get_vaults""" pass @pytest.mark.skip('unimplemented test') def test_get_vault_query_inspector(self): """Tests get_vault_query_inspector""" pass @pytest.mark.skip('unimplemented test') def test_get_vault_search_results_record(self): """Tests get_vault_search_results_record""" pass
[((15, 10, 15, 39), 'dlkit.runtime.proxy_example.SimpleRequest', 'proxy_example.SimpleRequest', ({}, {}), '()', False, 'from dlkit.runtime import PROXY_SESSION, proxy_example\n'), ((16, 12, 16, 47), 'dlkit.runtime.PROXY_SESSION.get_proxy_condition', 'PROXY_SESSION.get_proxy_condition', ({}, {}), '()', False, 'from dlkit.runtime import PROXY_SESSION, proxy_example\n'), ((18, 8, 18, 42), 'dlkit.runtime.PROXY_SESSION.get_proxy', 'PROXY_SESSION.get_proxy', ({(18, 32, 18, 41): 'CONDITION'}, {}), '(CONDITION)', False, 'from dlkit.runtime import PROXY_SESSION, proxy_example\n'), ((20, 15, 20, 96), 'dlkit.primordium.type.primitives.Type', 'Type', ({}, {}), "(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority':\n 'DEFAULT'})", False, 'from dlkit.primordium.type.primitives import Type\n'), ((23, 1, 24, 176), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((43, 1, 43, 33), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((49, 1, 49, 99), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(49, 25, 49, 61): '"""authorization_search_class_fixture"""', (49, 63, 49, 98): '"""authorization_search_test_fixture"""'}, {}), "('authorization_search_class_fixture',\n 'authorization_search_test_fixture')", False, 'import pytest\n'), ((68, 1, 68, 115), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(68, 25, 68, 69): '"""authorization_search_results_class_fixture"""', (68, 71, 68, 114): '"""authorization_search_results_test_fixture"""'}, {}), "('authorization_search_results_class_fixture',\n 'authorization_search_results_test_fixture')", False, 'import pytest\n'), ((87, 1, 88, 176), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((107, 1, 107, 33), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((113, 1, 113, 83), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(113, 25, 113, 53): '"""vault_search_class_fixture"""', (113, 55, 113, 82): '"""vault_search_test_fixture"""'}, {}), "('vault_search_class_fixture',\n 'vault_search_test_fixture')", False, 'import pytest\n'), ((132, 1, 132, 99), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', ({(132, 25, 132, 61): '"""vault_search_results_class_fixture"""', (132, 63, 132, 98): '"""vault_search_results_test_fixture"""'}, {}), "('vault_search_results_class_fixture',\n 'vault_search_results_test_fixture')", False, 'import pytest\n'), ((52, 5, 52, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(52, 22, 52, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((57, 5, 57, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(57, 22, 57, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((62, 5, 62, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(62, 22, 62, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((71, 5, 71, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(71, 22, 71, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((76, 5, 76, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(76, 22, 76, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((81, 5, 81, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(81, 22, 81, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((116, 5, 116, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(116, 22, 116, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", 
False, 'import pytest\n'), ((121, 5, 121, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(121, 22, 121, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((126, 5, 126, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(126, 22, 126, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((135, 5, 135, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(135, 22, 135, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((140, 5, 140, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(140, 22, 140, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((145, 5, 145, 43), 'pytest.mark.skip', 'pytest.mark.skip', ({(145, 22, 145, 42): '"""unimplemented test"""'}, {}), "('unimplemented test')", False, 'import pytest\n'), ((28, 26, 28, 35), 'dlkit.runtime.managers.Runtime', 'Runtime', ({}, {}), '()', False, 'from dlkit.runtime.managers import Runtime\n'), ((92, 26, 92, 35), 'dlkit.runtime.managers.Runtime', 'Runtime', ({}, {}), '()', False, 'from dlkit.runtime.managers import Runtime\n')]
keceli/mechdriver
mechroutines/models/_flux.py
978994ba5c77b6df00078b639c4482dacf269440
""" NEW: Handle flux files """ import autofile def read_flux(ts_save_path, vrc_locs=(0,)): """ Read the geometry from the filesys """ vrc_fs = autofile.fs.vrctst(ts_save_path) if vrc_fs[-1].file.flux.exists(vrc_locs): flux_str = vrc_fs[-1].file.flux.read(vrc_locs) else: flux_str = None return flux_str
[((12, 13, 12, 45), 'autofile.fs.vrctst', 'autofile.fs.vrctst', ({(12, 32, 12, 44): 'ts_save_path'}, {}), '(ts_save_path)', False, 'import autofile\n')]
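A usage sketch, with a hypothetical transition-state save path:

flux_str = read_flux('/path/to/ts/save', vrc_locs=(0,))
if flux_str is not None:
    print(flux_str.splitlines()[0])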
nachiket273/ML_Algo_Implemented
RandomForest/RandomForest.py
74ae47fdf620545fdf8c934c5997784faadaebb7
import math import numpy as np import pandas as pd from sklearn.base import BaseEstimator import sys import os sys.path.append(os.path.abspath('../DecisionTree')) from DecisionTree import DecisionTree class RandomForest(BaseEstimator): """ Simple implementation of Random Forest. This class has implementation for Random Forest classifier and regressor. Dataset bagging is done by simple numpy random choice with replacement. For classification the prediction is by majority vote. For regression tree the prediction is averge of all estimator predictions. Args: n_estimators Number of base estimators (Decision Trees here) max_features Maximum features to be used to construct tree. Default: - If classifier, default is square root of total features. - If regressor, default is total number of features. max_depth The maximum depth to which estimators needs to be constructed. Default: np.inf min_samples_split Minimum number of samples need to present for split at the node. Default: 2 criterion criterion to be used for split. For classification tree following criterion are supported: - gini - entropy For regression tree following criterion are supported: - mse (mean squared error) - mae (mean absolute error) Default: gini random_seed random seed value for numpy operations. Default: 0 """ def __init__(self, n_estimators, max_features=0, max_depth=np.inf, min_samples_split=2, criterion='gini', random_seed=0): self.n_estimators = n_estimators self.max_features = max_features self.max_depth = max_depth self.min_samples_split = min_samples_split self.criterion = criterion self.random_seed = random_seed self.idxs = [] self.trees = [] for i in range(self.n_estimators): self.trees.append(DecisionTree(max_depth= self.max_depth, min_samples_split=self.min_samples_split, max_features = self.max_features, criterion=self.criterion, random_seed = self.random_seed)) self.is_classification_forest = False if self.criterion == 'gini' or self.criterion == 'entropy': self.is_classification_forest = True elif self.criterion == 'mse' or self.criterion == 'mae': self.is_classification_forest = False else: raise Exception("Invalid criterion: {}".format(self.criterion)) def get_subsets(self, X, y, num=1): subsets = [] if len(np.shape(y)) == 1: y = np.expand_dims(y, axis=1) Xy = np.concatenate((X, y), axis=1) num_samples = X.shape[0] np.random.shuffle(Xy) rng = np.random.default_rng(seed= self.random_seed) for _ in range(num): idx = rng.choice( range(num_samples), size = np.shape(range(int(num_samples)), ), replace=True ) subsets.append([X[idx], y[idx]]) return subsets def fit(self, X, y): np.random.seed(self.random_seed) if isinstance(X, pd.DataFrame): X = X.to_numpy() subsets = self.get_subsets(X, y, self.n_estimators) if self.max_features == 0: if self.is_classification_forest: self.max_features = int(math.sqrt(X.shape[1])) else: self.max_features = int(X.shape[1]) # Bagging - choose random features for each estimator # if max_features is provided, else use square root of # total number of features. for i, _ in enumerate(self.trees): self.trees[i].max_features = self.max_features X_sub, y_sub = subsets[i] self.trees[i].fit(X_sub, y_sub) def predict(self, X): all_preds = np.empty((X.shape[0], self.n_estimators)) for i, tree in enumerate(self.trees): preds = tree.predict(X) all_preds[:, i] = preds y_preds = [] for preds in all_preds: if self.is_classification_forest: y_preds.append(np.bincount(preds.astype('int')).argmax()) else: y_preds.append(np.average(preds)) return y_preds
[((7, 16, 7, 50), 'os.path.abspath', 'os.path.abspath', ({(7, 32, 7, 49): '"""../DecisionTree"""'}, {}), "('../DecisionTree')", False, 'import os\n'), ((73, 13, 73, 43), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((75, 8, 75, 29), 'numpy.random.shuffle', 'np.random.shuffle', ({(75, 26, 75, 28): 'Xy'}, {}), '(Xy)', True, 'import numpy as np\n'), ((76, 14, 76, 59), 'numpy.random.default_rng', 'np.random.default_rng', (), '', True, 'import numpy as np\n'), ((89, 8, 89, 40), 'numpy.random.seed', 'np.random.seed', ({(89, 23, 89, 39): 'self.random_seed'}, {}), '(self.random_seed)', True, 'import numpy as np\n'), ((109, 20, 109, 61), 'numpy.empty', 'np.empty', ({(109, 29, 109, 60): '(X.shape[0], self.n_estimators)'}, {}), '((X.shape[0], self.n_estimators))', True, 'import numpy as np\n'), ((71, 16, 71, 41), 'numpy.expand_dims', 'np.expand_dims', (), '', True, 'import numpy as np\n'), ((54, 30, 58, 83), 'DecisionTree.DecisionTree', 'DecisionTree', (), '', False, 'from DecisionTree import DecisionTree\n'), ((70, 15, 70, 26), 'numpy.shape', 'np.shape', ({(70, 24, 70, 25): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((96, 40, 96, 61), 'math.sqrt', 'math.sqrt', ({(96, 50, 96, 60): 'X.shape[1]'}, {}), '(X.shape[1])', False, 'import math\n'), ((120, 31, 120, 48), 'numpy.average', 'np.average', ({(120, 42, 120, 47): 'preds'}, {}), '(preds)', True, 'import numpy as np\n')]
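A minimal usage sketch of the class above; the data is made up, and it assumes the sibling DecisionTree package is importable from '../DecisionTree' as in the module itself:

import numpy as np

X = np.random.rand(120, 4)                  # 120 samples, 4 features (made-up data)
y = (X[:, 0] > 0.5).astype(int)             # binary labels

forest = RandomForest(n_estimators=10, criterion='gini', random_seed=42)
forest.fit(X, y)
print(forest.predict(X[:5]))                # majority-vote class labels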
iotctl/pycopy
tests/basics/generator_pend_throw.py
eeb841afea61b19800d054b3b289729665fc9aa4
def gen():
    i = 0
    while 1:
        yield i
        i += 1

g = gen()

try:
    g.pend_throw
except AttributeError:
    print("SKIP")
    raise SystemExit

print(next(g))
print(next(g))

g.pend_throw(ValueError())

v = None
try:
    v = next(g)
except Exception as e:
    print("raised", repr(e))

print("ret was:", v)

# It's legal to pend exception in a just-started generator, just the same
# as it's legal to .throw() into it.
g = gen()
g.pend_throw(ValueError())
try:
    next(g)
except ValueError:
    print("ValueError from just-started gen")
[]
USArmyResearchLab/ARL_Battlespace
src/UnitTypes/ProjectileModule.py
2f17a478f62c20a4db387d5d3e4bbeaa3197cd49
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 09:49:47 2020

@author: james.z.hare
"""

from src.UnitModule import UnitClass, advance
from copy import deepcopy
import math


class ProjectileClass(UnitClass):
    """
    The Projectile Class

    This is a subclass to the UnitClass

    Virtual Functions
    -----------------
    - `__copy__()` to make shallow copies
    - `__deepcopy__(memo)` to make deep copies
    - `possibleActions(State)` to identify legal actions
    - `observe(Unit)` to observe units located within VisibleRange
    - `overlaps(Unit)` to identify if the unit overlaps with another unit
    - `execute(Action, State)` to execute the action

    Attributes
    ----------
    ID: a unique identifier of this unit
    Owner: the player the unit belongs to
    Health: the health of the unit
    Extent: the space occupied by unit
    Position: location of unit
    Orientation: as the name says
    VisibleRange: how far the unit can observe
    Actions: dict
        dictionary of actions common across all units
    ActionOptions: list of list of action options.
    Attack: int that defines whether the unit is attacking in an advance action
    RemainingLifetime: int that defines the total number of turns until the unit is dead
    """

    def __init__(self, ID, Owner, Health, RemainingLifetime=math.inf):
        UnitClass.__init__(self, ID, Owner, Health, Extent=(1, 1))
        self.Actions = {"advance": lambda x: advance(self, x)}
        self.ActionOptions = (("advance",),)
        self.Attack = None
        self.RemainingLifetime = RemainingLifetime

    def __copy__(self):
        Duplicate = ProjectileClass(self.ID, self.Owner, self.Health)
        Duplicate.Position = self.Position
        Duplicate.Orientation = self.Orientation
        Duplicate.Attack = self.Attack
        Duplicate.RemainingLifetime = self.RemainingLifetime
        return Duplicate

    def __deepcopy__(self, memo):
        Default = None
        Exists = memo.get(self, Default)
        if Exists is not Default:
            return Exists
        Duplicate = ProjectileClass(deepcopy(self.ID, memo),
                                    deepcopy(self.Owner, memo),
                                    deepcopy(self.Health, memo))
        Duplicate.Position = deepcopy(self.Position, memo)
        Duplicate.Orientation = deepcopy(self.Orientation, memo)
        Duplicate.Attack = deepcopy(self.Attack, memo)
        Duplicate.RemainingLifetime = deepcopy(self.RemainingLifetime, memo)
        memo[self] = Duplicate
        return Duplicate

    def possibleActions(self, State):
        """
        Identifies the set of feasible actions given the board size and
        position of the unit

        Parameters
        ----------
        State: StateClass

        Returns
        -------
        TrueActions: list[str]
            A list of the feasible actions
        """
        return self.ActionOptions

    def observe(self, Unit):
        if Unit.ID == self.ID:
            return Unit
        return None

    def overlaps(self, Unit):
        MyOccupiedSpace = set([(self.Position[0] + x, self.Position[1] + y, self.Position[2])
                               for x in range(self.Extent[0])
                               for y in range(self.Extent[1])])
        # print(Unit)
        TheirOccupiedSpace = set([(Unit.Position[0] + x, Unit.Position[1] + y, Unit.Position[2])
                                  for x in range(Unit.Extent[0])
                                  for y in range(Unit.Extent[1])])
        return len(MyOccupiedSpace.intersection(TheirOccupiedSpace)) > 0

    def execute(self, Actions, State):
        """
        Execute `Actions` on `State`.

        Parameters
        ----------
        Actions : list[str]
            A set of actions to be performed on `State`.
        State : StateClass
            State on which to inflict actions.

        Returns
        -------
        Changes : list
            Resulting state of executed `Actions`.
        """
        NewState = deepcopy(State)
        Changes = []
        for Action in Actions:
            ActionResult = self.Actions[Action](NewState)
            ActionResult[1].RemainingLifetime -= 1
            if isinstance(ActionResult, list):
                Changes += ActionResult
            else:
                Changes.append(ActionResult)
        return Changes


# Will be used as the projectile for the missile launcher unit
class MissileClass(ProjectileClass):
    def __init__(self, ID, Owner, Position, Life=1):
        # Bug fix: the original passed the misspelled keywords Positon=... and
        # Life=..., which the base initializer does not accept. Mapping Life to
        # RemainingLifetime and assuming Health=1 here; Position is stored as
        # an attribute, matching how __copy__/__deepcopy__ treat it.
        ProjectileClass.__init__(self, ID, Owner, Health=1, RemainingLifetime=Life)
        self.Position = Position
[((56, 8, 56, 65), 'src.UnitModule.UnitClass.__init__', 'UnitClass.__init__', (), '', False, 'from src.UnitModule import UnitClass, advance\n'), ((76, 29, 76, 58), 'copy.deepcopy', 'deepcopy', ({(76, 38, 76, 51): 'self.Position', (76, 53, 76, 57): 'memo'}, {}), '(self.Position, memo)', False, 'from copy import deepcopy\n'), ((77, 32, 77, 64), 'copy.deepcopy', 'deepcopy', ({(77, 41, 77, 57): 'self.Orientation', (77, 59, 77, 63): 'memo'}, {}), '(self.Orientation, memo)', False, 'from copy import deepcopy\n'), ((78, 27, 78, 54), 'copy.deepcopy', 'deepcopy', ({(78, 36, 78, 47): 'self.Attack', (78, 49, 78, 53): 'memo'}, {}), '(self.Attack, memo)', False, 'from copy import deepcopy\n'), ((79, 38, 79, 76), 'copy.deepcopy', 'deepcopy', ({(79, 47, 79, 69): 'self.RemainingLifetime', (79, 71, 79, 75): 'memo'}, {}), '(self.RemainingLifetime, memo)', False, 'from copy import deepcopy\n'), ((126, 19, 126, 34), 'copy.deepcopy', 'deepcopy', ({(126, 28, 126, 33): 'State'}, {}), '(State)', False, 'from copy import deepcopy\n'), ((75, 36, 75, 59), 'copy.deepcopy', 'deepcopy', ({(75, 45, 75, 52): 'self.ID', (75, 54, 75, 58): 'memo'}, {}), '(self.ID, memo)', False, 'from copy import deepcopy\n'), ((75, 61, 75, 87), 'copy.deepcopy', 'deepcopy', ({(75, 70, 75, 80): 'self.Owner', (75, 82, 75, 86): 'memo'}, {}), '(self.Owner, memo)', False, 'from copy import deepcopy\n'), ((75, 89, 75, 116), 'copy.deepcopy', 'deepcopy', ({(75, 98, 75, 109): 'self.Health', (75, 111, 75, 115): 'memo'}, {}), '(self.Health, memo)', False, 'from copy import deepcopy\n'), ((57, 46, 57, 62), 'src.UnitModule.advance', 'advance', ({(57, 54, 57, 58): 'self', (57, 60, 57, 61): 'x'}, {}), '(self, x)', False, 'from src.UnitModule import UnitClass, advance\n')]
HelloYeew/helloyeew-lab-computer-programming-i
OOP_MiniQuiz/run_car_Level2.py
60b05072f32f23bab4a336b506ba7f66e52c045d
from car import *

def compare(car1, car2):
    print(car1)
    print(car2)

car1 = Car("Nissan", "Tiida", 450000)
car2 = Car("Toyota", "Vios", 400000)
car3 = Car("BMW", "X3", 3400000)

compare(car3, car1)
compare(car1, car2)
[]
michel-slm/python-prelude
prelude/monads.py
b3ca89ff2bf150f772764f59d2796d2fcce1013d
from abc import ABCMeta, abstractmethod

from prelude.typeclasses import Monad
from prelude.decorators import monad_eq, singleton


@monad_eq
class Either(Monad):
    __metaclass__ = ABCMeta

    @classmethod
    def mreturn(cls, val):
        return Right(val)

    @abstractmethod
    def __iter__(self):
        pass


class Left(Either):
    def __init__(self, val):
        self.__val = val

    def __rshift__(self, f):
        return self

    def __iter__(self):
        return iter([])

    def __eq__(self, other):
        return type(self) == type(other)

    def __repr__(self):
        return "Left({})".format(self.__val)


class Right(Either):
    def __init__(self, val):
        self.__val = val

    def __rshift__(self, f):
        return f(self.__val)

    def __iter__(self):
        yield self.__val

    def __repr__(self):
        return "Right({})".format(self.__val)


class Maybe(Monad):
    __metaclass__ = ABCMeta

    @classmethod
    def mreturn(cls, val):
        return Just(val)

    @abstractmethod
    def __iter__(self):
        pass


@monad_eq
class Just(Maybe):
    def __init__(self, val):
        self.__val = val

    def __rshift__(self, f):
        return f(self.__val)

    def __iter__(self):
        yield self.__val

    def __repr__(self):
        return "Just({})".format(self.__val)


@singleton
class Nothing(Maybe):
    def __rshift__(self, f):
        return self

    def __iter__(self):
        return iter([])

    def __repr__(self):
        return "Nothing()"
[]
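A brief usage sketch of the >> (bind) operator defined above; it assumes the @singleton decorator rebinds the name Nothing to a single instance of that class:

def safe_div(x):
    # Hypothetical helper for illustration only.
    return Right(10.0 / x) if x != 0 else Left("division by zero")

print(Right(5) >> safe_div)                 # Right(2.0)
print(Right(0) >> safe_div)                 # Left(division by zero)
print(Just(3) >> (lambda v: Just(v + 1)))   # Just(4)
print(Nothing >> (lambda v: Just(v + 1)))   # Nothing() - bind is a no-op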
JJavier98/TFG-Dron-de-Vigilancia
Deep Sort/src/imgconverter.py
7fd68a981854ac480ad2f0c936a0dd58d2a9f38b
#!/usr/bin/env python
from __future__ import print_function

import roslib
roslib.load_manifest('msgs_to_cv2')
import sys
import rospy
import cv2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError


class image_converter:

    def __init__(self):
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/bebop/image_raw", Image, self.callback)

    def callback(self, data):
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print(e)

        cv2.imshow("hola", cv_image)
        cv2.waitKey(3)


def main(args):
    while True:
        ic = image_converter()
        rospy.init_node('image_converter', anonymous=True)
    """
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()
    """

if __name__ == '__main__':
    main(sys.argv)
[]
Food-X-Technologies/foodx_devops_tools
foodx_devops_tools/azure/__init__.py
57d1bf1304d9c9a386eaffa427f9eb36c410c350
#  Copyright (c) 2021 Food-X Technologies
#
#  This file is part of foodx_devops_tools.
#
#  You should have received a copy of the MIT License along with
#  foodx_devops_tools. If not, see <https://opensource.org/licenses/MIT>.

"""Azure related utilities."""
[]
posita/beartype
beartype/vale/__init__.py
e56399686e1f2ffd5128a4030b19314504e32450
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.

'''
**Beartype validators.**

This submodule publishes a PEP-compliant hierarchy of subscriptable
(indexable) classes enabling callers to validate the internal structure of
arbitrarily complex scalars, data structures, and third-party objects. Like
annotation objects defined by the :mod:`typing` module (e.g.,
:attr:`typing.Union`), these classes dynamically generate PEP-compliant type
hints when subscripted (indexed) and are thus intended to annotate callables
and variables. Unlike annotation objects defined by the :mod:`typing` module,
these classes are *not* explicitly covered by existing PEPs and thus *not*
directly usable as annotations.

Instead, callers are expected to (in order):

#. Annotate callable parameters and returns to be validated with
   :pep:`593`-compliant :attr:`typing.Annotated` type hints.
#. Subscript those hints with (in order):

   #. The type of those parameters and returns.
   #. One or more subscriptions of classes declared by this submodule.
'''

# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To avoid polluting the public module namespace, external attributes
# should be locally imported at module scope *ONLY* under alternate private
# names (e.g., "from argparse import ArgumentParser as _ArgumentParser" rather
# than merely "from argparse import ArgumentParser").
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
from beartype.vale._is._valeis import _IsFactory
from beartype.vale._is._valeistype import (
    _IsInstanceFactory,
    _IsSubclassFactory,
)
from beartype.vale._is._valeisobj import _IsAttrFactory
from beartype.vale._is._valeisoper import _IsEqualFactory

# ....................{ SINGLETONS }....................
# Public factory singletons instantiating these private factory classes.
Is = _IsFactory(basename='Is')
IsAttr = _IsAttrFactory(basename='IsAttr')
IsEqual = _IsEqualFactory(basename='IsEqual')
IsInstance = _IsInstanceFactory(basename='IsInstance')
IsSubclass = _IsSubclassFactory(basename='IsSubclass')

# Delete all private factory classes imported above for safety.
del (
    _IsFactory,
    _IsAttrFactory,
    _IsEqualFactory,
    _IsInstanceFactory,
    _IsSubclassFactory,
)

# ....................{ TODO }....................
#FIXME: As intelligently requested by @Saphyel at #32, add support for
#additional classes support constraints resembling:
#
#* String constraints:
#  * Email.
#  * Uuid.
#  * Choice.
#  * Language.
#  * Locale.
#  * Country.
#  * Currency.
#* Comparison constraints
#  * IdenticalTo.
#  * NotIdenticalTo.
#  * LessThan.
#  * GreaterThan.
#  * Range.
#  * DivisibleBy.

#FIXME: Add a new BeartypeValidator.get_cause_or_none() method with the same
#signature and docstring as the existing CauseSleuth.get_cause_or_none()
#method. This new BeartypeValidator.get_cause_or_none() method should then be
#called by the "_peperrorannotated" submodule to generate human-readable
#exception messages. Note that this implies that:
#* The BeartypeValidator.__init__() method will need to additionally accept a
#  new mandatory "get_cause_or_none: Callable[[], Optional[str]]" parameter,
#  which that method should then localize to "self.get_cause_or_none".
#* Each __class_getitem__() dunder method of each
#  "_BeartypeValidatorFactoryABC" subclass will need to additionally define
#  and pass that callable when creating and returning its "BeartypeValidator"
#  instance.

#FIXME: *BRILLIANT IDEA.* Holyshitballstime. The idea here is that we can
#leverage all of our existing "beartype.is" infrastructure to dynamically
#synthesize PEP-compliant type hints that would then be implicitly supported
#by any runtime type checker. At present, subscriptions of "Is" (e.g.,
#"Annotated[str, Is[lambda text: bool(text)]]") are only supported by beartype
#itself. Of course, does anyone care? I mean, if you're using a runtime type
#checker, you're probably *ONLY* using beartype. Right? That said, this would
#technically improve portability by allowing users to switch between different
#checkers... except not really, since they'd still have to import beartype
#infrastructure to do so. So, this is probably actually useless.
#
#Nonetheless, the idea itself is trivial. We declare a new
#"beartype.is.Portable" singleton accessed in the same way: e.g.,
#    from beartype import beartype
#    from beartype.is import Portable
#    NonEmptyStringTest = Is[lambda text: bool(text)]
#    NonEmptyString = Portable[str, NonEmptyStringTest]
#    @beartype
#    def munge_it(text: NonEmptyString) -> str: ...
#
#So what's the difference between "typing.Annotated" and "beartype.is.Portable"
#then? Simple. The latter dynamically generates one new PEP 3119-compliant
#metaclass and associated class whenever subscripted. Clearly, this gets
#expensive in both space and time consumption fast -- which is why this won't
#be the default approach. For safety, this new class does *NOT* subclass the
#first subscripted class. Instead:
#* This new metaclass of this new class simply defines an __isinstancecheck__()
#  dunder method. For the above example, this would be:
#      class NonEmptyStringMetaclass(object):
#          def __isinstancecheck__(cls, obj) -> bool:
#              return isinstance(obj, str) and NonEmptyStringTest(obj)
#* This new class would then be entirely empty. For the above example, this
#  would be:
#      class NonEmptyStringClass(object, metaclass=NonEmptyStringMetaclass):
#          pass
#
#Well, so much for brilliant. It's slow and big, so it seems doubtful anyone
#would actually do that. Nonetheless, that's food for thought for you.
[((46, 5, 46, 30), 'beartype.vale._is._valeis._IsFactory', '_IsFactory', (), '', False, 'from beartype.vale._is._valeis import _IsFactory\n'), ((47, 9, 47, 42), 'beartype.vale._is._valeisobj._IsAttrFactory', '_IsAttrFactory', (), '', False, 'from beartype.vale._is._valeisobj import _IsAttrFactory\n'), ((48, 10, 48, 45), 'beartype.vale._is._valeisoper._IsEqualFactory', '_IsEqualFactory', (), '', False, 'from beartype.vale._is._valeisoper import _IsEqualFactory\n'), ((49, 13, 49, 54), 'beartype.vale._is._valeistype._IsInstanceFactory', '_IsInstanceFactory', (), '', False, 'from beartype.vale._is._valeistype import _IsInstanceFactory, _IsSubclassFactory\n'), ((50, 13, 50, 54), 'beartype.vale._is._valeistype._IsSubclassFactory', '_IsSubclassFactory', (), '', False, 'from beartype.vale._is._valeistype import _IsInstanceFactory, _IsSubclassFactory\n')]
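A usage sketch following the docstring's two-step recipe above (annotate with typing.Annotated, then subscript with a type plus an Is validator); the function name is made up for illustration:

from typing import Annotated
from beartype import beartype
from beartype.vale import Is

NonEmptyString = Annotated[str, Is[lambda text: bool(text)]]

@beartype
def munge_it(text: NonEmptyString) -> str:
    # Raises a beartype roar at call time if passed an empty string.
    return text + '!'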
movermeyer/cellardoor
tests/test_authentication.py
25192b07224ff7bd33fd29ebac07340bef53a2ed
import unittest
from mock import Mock
import base64
from cellardoor import errors
from cellardoor.authentication import *
from cellardoor.authentication.basic import BasicAuthIdentifier


class FooIdentifier(Identifier):
    pass


class BarAuthenticator(Authenticator):
    pass


class TestAuthentication(unittest.TestCase):

    def test_abstract_identifier(self):
        id = Identifier()
        with self.assertRaises(NotImplementedError):
            id.identify({})

    def test_abstract_authenticator(self):
        auth = Authenticator()
        with self.assertRaises(NotImplementedError):
            auth.authenticate({})

    def test_bad_identifier(self):
        self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())])

    def test_bad_authenticator(self):
        self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)])

    def test_middleware(self):
        identifier = FooIdentifier()
        identifier.identify = Mock(return_value='foo')
        authenticator = BarAuthenticator()
        authenticator.authenticate = Mock(return_value='bar')
        app = Mock(return_value=[])
        middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)])
        environ = {'skidoo': 23}
        middleware(environ, lambda: None)
        identifier.identify.assert_called_once_with(environ)
        authenticator.authenticate.assert_called_once_with('foo')
        self.assertEquals(environ, {'skidoo': 23, 'cellardoor.identity': 'bar'})

    def test_middleware_skip(self):
        id_one = FooIdentifier()
        id_one.identify = Mock(return_value=None)
        id_two = FooIdentifier()
        id_two.identify = Mock(return_value='two')
        id_three = FooIdentifier()
        id_three.identify = Mock(return_value='three')
        auth_one = BarAuthenticator()
        auth_one.authenticate = Mock(return_value='one')
        auth_two = BarAuthenticator()
        auth_two.authenticate = Mock(return_value='two')
        auth_three = BarAuthenticator()
        auth_three.authenticate = Mock(return_value='three')
        app = Mock(return_value=[])
        middleware = AuthenticationMiddleware(
            app,
            pairs=[
                (id_one, auth_one),
                (id_two, auth_two),
                (id_three, auth_three)
            ]
        )
        environ = {}
        middleware(environ, lambda: None)
        self.assertEquals(environ, {'cellardoor.identity': 'two'})


class TestBasic(unittest.TestCase):

    def test_skip_if_no_auth_header(self):
        identifier = BasicAuthIdentifier()
        credentials = identifier.identify({})
        self.assertEquals(credentials, None)

    def test_skip_if_not_a_pair(self):
        identifier = BasicAuthIdentifier()
        credentials = identifier.identify({'HTTP_AUTHORIZATION': 'Foo'})
        self.assertEquals(credentials, None)

    def test_skip_if_not_basic(self):
        identifier = BasicAuthIdentifier()
        credentials = identifier.identify({'HTTP_AUTHORIZATION': 'Foo 123'})
        self.assertEquals(credentials, None)

    def test_error_if_not_base64(self):
        identifier = BasicAuthIdentifier()
        with self.assertRaises(errors.IdentificationError):
            identifier.identify({'HTTP_AUTHORIZATION': 'Basic \x000'})

    def test_error_if_malformed(self):
        identifier = BasicAuthIdentifier()
        credentials = base64.standard_b64encode('foobar')
        with self.assertRaises(errors.IdentificationError):
            identifier.identify({'HTTP_AUTHORIZATION': 'Basic %s' % credentials})

    def test_pass(self):
        identifier = BasicAuthIdentifier()
        credentials = base64.standard_b64encode('foo:bar')
        identified_credentials = identifier.identify({'HTTP_AUTHORIZATION': 'Basic %s' % credentials})
        self.assertEquals(identified_credentials, {'username': 'foo', 'password': 'bar'})
[((37, 24, 37, 48), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((39, 31, 39, 55), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((40, 8, 40, 29), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((50, 20, 50, 43), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((52, 20, 52, 44), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((54, 22, 54, 48), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((56, 26, 56, 50), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((58, 26, 58, 50), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((60, 28, 60, 54), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((61, 8, 61, 29), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((80, 15, 80, 36), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ({}, {}), '()', False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((86, 15, 86, 36), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ({}, {}), '()', False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((92, 15, 92, 36), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ({}, {}), '()', False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((98, 15, 98, 36), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ({}, {}), '()', False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((104, 15, 104, 36), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ({}, {}), '()', False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((105, 16, 105, 51), 'base64.standard_b64encode', 'base64.standard_b64encode', ({(105, 42, 105, 50): '"""foobar"""'}, {}), "('foobar')", False, 'import base64\n'), ((111, 15, 111, 36), 'cellardoor.authentication.basic.BasicAuthIdentifier', 'BasicAuthIdentifier', ({}, {}), '()', False, 'from cellardoor.authentication.basic import BasicAuthIdentifier\n'), ((112, 16, 112, 52), 'base64.standard_b64encode', 'base64.standard_b64encode', ({(112, 42, 112, 51): '"""foo:bar"""'}, {}), "('foo:bar')", False, 'import base64\n')]
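An interactive sketch mirroring the test_pass case above (Python 2, as in this suite); the credentials are made up:

import base64
from cellardoor.authentication.basic import BasicAuthIdentifier

identifier = BasicAuthIdentifier()
environ = {'HTTP_AUTHORIZATION': 'Basic ' + base64.standard_b64encode('foo:bar')}
print(identifier.identify(environ))   # -> {'username': 'foo', 'password': 'bar'}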
somritabanerjee/speedplusbaseline
src/styleaug/__init__.py
5913c611d8c182ad8070abcf5f1baffc554dfd90
from .styleAugmentor import StyleAugmentor
[]
Westlake-AI/openmixup
configs/classification/imagenet/mixups/convnext/convnext_tiny_smooth_mix_8xb256_accu2_ema_fp16.py
ea81250819e740dd823e30cb7ce382d14a3c1b91
_base_ = [
    '../../../_base_/datasets/imagenet/swin_sz224_4xbs256.py',
    '../../../_base_/default_runtime.py',
]

# model settings
model = dict(
    type='MixUpClassification',
    pretrained=None,
    alpha=0.2,
    mix_mode="cutmix",
    mix_args=dict(
        attentivemix=dict(grid_size=32, top_k=None, beta=8),  # AttentiveMix+ in this repo (use pre-trained)
        automix=dict(mask_adjust=0, lam_margin=0),  # require pre-trained mixblock
        fmix=dict(decay_power=3, size=(224, 224), max_soft=0., reformulate=False),
        manifoldmix=dict(layer=(0, 3)),
        puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1,  # adjust t_batch_size if CUDA out of memory
            mp=None, block_num=4,  # block_num<=4 and mp=2/4 for fast training
            beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8),
        resizemix=dict(scope=(0.1, 0.8), use_alpha=True),
        samix=dict(mask_adjust=0, lam_margin=0.08),  # require pre-trained mixblock
    ),
    backbone=dict(
        type='ConvNeXt',
        arch='tiny',
        out_indices=(3,),
        norm_cfg=dict(type='LN2d', eps=1e-6),
        act_cfg=dict(type='GELU'),
        drop_path_rate=0.1,
        gap_before_final_norm=True,
    ),
    head=dict(
        type='ClsMixupHead',  # mixup CE + label smooth
        loss=dict(type='LabelSmoothLoss',
            label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0),
        with_avg_pool=False,  # gap_before_final_norm is True
        in_channels=768, num_classes=1000)
)

# interval for accumulate gradient
update_interval = 2  # total: 8 x bs256 x 2 accumulates = bs4096

# additional hooks
custom_hooks = [
    dict(type='EMAHook',  # EMA_W = (1 - m) * EMA_W + m * W
        momentum=0.9999,
        warmup='linear',
        warmup_iters=20 * 626,
        warmup_ratio=0.9,  # warmup 20 epochs.
        update_interval=update_interval,
    ),
]

# optimizer
optimizer = dict(
    type='AdamW',
    lr=4e-3,  # lr = 5e-4 * (256 * 4) * 4 accumulate / 1024 = 4e-3 / bs4096
    weight_decay=0.05,
    eps=1e-8,
    betas=(0.9, 0.999),
    paramwise_options={
        '(bn|ln|gn)(\d+)?.(weight|bias)': dict(weight_decay=0.),
        'bias': dict(weight_decay=0.),
    })

# apex
use_fp16 = True
fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic'))
optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16)

# lr scheduler
lr_config = dict(
    policy='CosineAnnealing',
    by_epoch=False, min_lr=1e-5,
    warmup='linear',
    warmup_iters=20, warmup_by_epoch=True,  # warmup 20 epochs.
    warmup_ratio=1e-6,
)

# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=300)
[]
PaNOSC-ViNYL/McStasScript
mcstasscript/interface/reader.py
bd94ebc6cac290c3c9662871df40d76edbe4a44e
import os

from mcstasscript.instr_reader.control import InstrumentReader
from mcstasscript.interface.instr import McStas_instr


class McStas_file:
    """
    Reader of McStas files, can add to an existing McStasScript
    instrument instance or create a corresponding McStasScript python
    file.

    Methods
    -------
    add_to_instr(Instr)
        Add information from McStas file to McStasScript Instr instance

    write_python_file(filename)
        Write python file named filename that reproduces the McStas instr
    """

    def __init__(self, filename):
        """
        Initialization of McStas_file class, needs McStas instr filename

        Parameters
        ----------
        filename (str)
            Name of McStas instrument file to be read
        """

        # Check filename
        if not os.path.isfile(filename):
            raise ValueError("Given filename, \"" + filename
                             + "\" could not be found.")

        self.Reader = InstrumentReader(filename)

    def add_to_instr(self, Instr):
        """
        Adds information from the McStas file to McStasScript instr

        Parameters
        ----------
        Instr (McStasScript McStas_instr instance)
            McStas_instr instance to add instrument information to
        """

        # Check Instr
        if not isinstance(Instr, McStas_instr):
            raise TypeError("Given object is not of type McStas_instr!")

        self.Reader.add_to_instr(Instr)

    def write_python_file(self, filename, **kwargs):
        """
        Writes python file that reproduces McStas instrument file

        Parameters
        ----------
        filename (str)
            Filename of python file to be written
        """

        if "force" in kwargs:
            force = kwargs["force"]
        else:
            force = False

        # Check product_filename is available
        if os.path.isfile(filename):
            if force:
                os.remove(filename)
            else:
                raise ValueError("Filename \"" + filename
                                 + "\" already exists, you can overwrite with "
                                 + "force=True")

        self.Reader.generate_py_version(filename)
[((37, 22, 37, 48), 'mcstasscript.instr_reader.control.InstrumentReader', 'InstrumentReader', ({(37, 39, 37, 47): 'filename'}, {}), '(filename)', False, 'from mcstasscript.instr_reader.control import InstrumentReader\n'), ((71, 11, 71, 35), 'os.path.isfile', 'os.path.isfile', ({(71, 26, 71, 34): 'filename'}, {}), '(filename)', False, 'import os\n'), ((33, 15, 33, 39), 'os.path.isfile', 'os.path.isfile', ({(33, 30, 33, 38): 'filename'}, {}), '(filename)', False, 'import os\n'), ((73, 16, 73, 35), 'os.remove', 'os.remove', ({(73, 26, 73, 34): 'filename'}, {}), '(filename)', False, 'import os\n')]
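A usage sketch of the reader above; the instrument file name and the McStas_instr constructor arguments are assumptions for illustration:

from mcstasscript.interface.instr import McStas_instr
from mcstasscript.interface.reader import McStas_file

reader = McStas_file("my_instrument.instr")

instr = McStas_instr("my_instrument")
reader.add_to_instr(instr)                                # populate an existing instrument
reader.write_python_file("my_instrument.py", force=True)  # or emit a standalone script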
ucsd-progsys/csolve-bak
src/regrtest.py
89cfeb5403e617f45ece4bae9f88f8e6cd7ca934
#!/usr/bin/python
# Copyright (c) 2009 The Regents of the University of California. All rights reserved.
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that the
# above copyright notice and the following two paragraphs appear in
# all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
# IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION
# TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.

import time, subprocess, optparse, sys, socket, os
import misc.rtest as rtest

solve = "./csolve -c".split()
null = open("/dev/null", "w")
now = (time.asctime(time.localtime(time.time()))).replace(" ", "_")
logfile = "../tests/logs/regrtest_results_%s_%s" % (socket.gethostname(), now)
argcomment = "//! run with "

def logged_sys_call(args, out=None, err=None):
  print "exec: " + " ".join(args)
  return subprocess.call(args, stdout=out, stderr=err)

def solve_quals(file, bare, time, quiet, flags):
  if quiet:
    out = null
  else:
    out = None
  if time:
    time = ["time"]
  else:
    time = []
  hygiene_flags = [("--csolveprefix=%s" % (file)), "-o", "/dev/null"]
  out = open(file + ".log", "w")
  rv = logged_sys_call(time + solve + flags + hygiene_flags + [file], out)
  out.close()
  return rv

def run_script(file, quiet):
  if quiet:
    out = null
  else:
    out = None
  return logged_sys_call(file, out)

def getfileargs(file):
  f = open(file)
  l = f.readline()
  f.close()
  if l.startswith(argcomment):
    return l[len(argcomment):].strip().split(" ")
  else:
    return []

class Config (rtest.TestConfig):
  def __init__ (self, dargs, testdirs, logfile, threadcount):
    rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount)
    self.dargs = dargs
    if os.path.exists("../tests/postests/coreutils/"):
      logged_sys_call(["../tests/postests/coreutils/makeCoreUtil.sh", "init"], None)

  def run_test (self, file):
    os.environ['CSOLVEFLAGS'] = self.dargs
    if file.endswith(".c"):
      fargs = getfileargs(file)
      return solve_quals(file, True, False, True, fargs)
    elif file.endswith(".sh"):
      return run_script(file, True)

  def is_test (self, file):
    return (file.endswith(".sh") and os.access(file, os.X_OK)) \
        or (file.endswith(".c") and not file.endswith(".csolve.save.c") and not file.endswith(".ssa.c"))

#####################################################################################

#testdirs = [("../postests", 0)]
#testdirs = [("../negtests", 1)]
#testdirs = [("../slowtests", 1)]
#DEFAULT
testdirs = [("../tests/postests", 0), ("../tests/negtests", [1, 2])]
#testdirs = [("../tests/microtests", 0)]

parser = optparse.OptionParser()
parser.add_option("-t", "--threads", dest="threadcount", default=1, type=int, help="spawn n threads")
parser.add_option("-o", "--opts", dest="opts", default="", type=str, help="additional arguments to csolve")
parser.disable_interspersed_args()
options, args = parser.parse_args()

runner = rtest.TestRunner(Config(options.opts, testdirs, logfile, options.threadcount))
exit(runner.run())
[]
NathanMH/ComputerClub
country_capital_guesser.py
197585c1a77f71ee363547740d6e09f945e7526f
#! /usr/bin/env python3

#######################
"""####################
Index:
1. Imports and Readme
2. Functions
3. Main
4. Testing
####################"""
#######################

###################################################################
# 1. IMPORTS AND README
###################################################################

import easygui
import country_list_getter

###################################################################
# 2. FUNCTIONS
###################################################################

# Dictionary. It has keys (Canada, France etc...) and Values (Paris, Ottawa)
country_list_getter.main()
COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST

def ask_to_play():
    return easygui.ynbox("Do you want to play a game?", "Country Guesser", ("Yes", "No"))

def ask_to_replay(correct_answers, total_questions):
    score = round(((correct_answers / total_questions) * 100), 2)
    if score >= 50:
        return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/happy_puppy.jpg", ["Yes", "No"])
    else:
        return easygui.buttonbox("Your score: " + str(score) + ". Do you want to play again?", "~/Documents/ComputerClub/assets/sad_puppy.jpg", ["Yes", "No"])

def main_question_box(country):
    return easygui.enterbox("What is the capital of: " + country + "?", "Country Capital Guesser!!")

###################################################################
# 3. MAIN
###################################################################

def funtime():
    playing = 1
    correct_answers = 0
    total_questions = 0
    ask_to_play()
    while playing:
        for key, value in COUNTRIES_CAPITALS.items():
            answer = main_question_box(key)
            # answer = input("Name the capital of: " + key + "\n").lower()
            total_questions += 1  # Short for total_questions = total_questions + 1
            if answer == COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]:
                correct_answers += 1
                print("Correct!")
            else:
                print("Wrong!")

        # Should we keep playing?
        response = input("Would you like to play again?: \n")
        if response.lower() == "yes" or response == "y":
            playing = 1
        else:
            playing = 0

    # score_screen(correct_answers, total_questions)
    ask_to_replay(correct_answers, total_questions)
    # print("You scored " + str(correct_answers) + "/" + str(total_questions) + " (" + str(correct_percent) + "%)")

###################################################################
# 4. TESTING
###################################################################

# COUNTRIES_CAPITALS = {"Canada": "Ottawa", "United States": "Washington", "France": "Paris"}

def test_1():
    pass
    # ask_to_play()
    # main_question_box("Canada")

funtime()
[((25, 0, 25, 26), 'country_list_getter.main', 'country_list_getter.main', ({}, {}), '()', False, 'import country_list_getter\n'), ((29, 11, 29, 89), 'easygui.ynbox', 'easygui.ynbox', ({(29, 25, 29, 54): '"""Do you want to play a game?"""', (29, 56, 29, 73): '"""Country Guesser"""', (29, 75, 29, 88): "('Yes', 'No')"}, {}), "('Do you want to play a game?', 'Country Guesser', ('Yes', 'No'))", False, 'import easygui\n'), ((39, 11, 39, 100), 'easygui.enterbox', 'easygui.enterbox', ({(39, 28, 39, 70): "('What is the capital of: ' + country + '?')", (39, 72, 39, 99): '"""Country Capital Guesser!!"""'}, {}), "('What is the capital of: ' + country + '?',\n 'Country Capital Guesser!!')", False, 'import easygui\n')]
aframires/freesound-loop-annotator
data_analysis/audiocommons_ffont/scripts/rekordbox_xml_to_analysis_rhythm_rekordbox_file.py
a24e0c23bfc671e41e8627150e7b9fcae5c8cb13
# Need this to import from parent directory when running outside pycharm
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))

from ac_utils.general import save_to_json, load_from_json
import click
import xml.etree.ElementTree
from urllib import unquote


def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file):
    collection = rekordbox_file.find('COLLECTION')
    found = False
    for document in collection:
        if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]:
            found = document
            break
        if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]:
            found = document
            break
        if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]):
            found = document
            break
    return found


@click.command()
@click.argument('dataset_path')
def rekordbox_file_to_analysis_file(dataset_path):
    """
    Read information from rekordbox_rhythm.xml present in dataset_path and
    convert it into analysis_rhythm_rekordbox.json to be stored in the same
    folder and compatible with our evaluation framework.
    """
    rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot()
    metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json'))
    out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json')
    analysis = dict()
    with click.progressbar(metadata_file.keys(), label="Converting...") as metadata_keys:
        for key in metadata_keys:
            entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file)
            if entry is not False:
                tempo_entry = entry.find('TEMPO')
                if tempo_entry is not None:
                    bpm_raw = float(tempo_entry.attrib['Bpm'])
                else:
                    bpm_raw = 0.0
                analysis[key] = {"RekBox": {
                    "bpm": bpm_raw,
                }}
    save_to_json(out_file_path, analysis, verbose=True)


if __name__ == '__main__':
    rekordbox_file_to_analysis_file()
[((28, 1, 28, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((29, 1, 29, 31), 'click.argument', 'click.argument', ({(29, 16, 29, 30): '"""dataset_path"""'}, {}), "('dataset_path')", False, 'import click\n'), ((38, 20, 38, 80), 'os.path.join', 'os.path.join', ({(38, 33, 38, 45): 'dataset_path', (38, 47, 38, 79): '"""analysis_rhythm_rekordbox.json"""'}, {}), "(dataset_path, 'analysis_rhythm_rekordbox.json')", False, 'import os\n'), ((54, 4, 54, 55), 'ac_utils.general.save_to_json', 'save_to_json', (), '', False, 'from ac_utils.general import save_to_json, load_from_json\n'), ((37, 35, 37, 78), 'os.path.join', 'os.path.join', ({(37, 48, 37, 60): 'dataset_path', (37, 62, 37, 77): '"""metadata.json"""'}, {}), "(dataset_path, 'metadata.json')", False, 'import os\n'), ((4, 45, 4, 71), 'os.path.realpath', 'os.path.realpath', ({(4, 62, 4, 70): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((36, 49, 36, 99), 'os.path.join', 'os.path.join', ({(36, 62, 36, 74): 'dataset_path', (36, 76, 36, 98): '"""rekordbox_rhythm.xml"""'}, {}), "(dataset_path, 'rekordbox_rhythm.xml')", False, 'import os\n')]
SergeoLacruz/inventree-python
inventree/part.py
94681428f61de4ca51171e685812ebc436b9be42
# -*- coding: utf-8 -*-

import logging
import re

import inventree.base
import inventree.stock
import inventree.company
import inventree.build


logger = logging.getLogger('inventree')


class PartCategory(inventree.base.InventreeObject):
    """ Class representing the PartCategory database model """

    URL = 'part/category'

    def getParts(self, **kwargs):
        return Part.list(self._api, category=self.pk, **kwargs)

    def getParentCategory(self):
        if self.parent:
            return PartCategory(self._api, self.parent)
        else:
            return None

    def getChildCategories(self, **kwargs):
        return PartCategory.list(self._api, parent=self.pk, **kwargs)

    def get_category_parameter_templates(self, fetch_parent=True):
        """
        fetch_parent: enable to fetch templates for parent categories
        """

        parameters_url = f'part/category/{self.pk}/parameters'

        return self.list(self._api,
                         url=parameters_url,
                         fetch_parent=fetch_parent)


class Part(inventree.base.ImageMixin, inventree.base.InventreeObject):
    """ Class representing the Part database model """

    URL = 'part'

    def getCategory(self):
        """ Return the part category associated with this part """
        return PartCategory(self._api, self.category)

    def getTestTemplates(self):
        """ Return all test templates associated with this part """
        return PartTestTemplate.list(self._api, part=self.pk)

    def getSupplierParts(self):
        """ Return the supplier parts associated with this part """
        return inventree.company.SupplierPart.list(self._api, part=self.pk)

    def getBomItems(self):
        """ Return the items required to make this part """
        return BomItem.list(self._api, part=self.pk)

    def isUsedIn(self):
        """ Return a list of all the parts this part is used in """
        return BomItem.list(self._api, sub_part=self.pk)

    def getBuilds(self, **kwargs):
        """ Return the builds associated with this part """
        return inventree.build.Build.list(self._api, part=self.pk, **kwargs)

    def getStockItems(self):
        """ Return the stock items associated with this part """
        return inventree.stock.StockItem.list(self._api, part=self.pk)

    def getParameters(self):
        """ Return parameters associated with this part """
        return Parameter.list(self._api, part=self.pk)

    def getRelated(self):
        """ Return related parts associated with this part """
        return PartRelated.list(self._api, part=self.pk)

    def getInternalPriceList(self):
        """
        Returns the InternalPrice list for this part
        """
        return InternalPrice.list(self._api, part=self.pk)

    def setInternalPrice(self, quantity: int, price: float):
        """
        Set the internal price for this part
        """
        return InternalPrice.setInternalPrice(self._api, self.pk, quantity, price)

    def getAttachments(self):
        return PartAttachment.list(self._api, part=self.pk)

    def uploadAttachment(self, attachment, comment=''):
        """
        Upload an attachment (file) against this Part.

        Args:
            attachment: Either a string (filename) or a file object
            comment: Attachment comment
        """
        return PartAttachment.upload(
            self._api,
            attachment,
            comment=comment,
            part=self.pk
        )


class PartAttachment(inventree.base.Attachment):
    """ Class representing a file attachment for a Part """

    URL = 'part/attachment'

    REQUIRED_KWARGS = ['part']


class PartTestTemplate(inventree.base.InventreeObject):
    """ Class representing a test template for a Part """

    URL = 'part/test-template'

    @classmethod
    def generateTestKey(cls, test_name):
        """ Generate a 'key' for this test """

        key = test_name.strip().lower()
        key = key.replace(' ', '')

        # Remove any characters that cannot be used to represent a variable
        key = re.sub(r'[^a-zA-Z0-9]', '', key)

        return key

    def getTestKey(self):
        return PartTestTemplate.generateTestKey(self.test_name)


class BomItem(inventree.base.InventreeObject):
    """ Class representing the BomItem database model """

    URL = 'bom'


class InternalPrice(inventree.base.InventreeObject):
    """ Class representing the InternalPrice model """

    URL = 'part/internal-price'

    @classmethod
    def setInternalPrice(cls, api, part, quantity: int, price: float):
        """
        Set the internal price for this part
        """

        data = {
            'part': part,
            'quantity': quantity,
            'price': price,
        }

        # Send the data to the server
        return api.post(cls.URL, data)


class PartRelated(inventree.base.InventreeObject):
    """ Class representing a relationship between parts """

    URL = 'part/related'

    @classmethod
    def add_related(cls, api, part1, part2):

        data = {
            'part_1': part1,
            'part_2': part2,
        }

        # Send the data to the server
        if api.post(cls.URL, data):
            logging.info("Related OK")
            ret = True
        else:
            logging.warning("Related failed")
            ret = False

        return ret


class Parameter(inventree.base.InventreeObject):
    """ class representing the Parameter database model """

    URL = 'part/parameter'

    def getunits(self):
        """ Get the dimension and units for this parameter """

        return [element for element in ParameterTemplate.list(self._api)
                if element['pk'] == self._data['template']]


class ParameterTemplate(inventree.base.InventreeObject):
    """ class representing the Parameter Template database model """

    URL = 'part/parameter/template'
[((12, 9, 12, 39), 'logging.getLogger', 'logging.getLogger', ({(12, 27, 12, 38): '"""inventree"""'}, {}), "('inventree')", False, 'import logging\n'), ((140, 14, 140, 46), 're.sub', 're.sub', ({(140, 21, 140, 36): '"""[^a-zA-Z0-9]"""', (140, 38, 140, 40): '""""""', (140, 42, 140, 45): 'key'}, {}), "('[^a-zA-Z0-9]', '', key)", False, 'import re\n'), ((189, 12, 189, 38), 'logging.info', 'logging.info', ({(189, 25, 189, 37): '"""Related OK"""'}, {}), "('Related OK')", False, 'import logging\n'), ((192, 12, 192, 45), 'logging.warning', 'logging.warning', ({(192, 28, 192, 44): '"""Related failed"""'}, {}), "('Related failed')", False, 'import logging\n')]
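A hypothetical usage sketch of the Part class above; the InvenTreeAPI entry point, server URL, and credentials are assumptions not shown in this module:

from inventree.api import InvenTreeAPI   # assumed package entry point
from inventree.part import Part

api = InvenTreeAPI("http://localhost:8000", username="admin", password="password")  # made-up server/credentials

parts = Part.list(api, category=1)       # Part.list(...) mirrors the calls used in the module above
for part in parts:
    print(part.pk, part.getCategory())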
avstarkov/aiohttp
tests/test_web_urldispatcher.py
b0a03cffccf677bf316227522a9b841c15dcb869
import functools
import os
import shutil
import tempfile
from unittest import mock
from unittest.mock import MagicMock

import pytest

from aiohttp import abc, web
from aiohttp.web_urldispatcher import SystemRoute


@pytest.fixture(scope='function')
def tmp_dir_path(request):
    """
    Give a path for a temporary directory

    The directory is destroyed at the end of the test.
    """
    # Temporary directory.
    tmp_dir = tempfile.mkdtemp()

    def teardown():
        # Delete the whole directory:
        shutil.rmtree(tmp_dir)

    request.addfinalizer(teardown)
    return tmp_dir


@pytest.mark.parametrize(
    "show_index,status,prefix,data",
    [pytest.param(False, 403, '/', None, id="index_forbidden"),
     pytest.param(True, 200, '/',
                  b'<html>\n<head>\n<title>Index of /.</title>\n'
                  b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
                  b'<li><a href="/my_dir">my_dir/</a></li>\n'
                  b'<li><a href="/my_file">my_file</a></li>\n'
                  b'</ul>\n</body>\n</html>',
                  id="index_root"),
     pytest.param(True, 200, '/static',
                  b'<html>\n<head>\n<title>Index of /.</title>\n'
                  b'</head>\n<body>\n<h1>Index of /.</h1>\n<ul>\n'
                  b'<li><a href="/static/my_dir">my_dir/</a></li>\n'
                  b'<li><a href="/static/my_file">my_file</a></li>\n'
                  b'</ul>\n</body>\n</html>',
                  id="index_static")])
async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client,
                                             show_index, status, prefix,
                                             data):
    """
    Tests the operation of static file server.
    Try to access the root of the static file server, and make sure that
    correct HTTP statuses are returned depending on whether the directory
    index should be shown or not.
    """
    # Put a file inside tmp_dir_path:
    my_file_path = os.path.join(tmp_dir_path, 'my_file')
    with open(my_file_path, 'w') as fw:
        fw.write('hello')

    my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
    os.mkdir(my_dir_path)

    my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
    with open(my_file_path, 'w') as fw:
        fw.write('world')

    app = web.Application()

    # Register global static route:
    app.router.add_static(prefix, tmp_dir_path, show_index=show_index)
    client = await aiohttp_client(app)

    # Request the root of the static directory.
    r = await client.get(prefix)
    assert r.status == status

    if data:
        assert r.headers['Content-Type'] == "text/html; charset=utf-8"
        read_ = (await r.read())
        assert read_ == data


async def test_follow_symlink(tmp_dir_path, aiohttp_client):
    """
    Tests the access to a symlink, in static folder
    """
    data = 'hello world'

    my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
    os.mkdir(my_dir_path)

    my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
    with open(my_file_path, 'w') as fw:
        fw.write(data)

    my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink')
    os.symlink(my_dir_path, my_symlink_path)

    app = web.Application()

    # Register global static route:
    app.router.add_static('/', tmp_dir_path, follow_symlinks=True)
    client = await aiohttp_client(app)

    # Request the root of the static directory.
    r = await client.get('/my_symlink/my_file_in_dir')
    assert r.status == 200
    assert (await r.text()) == data


@pytest.mark.parametrize('dir_name,filename,data', [
    ('', 'test file.txt', 'test text'),
    ('test dir name', 'test dir file .txt', 'test text file folder')
])
async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client,
                                              dir_name, filename, data):
    """
    Checks operation of static files with spaces
    """

    my_dir_path = os.path.join(tmp_dir_path, dir_name)

    if dir_name:
        os.mkdir(my_dir_path)

    my_file_path = os.path.join(my_dir_path, filename)
    with open(my_file_path, 'w') as fw:
        fw.write(data)

    app = web.Application()

    url = os.path.join('/', dir_name, filename)

    app.router.add_static('/', tmp_dir_path)
    client = await aiohttp_client(app)

    r = await client.get(url)
    assert r.status == 200
    assert (await r.text()) == data


async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client):
    """
    Tests accessing non-existing resource
    Try to access a non-existing resource and make sure that 404 HTTP status
    returned.
    """
    app = web.Application()

    # Register global static route:
    app.router.add_static('/', tmp_dir_path, show_index=True)
    client = await aiohttp_client(app)

    # Request the root of the static directory.
    r = await client.get('/non_existing_resource')
    assert r.status == 404


@pytest.mark.parametrize('registered_path,request_url', [
    ('/a:b', '/a:b'),
    ('/a@b', '/a@b'),
    ('/a:b', '/a%3Ab'),
])
async def test_url_escaping(aiohttp_client, registered_path, request_url):
    """
    Tests accessing a resource with escaped characters in the URL
    """
    app = web.Application()

    async def handler(request):
        return web.Response()

    app.router.add_get(registered_path, handler)
    client = await aiohttp_client(app)

    r = await client.get(request_url)
    assert r.status == 200


async def test_handler_metadata_persistence():
    """
    Tests accessing metadata of a handler after registering it on the app
    router.
    """
    app = web.Application()

    async def async_handler(request):
        """Doc"""
        return web.Response()

    def sync_handler(request):
        """Doc"""
        return web.Response()

    app.router.add_get('/async', async_handler)

    with pytest.warns(DeprecationWarning):
        app.router.add_get('/sync', sync_handler)

    for resource in app.router.resources():
        for route in resource:
            assert route.handler.__doc__ == 'Doc'


async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client):
    """
    Tests the unauthorized access to a folder of static file server.
    Try to list a folder content of static file server when server does not
    have permissions to do so for the folder.
    """
    my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
    os.mkdir(my_dir_path)

    app = web.Application()

    with mock.patch('pathlib.Path.__new__') as path_constructor:
        path = MagicMock()
        path.joinpath.return_value = path
        path.resolve.return_value = path
        path.iterdir.return_value.__iter__.side_effect = PermissionError()
        path_constructor.return_value = path

        # Register global static route:
        app.router.add_static('/', tmp_dir_path, show_index=True)
        client = await aiohttp_client(app)

        # Request the root of the static directory.
        r = await client.get('/my_dir')
        assert r.status == 403


async def test_access_symlink_loop(tmp_dir_path, aiohttp_client):
    """
    Tests the access to a looped symlink, which could not be resolved.
    """
    my_dir_path = os.path.join(tmp_dir_path, 'my_symlink')
    os.symlink(my_dir_path, my_dir_path)

    app = web.Application()

    # Register global static route:
    app.router.add_static('/', tmp_dir_path, show_index=True)
    client = await aiohttp_client(app)

    # Request the root of the static directory.
    r = await client.get('/my_symlink')
    assert r.status == 404


async def test_access_special_resource(tmp_dir_path, aiohttp_client):
    """
    Tests the access to a resource that is neither a file nor a directory.
    Checks that if a special resource is accessed (e.g. named pipe or UNIX
    domain socket) then 404 HTTP status returned.
    """
    app = web.Application()

    with mock.patch('pathlib.Path.__new__') as path_constructor:
        special = MagicMock()
        special.is_dir.return_value = False
        special.is_file.return_value = False

        path = MagicMock()
        path.joinpath.side_effect = lambda p: (special if p == 'special'
                                               else path)
        path.resolve.return_value = path
        special.resolve.return_value = special

        path_constructor.return_value = path

        # Register global static route:
        app.router.add_static('/', tmp_dir_path, show_index=True)
        client = await aiohttp_client(app)

        # Request the root of the static directory.
        r = await client.get('/special')
        assert r.status == 404


async def test_partialy_applied_handler(aiohttp_client):
    app = web.Application()

    async def handler(data, request):
        return web.Response(body=data)

    with pytest.warns(DeprecationWarning):
        app.router.add_route('GET', '/', functools.partial(handler, b'hello'))

    client = await aiohttp_client(app)

    r = await client.get('/')
    data = (await r.read())
    assert data == b'hello'


def test_system_route():
    route = SystemRoute(web.HTTPCreated(reason='test'))
    with pytest.raises(RuntimeError):
        route.url_for()
    assert route.name is None
    assert route.resource is None
    assert "<SystemRoute 201: test>" == repr(route)
    assert 201 == route.status
    assert 'test' == route.reason


async def test_412_is_returned(aiohttp_client):

    class MyRouter(abc.AbstractRouter):

        async def resolve(self, request):
            raise web.HTTPPreconditionFailed()

    app = web.Application(router=MyRouter())

    client = await aiohttp_client(app)

    resp = await client.get('/')

    assert resp.status == 412


async def test_allow_head(aiohttp_client):
    """
    Test allow_head on routes.
    """
    app = web.Application()

    async def handler(_):
        return web.Response()

    app.router.add_get('/a', handler, name='a')
    app.router.add_get('/b', handler, allow_head=False, name='b')
    client = await aiohttp_client(app)

    r = await client.get('/a')
    assert r.status == 200
    await r.release()

    r = await client.head('/a')
    assert r.status == 200
    await r.release()

    r = await client.get('/b')
    assert r.status == 200
    await r.release()

    r = await client.head('/b')
    assert r.status == 405
    await r.release()


@pytest.mark.parametrize("path", [
    '/a',
    '/{a}',
])
def test_reuse_last_added_resource(path):
    """
    Test that adding a route with the same name and path of the last added
    resource doesn't create a new resource.
    """
    app = web.Application()

    async def handler(request):
        return web.Response()

    app.router.add_get(path, handler, name="a")
    app.router.add_post(path, handler, name="a")

    assert len(app.router.resources()) == 1


def test_resource_raw_match():
    app = web.Application()

    async def handler(request):
        return web.Response()

    route = app.router.add_get("/a", handler, name="a")
    assert route.resource.raw_match("/a")

    route = app.router.add_get("/{b}", handler, name="b")
    assert route.resource.raw_match("/{b}")

    resource = app.router.add_static("/static", ".")
    assert not resource.raw_match("/static")


async def test_add_view(aiohttp_client):
    app = web.Application()

    class MyView(web.View):
        async def get(self):
            return web.Response()

        async def post(self):
            return web.Response()

    app.router.add_view("/a", MyView)

    client = await aiohttp_client(app)

    r = await client.get("/a")
    assert r.status == 200
    await r.release()

    r = await client.post("/a")
    assert r.status == 200
    await r.release()

    r = await client.put("/a")
    assert r.status == 405
    await r.release()


async def test_decorate_view(aiohttp_client):
    routes = web.RouteTableDef()

    @routes.view("/a")
    class MyView(web.View):
        async def get(self):
            return web.Response()

        async def post(self):
            return web.Response()

    app = web.Application()
    app.router.add_routes(routes)

    client = await aiohttp_client(app)

    r = await client.get("/a")
    assert r.status == 200
    await r.release()

    r = await client.post("/a")
    assert r.status == 200
    await r.release()

    r = await client.put("/a")
    assert r.status == 405
    await r.release()


async def test_web_view(aiohttp_client):
    app = web.Application()

    class MyView(web.View):
        async def get(self):
            return web.Response()

        async def post(self):
            return web.Response()

    app.router.add_routes([
        web.view("/a", MyView)
    ])

    client = await aiohttp_client(app)

    r = await client.get("/a")
    assert r.status == 200
    await r.release()

    r = await client.post("/a")
    assert r.status == 200
    await r.release()

    r = await client.put("/a")
    assert r.status == 405
    await r.release()
[((14, 1, 14, 33), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((112, 1, 115, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(112, 25, 112, 49): '"""dir_name,filename,data"""', (112, 51, 115, 1): "[('', 'test file.txt', 'test text'), ('test dir name', 'test dir file .txt',\n 'test text file folder')]"}, {}), "('dir_name,filename,data', [('', 'test file.txt',\n 'test text'), ('test dir name', 'test dir file .txt',\n 'test text file folder')])", False, 'import pytest\n'), ((161, 1, 165, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(161, 25, 161, 54): '"""registered_path,request_url"""', (161, 56, 165, 1): "[('/a:b', '/a:b'), ('/a@b', '/a@b'), ('/a:b', '/a%3Ab')]"}, {}), "('registered_path,request_url', [('/a:b', '/a:b'), (\n '/a@b', '/a@b'), ('/a:b', '/a%3Ab')])", False, 'import pytest\n'), ((351, 1, 354, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(351, 25, 351, 31): '"""path"""', (351, 33, 354, 1): "['/a', '/{a}']"}, {}), "('path', ['/a', '/{a}'])", False, 'import pytest\n'), ((21, 14, 21, 32), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ({}, {}), '()', False, 'import tempfile\n'), ((57, 19, 57, 56), 'os.path.join', 'os.path.join', ({(57, 32, 57, 44): 'tmp_dir_path', (57, 46, 57, 55): '"""my_file"""'}, {}), "(tmp_dir_path, 'my_file')", False, 'import os\n'), ((61, 18, 61, 54), 'os.path.join', 'os.path.join', ({(61, 31, 61, 43): 'tmp_dir_path', (61, 45, 61, 53): '"""my_dir"""'}, {}), "(tmp_dir_path, 'my_dir')", False, 'import os\n'), ((62, 4, 62, 25), 'os.mkdir', 'os.mkdir', ({(62, 13, 62, 24): 'my_dir_path'}, {}), '(my_dir_path)', False, 'import os\n'), ((64, 19, 64, 62), 'os.path.join', 'os.path.join', ({(64, 32, 64, 43): 'my_dir_path', (64, 45, 64, 61): '"""my_file_in_dir"""'}, {}), "(my_dir_path, 'my_file_in_dir')", False, 'import os\n'), ((68, 10, 68, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((90, 18, 90, 54), 'os.path.join', 'os.path.join', ({(90, 31, 90, 43): 'tmp_dir_path', (90, 45, 90, 53): '"""my_dir"""'}, {}), "(tmp_dir_path, 'my_dir')", False, 'import os\n'), ((91, 4, 91, 25), 'os.mkdir', 'os.mkdir', ({(91, 13, 91, 24): 'my_dir_path'}, {}), '(my_dir_path)', False, 'import os\n'), ((93, 19, 93, 62), 'os.path.join', 'os.path.join', ({(93, 32, 93, 43): 'my_dir_path', (93, 45, 93, 61): '"""my_file_in_dir"""'}, {}), "(my_dir_path, 'my_file_in_dir')", False, 'import os\n'), ((97, 22, 97, 62), 'os.path.join', 'os.path.join', ({(97, 35, 97, 47): 'tmp_dir_path', (97, 49, 97, 61): '"""my_symlink"""'}, {}), "(tmp_dir_path, 'my_symlink')", False, 'import os\n'), ((98, 4, 98, 44), 'os.symlink', 'os.symlink', ({(98, 15, 98, 26): 'my_dir_path', (98, 28, 98, 43): 'my_symlink_path'}, {}), '(my_dir_path, my_symlink_path)', False, 'import os\n'), ((100, 10, 100, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((122, 18, 122, 54), 'os.path.join', 'os.path.join', ({(122, 31, 122, 43): 'tmp_dir_path', (122, 45, 122, 53): 'dir_name'}, {}), '(tmp_dir_path, dir_name)', False, 'import os\n'), ((127, 19, 127, 54), 'os.path.join', 'os.path.join', ({(127, 32, 127, 43): 'my_dir_path', (127, 45, 127, 53): 'filename'}, {}), '(my_dir_path, filename)', False, 'import os\n'), ((132, 10, 132, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((134, 10, 134, 47), 'os.path.join', 'os.path.join', ({(134, 23, 134, 26): '"""/"""', (134, 28, 134, 36): 'dir_name', (134, 
38, 134, 46): 'filename'}, {}), "('/', dir_name, filename)", False, 'import os\n'), ((150, 10, 150, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((170, 10, 170, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((186, 10, 186, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((211, 18, 211, 54), 'os.path.join', 'os.path.join', ({(211, 31, 211, 43): 'tmp_dir_path', (211, 45, 211, 53): '"""my_dir"""'}, {}), "(tmp_dir_path, 'my_dir')", False, 'import os\n'), ((212, 4, 212, 25), 'os.mkdir', 'os.mkdir', ({(212, 13, 212, 24): 'my_dir_path'}, {}), '(my_dir_path)', False, 'import os\n'), ((214, 10, 214, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((236, 18, 236, 58), 'os.path.join', 'os.path.join', ({(236, 31, 236, 43): 'tmp_dir_path', (236, 45, 236, 57): '"""my_symlink"""'}, {}), "(tmp_dir_path, 'my_symlink')", False, 'import os\n'), ((237, 4, 237, 40), 'os.symlink', 'os.symlink', ({(237, 15, 237, 26): 'my_dir_path', (237, 28, 237, 39): 'my_dir_path'}, {}), '(my_dir_path, my_dir_path)', False, 'import os\n'), ((239, 10, 239, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((256, 10, 256, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((281, 10, 281, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((326, 10, 326, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((360, 10, 360, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((372, 10, 372, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((388, 10, 388, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((415, 13, 415, 32), 'aiohttp.web.RouteTableDef', 'web.RouteTableDef', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((425, 10, 425, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((444, 10, 444, 27), 'aiohttp.web.Application', 'web.Application', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((25, 8, 25, 30), 'shutil.rmtree', 'shutil.rmtree', ({(25, 22, 25, 29): 'tmp_dir'}, {}), '(tmp_dir)', False, 'import shutil\n'), ((33, 5, 33, 62), 'pytest.param', 'pytest.param', (), '', False, 'import pytest\n'), ((34, 5, 40, 34), 'pytest.param', 'pytest.param', (), '', False, 'import pytest\n'), ((41, 5, 47, 36), 'pytest.param', 'pytest.param', (), '', False, 'import pytest\n'), ((125, 8, 125, 29), 'os.mkdir', 'os.mkdir', ({(125, 17, 125, 28): 'my_dir_path'}, {}), '(my_dir_path)', False, 'import os\n'), ((173, 15, 173, 29), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((190, 15, 190, 29), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((194, 15, 194, 29), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((197, 9, 197, 41), 'pytest.warns', 'pytest.warns', ({(197, 22, 197, 40): 'DeprecationWarning'}, {}), '(DeprecationWarning)', False, 'import pytest\n'), ((216, 9, 
216, 43), 'unittest.mock.patch', 'mock.patch', ({(216, 20, 216, 42): '"""pathlib.Path.__new__"""'}, {}), "('pathlib.Path.__new__')", False, 'from unittest import mock\n'), ((217, 15, 217, 26), 'unittest.mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from unittest.mock import MagicMock\n'), ((258, 9, 258, 43), 'unittest.mock.patch', 'mock.patch', ({(258, 20, 258, 42): '"""pathlib.Path.__new__"""'}, {}), "('pathlib.Path.__new__')", False, 'from unittest import mock\n'), ((259, 18, 259, 29), 'unittest.mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from unittest.mock import MagicMock\n'), ((263, 15, 263, 26), 'unittest.mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from unittest.mock import MagicMock\n'), ((284, 15, 284, 38), 'aiohttp.web.Response', 'web.Response', (), '', False, 'from aiohttp import abc, web\n'), ((286, 9, 286, 41), 'pytest.warns', 'pytest.warns', ({(286, 22, 286, 40): 'DeprecationWarning'}, {}), '(DeprecationWarning)', False, 'import pytest\n'), ((296, 24, 296, 54), 'aiohttp.web.HTTPCreated', 'web.HTTPCreated', (), '', False, 'from aiohttp import abc, web\n'), ((297, 9, 297, 36), 'pytest.raises', 'pytest.raises', ({(297, 23, 297, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((329, 15, 329, 29), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((363, 15, 363, 29), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((375, 15, 375, 29), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((287, 41, 287, 77), 'functools.partial', 'functools.partial', ({(287, 59, 287, 66): 'handler', (287, 68, 287, 76): "b'hello'"}, {}), "(handler, b'hello')", False, 'import functools\n'), ((311, 18, 311, 46), 'aiohttp.web.HTTPPreconditionFailed', 'web.HTTPPreconditionFailed', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((392, 19, 392, 33), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((395, 19, 395, 33), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((420, 19, 420, 33), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((423, 19, 423, 33), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((448, 19, 448, 33), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((451, 19, 451, 33), 'aiohttp.web.Response', 'web.Response', ({}, {}), '()', False, 'from aiohttp import abc, web\n'), ((454, 8, 454, 30), 'aiohttp.web.view', 'web.view', ({(454, 17, 454, 21): '"""/a"""', (454, 23, 454, 29): 'MyView'}, {}), "('/a', MyView)", False, 'from aiohttp import abc, web\n')]
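Each record in this dump closes with an apis field like the one above: a Python-literal list of tuples pairing a source span (start_line, start_col, end_line, end_col) with the dotted name of the API being called, its argument maps, the reconstructed call text, a flag, and the import statement that binds the name. The exact schema is not documented here, so the sketch below relies only on what every entry visibly shares — the span in element 0 and the dotted name in element 1 — and treats the rest as opaque; the function name and the record["apis"] access pattern are illustrative assumptions.

import ast
from collections import Counter

def count_api_usage(apis_field):
    """Tally dotted API names in one record's apis field.

    Assumes the field parses as a Python literal (the entries contain only
    tuples, dicts, strings, numbers and booleans, so ast.literal_eval is
    safe) and that element 1 of every entry is the dotted API name.
    """
    counts = Counter()
    for entry in ast.literal_eval(apis_field):
        dotted_name = entry[1]          # e.g. 'pytest.mark.parametrize'
        counts[dotted_name] += 1
    return counts

# Hypothetical usage: count_api_usage(record["apis"]).most_common(5)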
nairouz/R-GAE
R-GMM-VGAE/model_citeseer.py
acc7bfe36153a4c7d6f68e21a557bb4d99dab639
#!/usr/bin/env python # -*- coding: utf-8 -*- # @Authors : Nairouz Mrabah ([email protected]) & Mohamed Fawzi Touati ([email protected]) # @Paper : Rethinking Graph Autoencoder Models for Attributed Graph Clustering # @License : MIT License import torch import numpy as np import torch.nn as nn import scipy.sparse as sp import torch.nn.functional as F from tqdm import tqdm from torch.optim import Adam from sklearn.mixture import GaussianMixture from torch.optim.lr_scheduler import StepLR from preprocessing import sparse_to_tuple from sklearn.neighbors import NearestNeighbors from sklearn import metrics from munkres import Munkres def random_uniform_init(input_dim, output_dim): init_range = np.sqrt(6.0 / (input_dim + output_dim)) initial = torch.rand(input_dim, output_dim)*2*init_range - init_range return nn.Parameter(initial) def q_mat(X, centers, alpha=1.0): X = X.detach().numpy() centers = centers.detach().numpy() if X.size == 0: q = np.array([]) else: q = 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) / alpha)) q = q ** ((alpha + 1.0) / 2.0) q = np.transpose(np.transpose(q) / np.sum(q, axis=1)) return q def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2): unconf_indices = [] conf_indices = [] q = q_mat(emb, centers_emb, alpha=1.0) confidence1 = q.max(1) confidence2 = np.zeros((q.shape[0],)) a = np.argsort(q, axis=1) for i in range(q.shape[0]): confidence1[i] = q[i,a[i,-1]] confidence2[i] = q[i,a[i,-2]] if (confidence1[i]) > beta1 and (confidence1[i] - confidence2[i]) > beta2: unconf_indices.append(i) else: conf_indices.append(i) unconf_indices = np.asarray(unconf_indices, dtype=int) conf_indices = np.asarray(conf_indices, dtype=int) return unconf_indices, conf_indices class clustering_metrics(): def __init__(self, true_label, predict_label): self.true_label = true_label self.pred_label = predict_label def clusteringAcc(self): # best mapping between true_label and predict label l1 = list(set(self.true_label)) numclass1 = len(l1) l2 = list(set(self.pred_label)) numclass2 = len(l2) if numclass1 != numclass2: print('Class Not equal, Error!!!!') return 0 cost = np.zeros((numclass1, numclass2), dtype=int) for i, c1 in enumerate(l1): mps = [i1 for i1, e1 in enumerate(self.true_label) if e1 == c1] for j, c2 in enumerate(l2): mps_d = [i1 for i1 in mps if self.pred_label[i1] == c2] cost[i][j] = len(mps_d) # match two clustering results by Munkres algorithm m = Munkres() cost = cost.__neg__().tolist() indexes = m.compute(cost) # get the match results new_predict = np.zeros(len(self.pred_label)) for i, c in enumerate(l1): # correponding label in l2: c2 = l2[indexes[i][1]] # ai is the index with label==c2 in the pred_label list ai = [ind for ind, elm in enumerate(self.pred_label) if elm == c2] new_predict[ai] = c acc = metrics.accuracy_score(self.true_label, new_predict) f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro') precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro') recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro') f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro') precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro') recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro') return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro def evaluationClusterModelFromLabel(self): nmi = metrics.normalized_mutual_info_score(self.true_label, 
self.pred_label) adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label) acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc() print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore)) fh = open('recoder.txt', 'a') fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore) ) fh.write('\r\n') fh.flush() fh.close() return acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro class GraphConvSparse(nn.Module): def __init__(self, input_dim, output_dim, activation = F.relu, **kwargs): super(GraphConvSparse, self).__init__(**kwargs) self.weight = random_uniform_init(input_dim, output_dim) self.activation = activation def forward(self, inputs, adj): x = inputs x = torch.mm(x,self.weight) x = torch.mm(adj, x) outputs = self.activation(x) return outputs class ReGMM_VGAE(nn.Module): def __init__(self, **kwargs): super(ReGMM_VGAE, self).__init__() self.num_neurons = kwargs['num_neurons'] self.num_features = kwargs['num_features'] self.embedding_size = kwargs['embedding_size'] self.nClusters = kwargs['nClusters'] # VGAE training parameters self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons) self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) # GMM training parameters self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def pretrain(self, adj, features, adj_label, y, weight_tensor, norm, epochs, lr, save_path, dataset): opti = Adam(self.parameters(), lr=lr) epoch_bar = tqdm(range(epochs)) gmm = GaussianMixture(n_components = self.nClusters , covariance_type = 'diag') for _ in epoch_bar: opti.zero_grad() _,_, z = self.encode(features, adj) x_ = self.decode(z) loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor) loss.backward() opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data = torch.from_numpy(gmm.weights_) self.mu_c.data = torch.from_numpy(gmm.means_) self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_)) self.logstd = self.mean def ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1): pi = self.pi mu_c = self.mu_c log_sigma2_c = self.log_sigma2_c det = 1e-2 Loss = 1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor) Loss = Loss * features.size(0) yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c = yita_c / (yita_c.sum(1).view(-1,1)) KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 = KL1 KL2= 
torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -= KL2 return Loss, Loss1, Loss+Loss1 def generate_centers(self, emb_unconf): y_pred = self.predict(emb_unconf) nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices = nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred] def update_graph(self, adj, labels, emb, unconf_indices, conf_indices): k = 0 y_pred = self.predict(emb) emb_unconf = emb[unconf_indices] adj = adj.tolil() idx = unconf_indices[self.generate_centers(emb_unconf)] for i, k in enumerate(unconf_indices): adj_k = adj[k].tocsr().indices if not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]]) : adj[k, idx[i]] = 1 for j in adj_k: if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]): adj[k, j] = 0 adj = adj.tocsr() adj_label = adj + sp.eye(adj.shape[0]) adj_label = sparse_to_tuple(adj_label) adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask = adj_label.to_dense().view(-1) == 1 weight_tensor = torch.ones(weight_mask.size(0)) pos_weight_orig = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() weight_tensor[weight_mask] = pos_weight_orig return adj, adj_label, weight_tensor def train(self, adj_norm, adj, features, y, norm, epochs, lr, beta1, beta2, save_path, dataset): self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk')) opti = Adam(self.parameters(), lr=lr, weight_decay = 0.089) lr_s = StepLR(opti, step_size=10, gamma=0.9) import os, csv epoch_bar = tqdm(range(epochs)) previous_unconflicted = [] previous_conflicted = [] epoch_stable = 0 for epoch in epoch_bar: opti.zero_grad() z_mu, z_sigma2_log, emb = self.encode(features, adj_norm) x_ = self.decode(emb) unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2) if epoch == 0: adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) if len(previous_unconflicted) < len(unconflicted_ind) : z_mu = z_mu[unconflicted_ind] z_sigma2_log = z_sigma2_log[unconflicted_ind] emb_unconf = emb[unconflicted_ind] emb_conf = emb[conflicted_ind] previous_conflicted = conflicted_ind previous_unconflicted = unconflicted_ind else : epoch_stable += 1 z_mu = z_mu[previous_unconflicted] z_sigma2_log = z_sigma2_log[previous_unconflicted] emb_unconf = emb[previous_unconflicted] emb_conf = emb[previous_conflicted] if epoch_stable >= 15: epoch_stable = 0 beta1 = beta1 * 0.96 beta2 = beta2 * 0.98 if epoch % 50 == 0 and epoch <= 200 : adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred = self.predict(emb) cm = clustering_metrics(y, y_pred) acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step() lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for c in range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2): c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c def predict(self, 
z):
        # Posterior responsibility of each GMM component for every embedding;
        # det is a small stabilizing constant, mirroring ELBO_Loss.
        pi = self.pi
        log_sigma2_c = self.log_sigma2_c
        mu_c = self.mu_c
        det = 1e-2
        yita_c = torch.exp(torch.log(pi.unsqueeze(0)) + self.gaussian_pdfs_log(z, mu_c, log_sigma2_c)) + det
        yita = yita_c.detach().numpy()
        return np.argmax(yita, axis=1)

    def encode(self, x_features, adj):
        # Two-layer GCN encoder producing the posterior mean/log-std, sampled
        # with the reparameterization trick.
        hidden = self.base_gcn(x_features, adj)
        self.mean = self.gcn_mean(hidden, adj)
        self.logstd = self.gcn_logstddev(hidden, adj)
        gaussian_noise = torch.randn(x_features.size(0), self.embedding_size)
        sampled_z = gaussian_noise * torch.exp(self.logstd) + self.mean
        return self.mean, self.logstd, sampled_z

    @staticmethod
    def decode(z):
        # Inner-product decoder reconstructing edge probabilities.
        A_pred = torch.sigmoid(torch.matmul(z, z.t()))
        return A_pred
[((22, 17, 22, 56), 'numpy.sqrt', 'np.sqrt', ({(22, 25, 22, 55): '6.0 / (input_dim + output_dim)'}, {}), '(6.0 / (input_dim + output_dim))', True, 'import numpy as np\n'), ((42, 18, 42, 41), 'numpy.zeros', 'np.zeros', ({(42, 27, 42, 40): '(q.shape[0],)'}, {}), '((q.shape[0],))', True, 'import numpy as np\n'), ((43, 8, 43, 29), 'numpy.argsort', 'np.argsort', (), '', True, 'import numpy as np\n'), ((51, 21, 51, 58), 'numpy.asarray', 'np.asarray', (), '', True, 'import numpy as np\n'), ((52, 19, 52, 54), 'numpy.asarray', 'np.asarray', (), '', True, 'import numpy as np\n'), ((30, 12, 30, 24), 'numpy.array', 'np.array', ({(30, 21, 30, 23): '[]'}, {}), '([])', True, 'import numpy as np\n'), ((71, 15, 71, 58), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((80, 12, 80, 21), 'munkres.Munkres', 'Munkres', ({}, {}), '()', False, 'from munkres import Munkres\n'), ((96, 14, 96, 66), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', ({(96, 37, 96, 52): 'self.true_label', (96, 54, 96, 65): 'new_predict'}, {}), '(self.true_label, new_predict)', False, 'from sklearn import metrics\n'), ((98, 19, 98, 82), 'sklearn.metrics.f1_score', 'metrics.f1_score', (), '', False, 'from sklearn import metrics\n'), ((99, 26, 99, 96), 'sklearn.metrics.precision_score', 'metrics.precision_score', (), '', False, 'from sklearn import metrics\n'), ((100, 23, 100, 90), 'sklearn.metrics.recall_score', 'metrics.recall_score', (), '', False, 'from sklearn import metrics\n'), ((101, 19, 101, 82), 'sklearn.metrics.f1_score', 'metrics.f1_score', (), '', False, 'from sklearn import metrics\n'), ((102, 26, 102, 96), 'sklearn.metrics.precision_score', 'metrics.precision_score', (), '', False, 'from sklearn import metrics\n'), ((103, 23, 103, 90), 'sklearn.metrics.recall_score', 'metrics.recall_score', (), '', False, 'from sklearn import metrics\n'), ((107, 14, 107, 84), 'sklearn.metrics.normalized_mutual_info_score', 'metrics.normalized_mutual_info_score', ({(107, 51, 107, 66): 'self.true_label', (107, 68, 107, 83): 'self.pred_label'}, {}), '(self.true_label, self.pred_label)', False, 'from sklearn import metrics\n'), ((108, 19, 108, 80), 'sklearn.metrics.adjusted_rand_score', 'metrics.adjusted_rand_score', ({(108, 47, 108, 62): 'self.true_label', (108, 64, 108, 79): 'self.pred_label'}, {}), '(self.true_label, self.pred_label)', False, 'from sklearn import metrics\n'), ((130, 12, 130, 35), 'torch.mm', 'torch.mm', ({(130, 21, 130, 22): 'x', (130, 23, 130, 34): 'self.weight'}, {}), '(x, self.weight)', False, 'import torch\n'), ((131, 12, 131, 28), 'torch.mm', 'torch.mm', ({(131, 21, 131, 24): 'adj', (131, 26, 131, 27): 'x'}, {}), '(adj, x)', False, 'import torch\n'), ((156, 14, 156, 87), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', (), '', False, 'from sklearn.mixture import GaussianMixture\n'), ((165, 23, 165, 53), 'torch.from_numpy', 'torch.from_numpy', ({(165, 40, 165, 52): 'gmm.weights_'}, {}), '(gmm.weights_)', False, 'import torch\n'), ((166, 25, 166, 53), 'torch.from_numpy', 'torch.from_numpy', ({(166, 42, 166, 52): 'gmm.means_'}, {}), '(gmm.means_)', False, 'import torch\n'), ((208, 20, 208, 46), 'preprocessing.sparse_to_tuple', 'sparse_to_tuple', ({(208, 36, 208, 45): 'adj_label'}, {}), '(adj_label)', False, 'from preprocessing import sparse_to_tuple\n'), ((221, 15, 221, 52), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (), '', False, 'from torch.optim.lr_scheduler import StepLR\n'), ((267, 15, 267, 29), 'torch.cat', 'torch.cat', ({(267, 25, 267, 26): 'G', (267, 27, 267, 28): '(1)'}, {}), 
'(G, 1)', False, 'import torch\n'), ((280, 15, 280, 38), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((150, 33, 150, 81), 'torch.randn', 'torch.randn', ({(150, 45, 150, 59): 'self.nClusters', (150, 61, 150, 80): 'self.embedding_size'}, {}), '(self.nClusters, self.embedding_size)', False, 'import torch\n'), ((151, 41, 151, 89), 'torch.randn', 'torch.randn', ({(151, 53, 151, 67): 'self.nClusters', (151, 69, 151, 88): 'self.embedding_size'}, {}), '(self.nClusters, self.embedding_size)', False, 'import torch\n'), ((167, 44, 167, 78), 'torch.from_numpy', 'torch.from_numpy', ({(167, 61, 167, 77): 'gmm.covariances_'}, {}), '(gmm.covariances_)', False, 'import torch\n'), ((207, 26, 207, 46), 'scipy.sparse.eye', 'sp.eye', ({(207, 33, 207, 45): 'adj.shape[0]'}, {}), '(adj.shape[0])', True, 'import scipy.sparse as sp\n'), ((209, 45, 209, 77), 'torch.LongTensor', 'torch.LongTensor', ({(209, 62, 209, 76): 'adj_label[0].T'}, {}), '(adj_label[0].T)', False, 'import torch\n'), ((210, 36, 210, 67), 'torch.FloatTensor', 'torch.FloatTensor', ({(210, 54, 210, 66): 'adj_label[1]'}, {}), '(adj_label[1])', False, 'import torch\n'), ((211, 36, 211, 60), 'torch.Size', 'torch.Size', ({(211, 47, 211, 59): 'adj_label[2]'}, {}), '(adj_label[2])', False, 'import torch\n'), ((219, 29, 219, 83), 'torch.load', 'torch.load', ({(219, 40, 219, 82): "(save_path + dataset + '/pretrain/model.pk')"}, {}), "(save_path + dataset + '/pretrain/model.pk')", False, 'import torch\n'), ((23, 14, 23, 47), 'torch.rand', 'torch.rand', ({(23, 25, 23, 34): 'input_dim', (23, 36, 23, 46): 'output_dim'}, {}), '(input_dim, output_dim)', False, 'import torch\n'), ((34, 25, 34, 40), 'numpy.transpose', 'np.transpose', ({(34, 38, 34, 39): 'q'}, {}), '(q)', True, 'import numpy as np\n'), ((34, 43, 34, 60), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((149, 31, 149, 57), 'torch.ones', 'torch.ones', ({(149, 42, 149, 56): 'self.nClusters'}, {}), '(self.nClusters)', False, 'import torch\n'), ((189, 13, 189, 68), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', (), '', False, 'from sklearn.neighbors import NearestNeighbors\n'), ((287, 37, 287, 59), 'torch.exp', 'torch.exp', ({(287, 47, 287, 58): 'self.logstd'}, {}), '(self.logstd)', False, 'import torch\n'), ((183, 96, 183, 123), 'torch.sum', 'torch.sum', ({(183, 106, 183, 120): '(1 + z_sigma2_log)', (183, 121, 183, 122): '(1)'}, {}), '(1 + z_sigma2_log, 1)', False, 'import torch\n'), ((201, 19, 201, 41), 'numpy.isin', 'np.isin', ({(201, 27, 201, 33): 'idx[i]', (201, 35, 201, 40): 'adj_k'}, {}), '(idx[i], adj_k)', True, 'import numpy as np\n'), ((204, 19, 204, 45), 'numpy.isin', 'np.isin', ({(204, 27, 204, 28): 'j', (204, 30, 204, 44): 'unconf_indices'}, {}), '(j, unconf_indices)', True, 'import numpy as np\n'), ((204, 51, 204, 73), 'numpy.isin', 'np.isin', ({(204, 59, 204, 65): 'idx[i]', (204, 67, 204, 72): 'adj_k'}, {}), '(idx[i], adj_k)', True, 'import numpy as np\n'), ((270, 29, 270, 44), 'numpy.log', 'np.log', ({(270, 36, 270, 43): '(np.pi * 2)'}, {}), '(np.pi * 2)', True, 'import numpy as np\n'), ((270, 70, 270, 91), 'torch.exp', 'torch.exp', ({(270, 80, 270, 90): 'log_sigma2'}, {}), '(log_sigma2)', False, 'import torch\n'), ((32, 43, 32, 63), 'numpy.expand_dims', 'np.expand_dims', ({(32, 58, 32, 59): 'X', (32, 61, 32, 62): '(1)'}, {}), '(X, 1)', True, 'import numpy as np\n')]
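The two-phase flow above (GMM-initialized pretraining, then graph-refining training) can be exercised end to end on a toy graph. A minimal sketch, assuming the class definitions above are in scope; the toy data, hyper-parameters and ./results/ checkpoint path are illustrative, the preprocessing mirrors what pretrain() and update_graph() expect, and the explicit torch.save is added because the pretrain() shown here does not itself write the model.pk that train() loads.

import os
import numpy as np
import scipy.sparse as sp
import torch

# Toy two-cluster graph; in practice these come from the Citeseer loaders.
N, F_DIM, K = 6, 8, 2
A = np.zeros((N, N), dtype=np.float32)
A[0, 1] = A[1, 2] = A[0, 2] = 1          # triangle 1
A[3, 4] = A[4, 5] = A[3, 5] = 1          # triangle 2
A = A + A.T
adj = sp.csr_matrix(A)                   # scipy matrix, as train() expects
features = torch.eye(N, F_DIM)          # dummy one-hot features
y = np.array([0, 0, 0, 1, 1, 1])

# Symmetrically normalized adjacency with self-loops, as a dense tensor
# (GraphConvSparse only needs something torch.mm accepts).
A_hat = A + np.eye(N, dtype=np.float32)
d_inv_sqrt = np.diag(A_hat.sum(1) ** -0.5)
adj_norm = torch.from_numpy(d_inv_sqrt @ A_hat @ d_inv_sqrt).float()

# Reconstruction target and class-balance weights, mirroring update_graph().
adj_label = torch.from_numpy(A + np.eye(N, dtype=np.float32)).to_sparse()
weight_tensor = torch.ones(N * N)
pos_weight = float(N * N - A.sum()) / A.sum()
weight_tensor[adj_label.to_dense().view(-1) == 1] = pos_weight
norm = N * N / float(2 * (N * N - A.sum()))

model = ReGMM_VGAE(num_neurons=16, embedding_size=4,
                   num_features=F_DIM, nClusters=K)

save_path, dataset = './results/', 'toy'
os.makedirs(save_path + dataset + '/pretrain', exist_ok=True)
model.pretrain(adj_norm, features, adj_label, y, weight_tensor, norm,
               epochs=50, lr=0.01, save_path=save_path, dataset=dataset)
# Save the GMM-initialized weights where train() will look for them:
torch.save(model.state_dict(), save_path + dataset + '/pretrain/model.pk')
model.train(adj_norm, adj, features, y, norm, epochs=60, lr=0.01,
            beta1=0.3, beta2=0.01, save_path=save_path, dataset=dataset)
print(model.predict(model.encode(features, adj_norm)[2]))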
VaibhavBhujade/Blockchain-ERP-interoperability
odoo-13.0/addons/stock_account/models/account_chart_template.py
b5190a037fb6615386f7cbad024d51b0abd4ba03
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import api, models, _ import logging _logger = logging.getLogger(__name__) class AccountChartTemplate(models.Model): _inherit = "account.chart.template" @api.model def generate_journals(self, acc_template_ref, company, journals_dict=None): journal_to_add = [{'name': _('Inventory Valuation'), 'type': 'general', 'code': 'STJ', 'favorite': False, 'sequence': 8}] return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add) def generate_properties(self, acc_template_ref, company, property_list=None): res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company) PropertyObj = self.env['ir.property'] # Property Stock Journal value = self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=', 'STJ'), ('type', '=', 'general')], limit=1) if value: field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=', 'product.category'), ('relation', '=', 'account.journal')], limit=1) vals = { 'name': 'property_stock_journal', 'company_id': company.id, 'fields_id': field.id, 'value': 'account.journal,%s' % value.id, } properties = PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id', '=', company.id)]) if properties: # the property exist: modify it properties.write(vals) else: # create the property PropertyObj.create(vals) todo_list = [ # Property Stock Accounts 'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id', 'property_stock_valuation_account_id', ] for record in todo_list: account = getattr(self, record) value = account and 'account.account,' + str(acc_template_ref[account.id]) or False if value: field = self.env['ir.model.fields'].search([('name', '=', record), ('model', '=', 'product.category'), ('relation', '=', 'account.account')], limit=1) vals = { 'name': record, 'company_id': company.id, 'fields_id': field.id, 'value': value, } properties = PropertyObj.search([('name', '=', record), ('company_id', '=', company.id)], limit=1) if not properties: # create the property PropertyObj.create(vals) elif not properties.value_reference: # update the property if False properties.write(vals) return res
[((7, 10, 7, 37), 'logging.getLogger', 'logging.getLogger', ({(7, 28, 7, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((15, 35, 15, 59), 'odoo._', '_', ({(15, 37, 15, 58): '"""Inventory Valuation"""'}, {}), "('Inventory Valuation')", False, 'from odoo import api, models, _\n')]
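Since the rows created above are plain ir.property records, the result is easy to verify interactively. A minimal sketch from an Odoo 13 shell — env and an already-configured company are assumed, and both searches simply mirror the domains used in generate_properties():

# Inside `odoo shell` (assumption: the chart template has been applied, so
# generate_journals/generate_properties above have already run).
company = env.company
journal = env['account.journal'].search(
    [('company_id', '=', company.id),
     ('code', '=', 'STJ'),
     ('type', '=', 'general')], limit=1)
prop = env['ir.property'].search(
    [('name', '=', 'property_stock_journal'),
     ('company_id', '=', company.id)], limit=1)
# ir.property stores many2one defaults as '<model>,<id>' reference strings:
assert prop.value_reference == 'account.journal,%s' % journal.id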
BarneyQiao/pcl.pytorch
lib/roi_data/loader.py
4e0280e5e1470f705e620eda26f881d627c5016c
import math import numpy as np import numpy.random as npr import torch import torch.utils.data as data import torch.utils.data.sampler as torch_sampler from torch.utils.data.dataloader import default_collate from torch._six import int_classes as _int_classes from core.config import cfg from roi_data.minibatch import get_minibatch import utils.blob as blob_utils # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes class RoiDataLoader(data.Dataset): def __init__(self, roidb, num_classes, training=True): self._roidb = roidb self._num_classes = num_classes self.training = training self.DATA_SIZE = len(self._roidb) def __getitem__(self, index_tuple): index, ratio = index_tuple single_db = [self._roidb[index]] blobs, valid = get_minibatch(single_db, self._num_classes) #TODO: Check if minibatch is valid ? If not, abandon it. # Need to change _worker_loop in torch.utils.data.dataloader.py. # Squeeze batch dim # for key in blobs: # if key != 'roidb': # blobs[key] = blobs[key].squeeze(axis=0) blobs['data'] = blobs['data'].squeeze(axis=0) return blobs def __len__(self): return self.DATA_SIZE def cal_minibatch_ratio(ratio_list): """Given the ratio_list, we want to make the RATIO same for each minibatch on each GPU. Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob` and 2) cfg.TRAIN.SCALES containing SINGLE scale. Since all prepared images will have same min side length of cfg.TRAIN.SCALES[0], we can pad and batch images base on that. """ DATA_SIZE = len(ratio_list) ratio_list_minibatch = np.empty((DATA_SIZE,)) num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers for i in range(num_minibatch): left_idx = i * cfg.TRAIN.IMS_PER_BATCH right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1) if ratio_list[right_idx] < 1: # for ratio < 1, we preserve the leftmost in each batch. target_ratio = ratio_list[left_idx] elif ratio_list[left_idx] > 1: # for ratio > 1, we preserve the rightmost in each batch. target_ratio = ratio_list[right_idx] else: # for ratio cross 1, we make it to be 1. target_ratio = 1 ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio return ratio_list_minibatch class MinibatchSampler(torch_sampler.Sampler): def __init__(self, ratio_list, ratio_index): self.ratio_list = ratio_list self.ratio_index = ratio_index self.num_data = len(ratio_list) def __iter__(self): rand_perm = npr.permutation(self.num_data) ratio_list = self.ratio_list[rand_perm] ratio_index = self.ratio_index[rand_perm] # re-calculate minibatch ratio list ratio_list_minibatch = cal_minibatch_ratio(ratio_list) return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) def __len__(self): return self.num_data class BatchSampler(torch_sampler.BatchSampler): r"""Wraps another sampler to yield a mini-batch of indices. Args: sampler (Sampler): Base sampler. batch_size (int): Size of mini-batch. 
        drop_last (bool): If ``True``, the sampler will drop the last batch if
            its size would be less than ``batch_size``

    Example:
        >>> list(BatchSampler(range(10), batch_size=3, drop_last=False))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
        >>> list(BatchSampler(range(10), batch_size=3, drop_last=True))
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    """

    def __init__(self, sampler, batch_size, drop_last):
        if not isinstance(sampler, torch_sampler.Sampler):
            raise ValueError("sampler should be an instance of "
                             "torch.utils.data.Sampler, but got sampler={}"
                             .format(sampler))
        if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \
                batch_size <= 0:
            raise ValueError("batch_size should be a positive integral value, "
                             "but got batch_size={}".format(batch_size))
        if not isinstance(drop_last, bool):
            raise ValueError("drop_last should be a boolean value, but got "
                             "drop_last={}".format(drop_last))
        self.sampler = sampler
        self.batch_size = batch_size
        self.drop_last = drop_last

    def __iter__(self):
        batch = []
        for idx in self.sampler:
            batch.append(idx)  # unlike torch's BatchSampler, keep the
                               # (index, ratio) tuple rather than int(idx)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        if len(batch) > 0 and not self.drop_last:
            yield batch

    def __len__(self):
        if self.drop_last:
            return len(self.sampler) // self.batch_size
        else:
            return (len(self.sampler) + self.batch_size - 1) // self.batch_size


def collate_minibatch(list_of_blobs):
    """Stack samples separately and return a list of minibatches.
    A batch contains NUM_GPUS minibatches, and image sizes may differ across
    minibatches. Hence, we need to stack samples from each minibatch separately.
    """
    Batch = {key: [] for key in list_of_blobs[0]}
    # Because roidb consists of entries of variable length, it can't be batched
    # into a tensor. So we keep roidb in the type of "list of ndarray".
    lists = []
    for blobs in list_of_blobs:
        lists.append({'data': blobs.pop('data'),
                      'rois': blobs.pop('rois'),
                      'labels': blobs.pop('labels')})
    for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):
        mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)]
        minibatch = default_collate(mini_list)
        for key in minibatch:
            Batch[key].append(minibatch[key])
    return Batch
[((51, 27, 51, 49), 'numpy.empty', 'np.empty', ({(51, 36, 51, 48): '(DATA_SIZE,)'}, {}), '((DATA_SIZE,))', True, 'import numpy as np\n'), ((27, 23, 27, 66), 'roi_data.minibatch.get_minibatch', 'get_minibatch', ({(27, 37, 27, 46): 'single_db', (27, 48, 27, 65): 'self._num_classes'}, {}), '(single_db, self._num_classes)', False, 'from roi_data.minibatch import get_minibatch\n'), ((52, 24, 52, 68), 'numpy.ceil', 'np.ceil', ({(52, 32, 52, 67): 'DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH'}, {}), '(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)', True, 'import numpy as np\n'), ((78, 20, 78, 50), 'numpy.random.permutation', 'npr.permutation', ({(78, 36, 78, 49): 'self.num_data'}, {}), '(self.num_data)', True, 'import numpy.random as npr\n'), ((153, 20, 153, 46), 'torch.utils.data.dataloader.default_collate', 'default_collate', ({(153, 36, 153, 45): 'mini_list'}, {}), '(mini_list)', False, 'from torch.utils.data.dataloader import default_collate\n')]
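For context, a sketch of how these pieces are usually wired together. roidb, ratio_list and ratio_index are assumed to come from the repo's dataset-preparation step (they are not built here), num_classes=21 and num_workers=4 are placeholder values, and cfg is the same core.config imported above:

from torch.utils.data import DataLoader

dataset = RoiDataLoader(roidb, num_classes=21, training=True)
sampler = MinibatchSampler(ratio_list, ratio_index)   # yields (index, ratio)
batch_sampler = BatchSampler(sampler,
                             batch_size=cfg.TRAIN.IMS_PER_BATCH,
                             drop_last=True)
loader = DataLoader(dataset,
                    batch_sampler=batch_sampler,
                    num_workers=4,
                    collate_fn=collate_minibatch)
for batch in loader:
    # __getitem__ receives the (index, ratio) tuple from MinibatchSampler,
    # so every image in a minibatch shares the same target aspect ratio.
    data_list = batch['data']   # one collated tensor per minibatch
    break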
andywu113/fuhe_predict
venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py
7fd816ae83467aa659d420545cd3e25a5e933d5f
import warnings from distutils.version import LooseVersion import numpy as np import pytest from scipy import linalg from sklearn.model_selection import train_test_split from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.utils.testing import TempMemmap from sklearn.exceptions import ConvergenceWarning from sklearn import linear_model, datasets from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC # TODO: use another dataset that has multiple drops diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) n_samples = y.size def test_simple(): # Principle of Lars is to keep covariances tied and decreasing # also test verbose output from io import StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() _, _, coef_path_ = linear_model.lars_path( X, y, method='lar', verbose=10) sys.stdout = old_stdout for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert ocur == i + 1 else: # no more than max_pred variables can go into the active set assert ocur == X.shape[1] finally: sys.stdout = old_stdout def test_simple_precomputed(): # The same, with precomputed Gram matrix _, _, coef_path_ = linear_model.lars_path( X, y, Gram=G, method='lar') for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert ocur == i + 1 else: # no more than max_pred variables can go into the active set assert ocur == X.shape[1] def _assert_same_lars_path_result(output1, output2): assert_equal(len(output1), len(output2)) for o1, o2 in zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X, y, Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test that lars_path with no X and Gram raises exception Xy = np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None, Xy=Xy) def test_all_precomputed(): # Test that lars_path with precomputed Gram and Xy gives the right answer G = np.dot(X.T, X) Xy = np.dot(X.T, y) for method in 'lar', 'lasso': output = linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for expected, got in zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will change') # numpy deprecation def test_lars_lstsq(): # Test that Lars gives least square solution at the end # of the path X1 = 3 * X # use un-normalized dataset clf = linear_model.LassoLars(alpha=0.) 
clf.fit(X1, y) # Avoid FutureWarning about default value change when numpy >= 1.14 rcond = None if LooseVersion(np.__version__) >= '1.14' else -1 coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change') # numpy deprecation def test_lasso_gives_lstsq_solution(): # Test that Lars Lasso gives least square solution at the end # of the path _, _, coef_path_ = linear_model.lars_path(X, y, method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): # Check that lars_path is robust to collinearity in input X = np.array([[3., 3., 1.], [2., 2., 0.], [1., 1., 0]]) y = np.array([1., 0., 0]) rng = np.random.RandomState(0) f = ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1]) - y assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded n_samples = 10 X = rng.rand(n_samples, 5) y = np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test that the ``return_path=False`` option returns the correct output alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar') alpha_, _, coef = linear_model.lars_path( X, y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_precomputed(): # Test that the ``return_path=False`` option with Gram remains correct alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar', Gram=G) alpha_, _, coef = linear_model.lars_path( X, y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_all_precomputed(): # Test that the ``return_path=False`` option with Gram and Xy remains # correct X, y = 3 * diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _, coef = linear_model.lars_path( X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check for different values of precompute G = np.dot(X.T, X) clf = classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_ for precompute in [True, False, 'auto', None]: clf = classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix(): # Test when input is a singular matrix X1 = np.array([[1, 1.], [1., 1.]]) y1 = np.array([1, 1]) _, _, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def test_rank_deficient_design(): # consistency test that checks that LARS Lasso is handling rank # deficient input data (with n_features < rank) in the same way # as coordinate descent Lasso y = [5, 0, 5] for X in ( [[5, 0], [0, 5], [10, 10]], [[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]] ): # To be able to use 
the coefs to compute the objective function, # we need to turn off normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars = (1. / (2. * 3.) * linalg.norm(y - np.dot(X, coef_lars_)) ** 2 + .1 * linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2 + .1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd * (1. + 1e-8)) def test_lasso_lars_vs_lasso_cd(): # Test that LassoLars and Lasso using coordinate descent give the # same results. X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # similar test, with the classifiers for alpha in np.linspace(1e-2, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # same test, with normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars and Lasso using coordinate descent give the # same results when early stopping is used. # (test : before, in the middle, and in the last part of the path) alphas_min = [10, 0.9, 1e-4] for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) # same test, with normalization for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length(): # Test that the path length of the LassoLars is right lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the sequence of alphas is always decreasing assert np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on a very ill-conditioned design, and check that # it does not blow up, and stays somewhat close to a solution given # by the coordinate descent solver # Also test that lasso_path (using lars_path output style) gives # the same result as lars_path and previous lasso output style # under these conditions. 
rng = np.random.RandomState(42) # Generate data n, m = 70, 100 k = 5 X = rng.randn(n, m) w = np.zeros((m, 1)) i = np.arange(0, m) rng.shuffle(i) supp = i[:k] w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1) y = np.dot(X, w) sigma = 0.2 y += sigma * rng.rand(*y.shape) y = y.squeeze() lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation in which the LARS has to go # far in the path to converge, and check that LARS and coordinate # descent give the same answers # Note it used to be the case that Lars had to use the drop for good # strategy for this but this is no longer the case with the # equality_tolerance checks X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]] y = [10, 10, 1] alpha = .0001 def objective_function(coef): return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2 + alpha * linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1. + 1e-8)) def test_lars_add_features(): # assure that at least some features get added if necessary # test for 6d2b4c # Hilbert matrix n = 5 H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path should be of length 6 + 1 in a Lars going down to 6 # non-zero coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings def test_multitarget(): # Assure that estimators receiving multidimensional y do the right thing Y = np.vstack([y, y ** 2]).T n_targets = Y.shape[1] estimators = [ linear_model.LassoLars(), linear_model.Lars(), # regression test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator in estimators: estimator.fit(X, Y) Y_pred = estimator.predict(X) alphas, active, coef, path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k in range(n_targets): estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def test_lars_cv(): # Test the LassoLarsCV object by checking that the optimal alpha # increases as the number of samples increases. # This property is not actually guaranteed in general and is just a # property of the given dataset, with the given steps chosen. 
old_alpha = 0 lars_cv = linear_model.LassoLarsCV() for length in (400, 200, 100): X = diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as w: rng = np.random.RandomState(42) x = rng.randn(len(y)) X = diabetes.data X = np.c_[X, x, x] # add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert len(w) == 0 def test_lasso_lars_ic(): # Test the LassoLarsIC object by checking that # - some good features are selected. # - alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def test_lars_path_readonly_data(): # When using automated memory mapping on large input, the # fold data is in read-only mode # This is a non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): # The following should not fail despite copy=False _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22 def test_lars_path_positive_constraint(): # this is the main test for the positive parameter on the lars_path method # the estimator classes just make use of this function # we do the test on the diabetes dataset # ensure that we get negative coefficients when positive=False # and all positive when positive=True # for method 'lar' (default) and lasso # Once deprecation of LAR + positive option is done use these: # assert_raises(ValueError, linear_model.lars_path, diabetes['data'], # diabetes['target'], method='lar', positive=True) with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar', positive=True) method = 'lasso' _, _, coefs = \ linear_model.lars_path(X, y, return_path=True, method=method, positive=False) assert coefs.min() < 0 _, _, coefs = \ linear_model.lars_path(X, y, return_path=True, method=method, positive=True) assert coefs.min() >= 0 # now we gonna test the positive option for all estimator classes default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def test_estimatorclasses_positive_constraint(): # testing the transmissibility for the positive option of all estimator # classes in this same function here default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} for estname in estimator_parameter_map: params = default_parameter.copy() 
params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y) assert estimator.coef_.min() < 0 estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y) assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars and Lasso using coordinate descent give the # same results when using the positive option # This test is basically a copy of the above with additional positive # option. However for the middle part, the comparison of coefficient values # for a range of alphas, we had to make an adaptations. See below. # not normalized data X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # The range of alphas chosen for coefficient comparison here is restricted # as compared with the above test without the positive option. This is due # to the circumstance that the Lars-Lasso algorithm does not converge to # the least-squares-solution for small alphas, see 'Least Angle Regression' # by Efron et al 2004. The coefficients are typically in congruence up to # the smallest alpha reached by the Lars-Lasso algorithm and start to # diverge thereafter. See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation(): # Test that sklearn LassoLars implementation agrees with the LassoLars # implementation available in R (lars library) under the following # scenarios: # 1) fit_intercept=False and normalize=False # 2) fit_intercept=True and normalize=True # Let's generate the data used in the bug report 7778 y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) x = np.array([[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0], [0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X = x.T ########################################################################### # Scenario 1: Let's compare R vs sklearn when fit_intercept=False and # normalize=False ########################################################################### # # The R result was obtained using the following code: # # library(lars) # model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE, # trace=TRUE, normalize=FALSE) # r = t(model_lasso_lars$beta) # r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 
0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### ########################################################################### # Scenario 2: Let's compare R vs sklearn when fit_intercept=True and # normalize=True # # Note: When normalize is equal to True, R returns the coefficients in # their original units, that is, they are rescaled back, whereas sklearn # does not do that, therefore, we need to do this step before comparing # their results. ########################################################################### # # The R result was obtained using the following code: # # library(lars) # model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE, # trace=TRUE, normalize=TRUE) # r2 = t(model_lasso_lars2$beta) r2 = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026], [0, 0, 0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_ # Let's rescale back the coefficients returned by sklearn before comparing # against the R result (read the note above) temp = X - np.mean(X, axis=0) normx = np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X): """ Test that user input regarding copy_X is not being overridden (it was until at least version 0.21) """ lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y) assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): """ Test that user input to .fit for copy_X overrides default __init__ value """ lasso_lars = LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y, copy_X=copy_X) assert copy_X == np.array_equal(X, X_copy)
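Two of the invariants these tests assert — the Lasso-LARS path ending at the least-squares solution, and the regularization strengths decreasing monotonically along the path — are easy to reproduce standalone with the same public lars_path API; a quick sketch:

import numpy as np
from sklearn import datasets, linear_model

X, y = datasets.load_diabetes(return_X_y=True)
alphas, active, coefs = linear_model.lars_path(X, y, method='lasso')
coef_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)
assert np.allclose(coefs[:, -1], coef_lstsq, atol=1e-4)   # path ends at OLS
assert np.all(np.diff(alphas) < 0)                        # alphas decrease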
[((24, 11, 24, 35), 'sklearn.datasets.load_diabetes', 'datasets.load_diabetes', ({}, {}), '()', False, 'from sklearn import linear_model, datasets\n'), ((26, 4, 26, 18), 'numpy.dot', 'np.dot', ({(26, 11, 26, 14): 'X.T', (26, 16, 26, 17): 'X'}, {}), '(X.T, X)', True, 'import numpy as np\n'), ((27, 5, 27, 19), 'numpy.dot', 'np.dot', ({(27, 12, 27, 15): 'X.T', (27, 17, 27, 18): 'y'}, {}), '(X.T, y)', True, 'import numpy as np\n'), ((86, 1, 86, 52), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(86, 25, 86, 33): '"""method"""', (86, 35, 86, 51): "['lar', 'lasso']"}, {}), "('method', ['lar', 'lasso'])", False, 'import pytest\n'), ((87, 1, 87, 54), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(87, 25, 87, 38): '"""return_path"""', (87, 40, 87, 53): '[True, False]'}, {}), "('return_path', [True, False])", False, 'import pytest\n'), ((117, 1, 117, 68), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', ({(117, 28, 117, 67): '"""ignore: `rcond` parameter will change"""'}, {}), "('ignore: `rcond` parameter will change')", False, 'import pytest\n'), ((131, 1, 131, 67), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', ({(131, 28, 131, 66): '"""ignore:`rcond` parameter will change"""'}, {}), "('ignore:`rcond` parameter will change')", False, 'import pytest\n'), ((202, 1, 202, 62), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', ({(202, 28, 202, 61): '"""ignore: The default value of cv"""'}, {}), "('ignore: The default value of cv')", False, 'import pytest\n'), ((203, 1, 205, 75), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(204, 8, 204, 20): '"""classifier"""', (205, 8, 205, 74): '[linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]'}, {}), "('classifier', [linear_model.Lars, linear_model.\n LarsCV, linear_model.LassoLarsIC])", False, 'import pytest\n'), ((437, 1, 437, 62), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', ({(437, 28, 437, 61): '"""ignore: The default value of cv"""'}, {}), "('ignore: The default value of cv')", False, 'import pytest\n'), ((454, 1, 454, 52), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', ({(454, 28, 454, 51): '"""ignore::FutureWarning"""'}, {}), "('ignore::FutureWarning')", False, 'import pytest\n'), ((500, 1, 500, 63), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', ({(500, 28, 500, 62): '"""ignore: The default of the `iid`"""'}, {}), "('ignore: The default of the `iid`')", False, 'import pytest\n'), ((539, 1, 539, 62), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', ({(539, 28, 539, 61): '"""ignore: The default value of cv"""'}, {}), "('ignore: The default value of cv')", False, 'import pytest\n'), ((708, 1, 708, 49), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(708, 25, 708, 33): '"""copy_X"""', (708, 35, 708, 48): '[True, False]'}, {}), "('copy_X', [True, False])", False, 'import pytest\n'), ((724, 1, 724, 49), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(724, 25, 724, 33): '"""copy_X"""', (724, 35, 724, 48): '[True, False]'}, {}), "('copy_X', [True, False])", False, 'import pytest\n'), ((64, 23, 65, 35), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((100, 9, 100, 23), 'numpy.dot', 'np.dot', ({(100, 16, 100, 19): 'X.T', (100, 21, 100, 22): 'y'}, {}), '(X.T, y)', True, 'import numpy as np\n'), ((101, 4, 102, 24), 'sklearn.utils.testing.assert_raises', 'assert_raises', (), '', False, 'from sklearn.utils.testing import assert_raises\n'), 
((107, 8, 107, 22), 'numpy.dot', 'np.dot', ({(107, 15, 107, 18): 'X.T', (107, 20, 107, 21): 'X'}, {}), '(X.T, X)', True, 'import numpy as np\n'), ((108, 9, 108, 23), 'numpy.dot', 'np.dot', ({(108, 16, 108, 19): 'X.T', (108, 21, 108, 22): 'y'}, {}), '(X.T, y)', True, 'import numpy as np\n'), ((123, 10, 123, 42), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((128, 4, 128, 52), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(128, 30, 128, 39): 'clf.coef_', (128, 41, 128, 51): 'coef_lstsq'}, {}), '(clf.coef_, coef_lstsq)', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((136, 23, 136, 67), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((138, 4, 138, 60), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(138, 30, 138, 40): 'coef_lstsq', (138, 42, 138, 59): 'coef_path_[:, (-1)]'}, {}), '(coef_lstsq, coef_path_[:, (-1)])', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((143, 8, 145, 31), 'numpy.array', 'np.array', ({(143, 17, 145, 30): '[[3.0, 3.0, 1.0], [2.0, 2.0, 0.0], [1.0, 1.0, 0]]'}, {}), '([[3.0, 3.0, 1.0], [2.0, 2.0, 0.0], [1.0, 1.0, 0]])', True, 'import numpy as np\n'), ((146, 8, 146, 29), 'numpy.array', 'np.array', ({(146, 17, 146, 28): '[1.0, 0.0, 0]'}, {}), '([1.0, 0.0, 0])', True, 'import numpy as np\n'), ((147, 10, 147, 34), 'numpy.random.RandomState', 'np.random.RandomState', ({(147, 32, 147, 33): '0'}, {}), '(0)', True, 'import numpy as np\n'), ((157, 8, 157, 27), 'numpy.zeros', 'np.zeros', ({(157, 17, 157, 26): 'n_samples'}, {}), '(n_samples)', True, 'import numpy as np\n'), ((158, 23, 161, 59), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((167, 29, 168, 27), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((169, 22, 170, 46), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((172, 4, 172, 54), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(172, 30, 172, 34): 'coef', (172, 36, 172, 53): 'coef_path_[:, (-1)]'}, {}), '(coef, coef_path_[:, (-1)])', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((178, 29, 179, 35), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((180, 22, 181, 54), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((183, 4, 183, 54), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(183, 30, 183, 34): 'coef', (183, 36, 183, 53): 'coef_path_[:, (-1)]'}, {}), '(coef, coef_path_[:, (-1)])', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((191, 8, 191, 22), 'numpy.dot', 'np.dot', ({(191, 15, 191, 18): 'X.T', (191, 20, 191, 21): 'X'}, {}), '(X.T, X)', True, 'import numpy as np\n'), ((192, 9, 192, 23), 'numpy.dot', 'np.dot', ({(192, 16, 192, 19): 'X.T', (192, 21, 192, 22): 'y'}, {}), '(X.T, y)', True, 'import numpy as np\n'), ((193, 29, 194, 59), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((195, 22, 196, 78), 
'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((198, 4, 198, 54), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(198, 30, 198, 34): 'coef', (198, 36, 198, 53): 'coef_path_[:, (-1)]'}, {}), '(coef, coef_path_[:, (-1)])', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((208, 8, 208, 22), 'numpy.dot', 'np.dot', ({(208, 15, 208, 18): 'X.T', (208, 20, 208, 21): 'X'}, {}), '(X.T, X)', True, 'import numpy as np\n'), ((220, 9, 220, 38), 'numpy.array', 'np.array', ({(220, 18, 220, 37): '[[1, 1.0], [1.0, 1.0]]'}, {}), '([[1, 1.0], [1.0, 1.0]])', True, 'import numpy as np\n'), ((221, 9, 221, 25), 'numpy.array', 'np.array', ({(221, 18, 221, 24): '[1, 1]'}, {}), '([1, 1])', True, 'import numpy as np\n'), ((222, 22, 222, 52), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', ({(222, 45, 222, 47): 'X1', (222, 49, 222, 51): 'y1'}, {}), '(X1, y1)', False, 'from sklearn import linear_model, datasets\n'), ((223, 4, 223, 60), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(223, 30, 223, 41): 'coef_path.T', (223, 43, 223, 59): '[[0, 0], [1, 0]]'}, {}), '(coef_path.T, [[0, 0], [1, 0]])', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((258, 28, 258, 72), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((259, 15, 259, 64), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((269, 17, 269, 48), 'numpy.linspace', 'np.linspace', ({(269, 29, 269, 33): '(0.01)', (269, 35, 269, 43): '(1 - 0.01)', (269, 45, 269, 47): '(20)'}, {}), '(0.01, 1 - 0.01, 20)', True, 'import numpy as np\n'), ((278, 28, 278, 72), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((279, 15, 280, 43), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((319, 12, 319, 36), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', ({}, {}), '()', False, 'from sklearn import linear_model, datasets\n'), ((321, 13, 321, 59), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((323, 4, 323, 64), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(323, 30, 323, 47): 'lasso.alphas_[:3]', (323, 49, 323, 63): 'lasso2.alphas_'}, {}), '(lasso.alphas_[:3], lasso2.alphas_)', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((335, 10, 335, 35), 'numpy.random.RandomState', 'np.random.RandomState', ({(335, 32, 335, 34): '42'}, {}), '(42)', True, 'import numpy as np\n'), ((341, 8, 341, 24), 'numpy.zeros', 'np.zeros', ({(341, 17, 341, 23): '(m, 1)'}, {}), '((m, 1))', True, 'import numpy as np\n'), ((342, 8, 342, 23), 'numpy.arange', 'np.arange', ({(342, 18, 342, 19): '0', (342, 21, 342, 22): 'm'}, {}), '(0, m)', True, 'import numpy as np\n'), ((346, 8, 346, 20), 'numpy.dot', 'np.dot', ({(346, 15, 346, 16): 'X', (346, 18, 346, 19): 'w'}, {}), '(X, w)', True, 'import numpy as np\n'), ((350, 32, 350, 76), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((352, 24, 355, 68), 'sklearn.linear_model.lasso_path', 'linear_model.lasso_path', (), '', False, 'from sklearn import linear_model, 
datasets\n'), ((357, 4, 357, 64), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', (), '', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((377, 11, 377, 63), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((378, 4, 378, 52), 'sklearn.utils.testing.assert_warns', 'assert_warns', ({(378, 17, 378, 35): 'ConvergenceWarning', (378, 37, 378, 45): 'lars.fit', (378, 47, 378, 48): 'X', (378, 50, 378, 51): 'y'}, {}), '(ConvergenceWarning, lars.fit, X, y)', False, 'from sklearn.utils.testing import assert_warns\n'), ((382, 20, 382, 78), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((386, 4, 386, 47), 'sklearn.utils.testing.assert_less', 'assert_less', ({(386, 16, 386, 24): 'lars_obj', (386, 26, 386, 46): '(cd_obj * (1.0 + 1e-08))'}, {}), '(lars_obj, cd_obj * (1.0 + 1e-08))', False, 'from sklearn.utils.testing import assert_less\n'), ((401, 11, 401, 64), 'sklearn.linear_model.Lars', 'linear_model.Lars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((444, 14, 444, 40), 'sklearn.linear_model.LassoLarsCV', 'linear_model.LassoLarsCV', ({}, {}), '()', False, 'from sklearn import linear_model, datasets\n'), ((471, 15, 471, 46), 'sklearn.linear_model.LassoLarsIC', 'linear_model.LassoLarsIC', ({(471, 40, 471, 45): '"""bic"""'}, {}), "('bic')", False, 'from sklearn import linear_model, datasets\n'), ((472, 15, 472, 46), 'sklearn.linear_model.LassoLarsIC', 'linear_model.LassoLarsIC', ({(472, 40, 472, 45): '"""aic"""'}, {}), "('aic')", False, 'from sklearn import linear_model, datasets\n'), ((473, 10, 473, 35), 'numpy.random.RandomState', 'np.random.RandomState', ({(473, 32, 473, 34): '42'}, {}), '(42)', True, 'import numpy as np\n'), ((480, 4, 480, 52), 'sklearn.utils.testing.assert_greater', 'assert_greater', ({(480, 19, 480, 34): 'lars_bic.alpha_', (480, 36, 480, 51): 'lars_aic.alpha_'}, {}), '(lars_bic.alpha_, lars_aic.alpha_)', False, 'from sklearn.utils.testing import assert_greater\n'), ((485, 18, 485, 55), 'sklearn.linear_model.LassoLarsIC', 'linear_model.LassoLarsIC', ({(485, 43, 485, 54): '"""<unknown>"""'}, {}), "('<unknown>')", False, 'from sklearn import linear_model, datasets\n'), ((486, 4, 486, 52), 'sklearn.utils.testing.assert_raises', 'assert_raises', ({(486, 18, 486, 28): 'ValueError', (486, 30, 486, 45): 'lars_broken.fit', (486, 47, 486, 48): 'X', (486, 50, 486, 51): 'y'}, {}), '(ValueError, lars_broken.fit, X, y)', False, 'from sklearn.utils.testing import assert_raises\n'), ((494, 20, 494, 59), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((520, 8, 521, 46), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((525, 8, 526, 45), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((570, 28, 571, 65), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((572, 15, 572, 79), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((590, 17, 590, 48), 'numpy.linspace', 'np.linspace', ({(590, 29, 590, 33): '(0.6)', (590, 35, 590, 43): '(1 - 0.01)', (590, 45, 590, 47): '(20)'}, {}), '(0.6, 1 - 0.01, 20)', True, 
'import numpy as np\n'), ((600, 28, 601, 65), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((602, 15, 603, 58), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((619, 8, 620, 32), 'numpy.array', 'np.array', ({(619, 17, 620, 31): '[-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]'}, {}), '([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366])', True, 'import numpy as np\n'), ((621, 8, 626, 32), 'numpy.array', 'np.array', ({(621, 17, 626, 31): '[[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0], [0.30114139, \n -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278, \n 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, \n 0.40427291]]'}, {}), '([[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0], [\n 0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, \n 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -\n 0.0803561, 0.40427291]])', True, 'import numpy as np\n'), ((643, 8, 658, 41), 'numpy.array', 'np.array', ({(643, 17, 658, 40): '[[0, 0, 0, 0, 0, -79.81036280949903, -83.52878873278283, -83.77765373919071,\n -83.78415693288893, -84.03339059175666], [0, 0, 0, 0, -\n 0.476624256777266, 0, 0, 0, 0, 0.025219751009936], [0, -\n 3.577397088285891, -4.702795355871871, -7.016748621359461, -\n 7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, \n 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, \n 2.811549786389614, 2.813766976061531, 2.817462468949557, \n 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -\n 3.457726183014808, -4.02130452206071, -45.827461592423745, -\n 47.776608869312305, -47.9115616107464, -47.914845922736234, -\n 48.03956233426572]]'}, {}), '([[0, 0, 0, 0, 0, -79.81036280949903, -83.52878873278283, -\n 83.77765373919071, -83.78415693288893, -84.03339059175666], [0, 0, 0, 0,\n -0.476624256777266, 0, 0, 0, 0, 0.025219751009936], [0, -\n 3.577397088285891, -4.702795355871871, -7.016748621359461, -\n 7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, \n 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, \n 2.811549786389614, 2.813766976061531, 2.817462468949557, \n 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -\n 3.457726183014808, -4.02130452206071, -45.827461592423745, -\n 47.776608869312305, -47.9115616107464, -47.914845922736234, -\n 48.03956233426572]])', True, 'import numpy as np\n'), ((660, 23, 661, 62), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((665, 4, 665, 55), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', (), '', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((685, 9, 691, 41), 'numpy.array', 'np.array', ({(685, 18, 691, 40): '[[0, 0, 0, 0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026], [0, 0, \n 0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, \n 17.389369207545062, 26.9716568156435], [0, 0, -1.569380717440311, -\n 5.924804108067312, -7.996385265061972]]'}, {}), '([[0, 0, 0, 0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026],\n [0, 0, 0, 0, 9.901611055290553], [0, 7.495923132833733, \n 9.245133544334507, 17.389369207545062, 26.9716568156435], [0, 0, -\n 1.569380717440311, -5.924804108067312, -7.996385265061972]])', True, 'import numpy as np\n'), 
((693, 24, 694, 62), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((704, 4, 704, 57), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', (), '', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((715, 17, 715, 61), 'sklearn.linear_model.least_angle.LassoLarsIC', 'LassoLarsIC', (), '', False, 'from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC\n'), ((716, 10, 716, 34), 'numpy.random.RandomState', 'np.random.RandomState', ({(716, 32, 716, 33): '0'}, {}), '(0)', True, 'import numpy as np\n'), ((730, 17, 730, 46), 'sklearn.linear_model.least_angle.LassoLarsIC', 'LassoLarsIC', (), '', False, 'from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC\n'), ((731, 10, 731, 34), 'numpy.random.RandomState', 'np.random.RandomState', ({(731, 32, 731, 33): '0'}, {}), '(0)', True, 'import numpy as np\n'), ((39, 21, 39, 31), 'io.StringIO', 'StringIO', ({}, {}), '()', False, 'from io import StringIO\n'), ((41, 27, 42, 43), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((69, 14, 69, 30), 'numpy.dot', 'np.dot', ({(69, 21, 69, 24): 'X.T', (69, 26, 69, 29): 'res'}, {}), '(X.T, res)', True, 'import numpy as np\n'), ((83, 8, 83, 31), 'sklearn.utils.testing.assert_allclose', 'assert_allclose', ({(83, 24, 83, 26): 'o1', (83, 28, 83, 30): 'o2'}, {}), '(o1, o2)', False, 'from sklearn.utils.testing import assert_allclose\n'), ((90, 8, 92, 36), 'sklearn.linear_model.lars_path_gram', 'linear_model.lars_path_gram', (), '', False, 'from sklearn import linear_model, datasets\n'), ((93, 8, 95, 36), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((110, 17, 110, 60), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((111, 21, 112, 58), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((127, 17, 127, 52), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (), '', True, 'import numpy as np\n'), ((137, 17, 137, 38), 'numpy.linalg.lstsq', 'np.linalg.lstsq', ({(137, 33, 137, 34): 'X', (137, 36, 137, 37): 'y'}, {}), '(X, y)', True, 'import numpy as np\n'), ((152, 15, 152, 43), 'numpy.dot', 'np.dot', ({(152, 22, 152, 23): 'X', (152, 25, 152, 42): 'coef_path_[:, (-1)]'}, {}), '(X, coef_path_[:, (-1)])', True, 'import numpy as np\n'), ((162, 42, 162, 67), 'numpy.zeros_like', 'np.zeros_like', ({(162, 56, 162, 66): 'coef_path_'}, {}), '(coef_path_)', True, 'import numpy as np\n'), ((215, 8, 215, 64), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', (), '', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((241, 15, 241, 58), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((246, 24, 246, 73), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((250, 8, 250, 51), 'sklearn.utils.testing.assert_less', 'assert_less', ({(250, 20, 250, 28): 'obj_lars', (250, 30, 250, 50): '(obj_cd * (1.0 + 1e-08))'}, {}), '(obj_lars, obj_cd * (1.0 + 1e-08))', False, 'from sklearn.utils.testing import assert_less\n'), ((265, 16, 265, 47), 'scipy.linalg.norm', 'linalg.norm', ({(265, 28, 265, 46): 'c - 
lasso_cd.coef_'}, {}), '(c - lasso_cd.coef_)', False, 'from scipy import linalg\n'), ((266, 8, 266, 32), 'sklearn.utils.testing.assert_less', 'assert_less', ({(266, 20, 266, 25): 'error', (266, 27, 266, 31): '(0.01)'}, {}), '(error, 0.01)', False, 'from sklearn.utils.testing import assert_less\n'), ((273, 14, 273, 50), 'scipy.linalg.norm', 'linalg.norm', ({(273, 26, 273, 49): 'clf1.coef_ - clf2.coef_'}, {}), '(clf1.coef_ - clf2.coef_)', False, 'from scipy import linalg\n'), ((274, 8, 274, 30), 'sklearn.utils.testing.assert_less', 'assert_less', ({(274, 20, 274, 23): 'err', (274, 25, 274, 29): '(0.001)'}, {}), '(err, 0.001)', False, 'from sklearn.utils.testing import assert_less\n'), ((286, 16, 286, 47), 'scipy.linalg.norm', 'linalg.norm', ({(286, 28, 286, 46): 'c - lasso_cd.coef_'}, {}), '(c - lasso_cd.coef_)', False, 'from scipy import linalg\n'), ((287, 8, 287, 32), 'sklearn.utils.testing.assert_less', 'assert_less', ({(287, 20, 287, 25): 'error', (287, 27, 287, 31): '(0.01)'}, {}), '(error, 0.01)', False, 'from sklearn.utils.testing import assert_less\n'), ((297, 32, 298, 75), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((299, 19, 299, 68), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((302, 16, 302, 63), 'scipy.linalg.norm', 'linalg.norm', ({(302, 28, 302, 62): 'lasso_path[:, (-1)] - lasso_cd.coef_'}, {}), '(lasso_path[:, (-1)] - lasso_cd.coef_)', False, 'from scipy import linalg\n'), ((303, 8, 303, 32), 'sklearn.utils.testing.assert_less', 'assert_less', ({(303, 20, 303, 25): 'error', (303, 27, 303, 31): '(0.01)'}, {}), '(error, 0.01)', False, 'from sklearn.utils.testing import assert_less\n'), ((307, 32, 308, 75), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((309, 19, 310, 47), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((313, 16, 313, 63), 'scipy.linalg.norm', 'linalg.norm', ({(313, 28, 313, 62): 'lasso_path[:, (-1)] - lasso_cd.coef_'}, {}), '(lasso_path[:, (-1)] - lasso_cd.coef_)', False, 'from scipy import linalg\n'), ((314, 8, 314, 32), 'sklearn.utils.testing.assert_less', 'assert_less', ({(314, 20, 314, 25): 'error', (314, 27, 314, 31): '(0.01)'}, {}), '(error, 0.01)', False, 'from sklearn.utils.testing import assert_less\n'), ((396, 11, 396, 23), 'numpy.arange', 'np.arange', ({(396, 21, 396, 22): 'n'}, {}), '(n)', True, 'import numpy as np\n'), ((397, 18, 397, 40), 'numpy.isfinite', 'np.isfinite', ({(397, 30, 397, 39): 'clf.coef_'}, {}), '(clf.coef_)', True, 'import numpy as np\n'), ((412, 8, 412, 30), 'numpy.vstack', 'np.vstack', ({(412, 18, 412, 29): '[y, y ** 2]'}, {}), '([y, y ** 2])', True, 'import numpy as np\n'), ((415, 8, 415, 32), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', ({}, {}), '()', False, 'from sklearn import linear_model, datasets\n'), ((416, 8, 416, 27), 'sklearn.linear_model.Lars', 'linear_model.Lars', ({}, {}), '()', False, 'from sklearn import linear_model, datasets\n'), ((418, 8, 418, 51), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((419, 8, 419, 46), 'sklearn.linear_model.Lars', 'linear_model.Lars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((449, 8, 449, 63), 'numpy.testing.assert_array_less', 'np.testing.assert_array_less', ({(449, 37, 
449, 46): 'old_alpha', (449, 48, 449, 62): 'lars_cv.alpha_'}, {}), '(old_alpha, lars_cv.alpha_)', True, 'import numpy as np\n'), ((456, 9, 456, 45), 'warnings.catch_warnings', 'warnings.catch_warnings', (), '', False, 'import warnings\n'), ((457, 14, 457, 39), 'numpy.random.RandomState', 'np.random.RandomState', ({(457, 36, 457, 38): '42'}, {}), '(42)', True, 'import numpy as np\n'), ((461, 18, 461, 54), 'sklearn.linear_model.LassoLarsCV', 'linear_model.LassoLarsCV', (), '', False, 'from sklearn import linear_model, datasets\n'), ((478, 18, 478, 42), 'numpy.where', 'np.where', ({(478, 27, 478, 41): 'lars_bic.coef_'}, {}), '(lars_bic.coef_)', True, 'import numpy as np\n'), ((479, 18, 479, 42), 'numpy.where', 'np.where', ({(479, 27, 479, 41): 'lars_aic.coef_'}, {}), '(lars_aic.coef_)', True, 'import numpy as np\n'), ((482, 16, 482, 35), 'numpy.max', 'np.max', ({(482, 23, 482, 34): 'nonzero_bic'}, {}), '(nonzero_bic)', True, 'import numpy as np\n'), ((495, 9, 495, 34), 'sklearn.utils.testing.TempMemmap', 'TempMemmap', ({(495, 20, 495, 33): 'splitted_data'}, {}), '(splitted_data)', False, 'from sklearn.utils.testing import TempMemmap\n'), ((497, 8, 497, 73), 'sklearn.linear_model.least_angle._lars_path_residues', '_lars_path_residues', (), '', False, 'from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC\n'), ((514, 9, 514, 57), 'pytest.warns', 'pytest.warns', (), '', False, 'import pytest\n'), ((515, 8, 517, 45), 'sklearn.linear_model.lars_path', 'linear_model.lars_path', (), '', False, 'from sklearn import linear_model, datasets\n'), ((578, 16, 578, 47), 'scipy.linalg.norm', 'linalg.norm', ({(578, 28, 578, 46): 'c - lasso_cd.coef_'}, {}), '(c - lasso_cd.coef_)', False, 'from scipy import linalg\n'), ((579, 8, 579, 32), 'sklearn.utils.testing.assert_less', 'assert_less', ({(579, 20, 579, 25): 'error', (579, 27, 579, 31): '(0.01)'}, {}), '(error, 0.01)', False, 'from sklearn.utils.testing import assert_less\n'), ((595, 14, 595, 50), 'scipy.linalg.norm', 'linalg.norm', ({(595, 26, 595, 49): 'clf1.coef_ - clf2.coef_'}, {}), '(clf1.coef_ - clf2.coef_)', False, 'from scipy import linalg\n'), ((596, 8, 596, 30), 'sklearn.utils.testing.assert_less', 'assert_less', ({(596, 20, 596, 23): 'err', (596, 25, 596, 29): '(0.001)'}, {}), '(err, 0.001)', False, 'from sklearn.utils.testing import assert_less\n'), ((607, 16, 607, 47), 'scipy.linalg.norm', 'linalg.norm', ({(607, 28, 607, 46): 'c - lasso_cd.coef_'}, {}), '(c - lasso_cd.coef_)', False, 'from scipy import linalg\n'), ((608, 8, 608, 32), 'sklearn.utils.testing.assert_less', 'assert_less', ({(608, 20, 608, 25): 'error', (608, 27, 608, 31): '(0.01)'}, {}), '(error, 0.01)', False, 'from sklearn.utils.testing import assert_less\n'), ((700, 15, 700, 33), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((701, 20, 701, 45), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((721, 21, 721, 46), 'numpy.array_equal', 'np.array_equal', ({(721, 36, 721, 37): 'X', (721, 39, 721, 45): 'X_copy'}, {}), '(X, X_copy)', True, 'import numpy as np\n'), ((736, 21, 736, 46), 'numpy.array_equal', 'np.array_equal', ({(736, 36, 736, 37): 'X', (736, 39, 736, 45): 'X_copy'}, {}), '(X, X_copy)', True, 'import numpy as np\n'), ((48, 18, 48, 34), 'numpy.dot', 'np.dot', ({(48, 25, 48, 28): 'X.T', (48, 30, 48, 33): 'res'}, {}), '(X.T, res)', True, 'import numpy as np\n'), ((68, 18, 68, 34), 'numpy.dot', 'np.dot', ({(68, 25, 68, 26): 'X', (68, 28, 68, 33): 'coef_'}, {}), '(X, coef_)', True, 'import numpy as np\n'), ((114, 12, 114, 
52), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(114, 38, 114, 46): 'expected', (114, 48, 114, 51): 'got'}, {}), '(expected, got)', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((126, 20, 126, 48), 'distutils.version.LooseVersion', 'LooseVersion', ({(126, 33, 126, 47): 'np.__version__'}, {}), '(np.__version__)', False, 'from distutils.version import LooseVersion\n'), ((211, 15, 211, 39), 'sklearn.utils.testing.ignore_warnings', 'ignore_warnings', ({(211, 31, 211, 38): 'clf.fit'}, {}), '(clf.fit)', False, 'from sklearn.utils.testing import ignore_warnings\n'), ((325, 18, 325, 40), 'numpy.diff', 'np.diff', ({(325, 26, 325, 39): 'lasso.alphas_'}, {}), '(lasso.alphas_)', True, 'import numpy as np\n'), ((394, 14, 394, 33), 'numpy.arange', 'np.arange', ({(394, 24, 394, 25): '(1)', (394, 27, 394, 32): '(n + 1)'}, {}), '(1, n + 1)', True, 'import numpy as np\n'), ((395, 10, 395, 48), 'sklearn.linear_model.Lars', 'linear_model.Lars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((430, 12, 430, 67), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(430, 38, 430, 47): 'alphas[k]', (430, 49, 430, 66): 'estimator.alphas_'}, {}), '(alphas[k], estimator.alphas_)', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((431, 12, 431, 67), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(431, 38, 431, 47): 'active[k]', (431, 49, 431, 66): 'estimator.active_'}, {}), '(active[k], estimator.active_)', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((432, 12, 432, 63), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(432, 38, 432, 45): 'coef[k]', (432, 47, 432, 62): 'estimator.coef_'}, {}), '(coef[k], estimator.coef_)', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((433, 12, 433, 68), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(433, 38, 433, 45): 'path[k]', (433, 47, 433, 67): 'estimator.coef_path_'}, {}), '(path[k], estimator.coef_path_)', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((434, 12, 434, 59), 'sklearn.utils.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(434, 38, 434, 50): 'Y_pred[:, (k)]', (434, 52, 434, 58): 'y_pred'}, {}), '(Y_pred[:, (k)], y_pred)', False, 'from sklearn.utils.testing import assert_array_almost_equal\n'), ((47, 22, 47, 38), 'numpy.dot', 'np.dot', ({(47, 29, 47, 30): 'X', (47, 32, 47, 37): 'coef_'}, {}), '(X, coef_)', True, 'import numpy as np\n'), ((151, 15, 151, 35), 'numpy.isnan', 'np.isnan', ({(151, 24, 151, 34): 'coef_path_'}, {}), '(coef_path_)', True, 'import numpy as np\n'), ((245, 27, 245, 53), 'scipy.linalg.norm', 'linalg.norm', ({(245, 39, 245, 49): 'coef_lars_', (245, 51, 245, 52): '(1)'}, {}), '(coef_lars_, 1)', False, 'from scipy import linalg\n'), ((249, 25, 249, 49), 'scipy.linalg.norm', 'linalg.norm', ({(249, 37, 249, 45): 'coef_cd_', (249, 47, 249, 48): '(1)'}, {}), '(coef_cd_, 1)', False, 'from scipy import linalg\n'), ((270, 15, 270, 67), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((271, 15, 272, 50), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((375, 26, 375, 46), 'scipy.linalg.norm', 'linalg.norm', ({(375, 38, 375, 42): 'coef', (375, 44, 375, 45): '(1)'}, {}), 
'(coef, 1)', False, 'from scipy import linalg\n'), ((394, 36, 394, 48), 'numpy.arange', 'np.arange', ({(394, 46, 394, 47): 'n'}, {}), '(n)', True, 'import numpy as np\n'), ((591, 15, 592, 69), 'sklearn.linear_model.LassoLars', 'linear_model.LassoLars', (), '', False, 'from sklearn import linear_model, datasets\n'), ((593, 15, 594, 65), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', (), '', False, 'from sklearn import linear_model, datasets\n'), ((244, 38, 244, 59), 'numpy.dot', 'np.dot', ({(244, 45, 244, 46): 'X', (244, 48, 244, 58): 'coef_lars_'}, {}), '(X, coef_lars_)', True, 'import numpy as np\n'), ((248, 53, 248, 72), 'numpy.dot', 'np.dot', ({(248, 60, 248, 61): 'X', (248, 63, 248, 71): 'coef_cd_'}, {}), '(X, coef_cd_)', True, 'import numpy as np\n'), ((374, 53, 374, 68), 'numpy.dot', 'np.dot', ({(374, 60, 374, 61): 'X', (374, 63, 374, 67): 'coef'}, {}), '(X, coef)', True, 'import numpy as np\n')]
FeroxTL/pynginxconfig-new
parser.py
71cb78c635930b0a764d3274646d436e8d2f1c4d
# coding: utf8
import copy
import re

from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location


def parse(s, parent_block):
    config = copy.copy(s)
    pos, brackets_level, param_start = 0, 0, 0

    while pos < len(config):
        if config[pos] == '#' and brackets_level == 0:
            re_sharp_comment = re.search(r'(?P<offset>[\s\n]*)#(?P<comment>.*)$', config, re.M)
            sharp_comment = re_sharp_comment.groupdict()
            parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment']))
            config = config[re_sharp_comment.end():]
            pos, param_start = 0, 0
            continue

        if config[pos] == ';' and brackets_level == 0:
            re_option = re.search(r'\s*(?P<param_name>\w+)\s*(?P<param_options>.*?);', config[param_start:], re.S)
            if not re_option:
                raise Exception('Wrong option')

            option = re_option.groupdict()
            parent_block[option['param_name']] = KeyValueOption(re.sub('[ \n]+', ' ', option['param_options']))
            config = config[re_option.end():]
            pos, param_start = 0, 0
            continue

        if config[pos] == '{':
            brackets_level += 1
        elif config[pos] == '}':
            brackets_level -= 1

            if brackets_level == 0 and param_start is not None:
                re_block = re.search(
                    r'(?P<param_name>\w+)\s*(?P<param_options>.*)\s*{(\n){0,1}(?P<block>(.|\n)*)}',
                    config[param_start:pos + 1],
                )
                block = re_block.groupdict()

                if block['param_name'].lower() == 'location':
                    new_block = Location(block['param_options'])
                    parent_block.add_location(new_block)
                else:
                    new_block = Block()
                    parent_block[block['param_name']] = new_block
                if block['block']:
                    parse(block['block'], new_block)

                config = config[re_block.end():]
                pos, param_start = 0, 0
                continue

        pos += 1

    if brackets_level != 0:
        raise Exception('Not closed bracket')


qwe = EmptyBlock()
parse("""#{ asd
#qweqeqwe{}
servername qweqweqweqweqwe; # comment {lalalal}
#1
server {
    listen 8080 tls;
    root /data/up1;
    location / {
        l200;
    }
    location /qwe{
        s 500;
    }#123
}#qweqwe""", qwe)
print(qwe.render())

qwe = EmptyBlock()
parse("""
servername wqeqweqwe;
http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;
    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    ##
    # Logging Settings
    ##
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;
    ##
    # Gzip Settings
    ##
    gzip on;
    gzip_disable "msie6";
}#123123
""", qwe)
print(qwe.render())
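
# A minimal usage sketch for parse(), assuming the Block API exercised above
# (EmptyBlock, dict-style option assignment, render()); the variable name is
# illustrative only.
example_root = EmptyBlock()
# A single key/value option: parse() stores it via parent_block[name] = ...
parse("worker_connections 768;", example_root)
print(example_root.render())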
[((66, 6, 66, 18), 'blocks.EmptyBlock', 'EmptyBlock', ({}, {}), '()', False, 'from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location\n'), ((89, 6, 89, 18), 'blocks.EmptyBlock', 'EmptyBlock', ({}, {}), '()', False, 'from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location\n'), ((9, 13, 9, 25), 'copy.copy', 'copy.copy', ({(9, 23, 9, 24): 's'}, {}), '(s)', False, 'import copy\n'), ((14, 31, 14, 94), 're.search', 're.search', ({(14, 41, 14, 79): '"""(?P<offset>[\\\\s\n]*)#(?P<comment>.*)$"""', (14, 81, 14, 87): 'config', (14, 89, 14, 93): 're.M'}, {}), '("""(?P<offset>[\\\\s\n]*)#(?P<comment>.*)$""", config, re.M)', False, 'import re\n'), ((22, 24, 22, 113), 're.search', 're.search', ({(22, 34, 22, 84): '"""\\\\s*(?P<param_name>\\\\w+)\\\\s*(?P<param_options>.*?);"""', (22, 86, 22, 106): 'config[param_start:]', (22, 108, 22, 112): 're.S'}, {}), "('\\\\s*(?P<param_name>\\\\w+)\\\\s*(?P<param_options>.*?);', config[\n param_start:], re.S)", False, 'import re\n'), ((16, 37, 16, 95), 'blocks.Comment', 'Comment', ({(16, 45, 16, 68): "sharp_comment['offset']", (16, 70, 16, 94): "sharp_comment['comment']"}, {}), "(sharp_comment['offset'], sharp_comment['comment'])", False, 'from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location\n'), ((27, 64, 27, 110), 're.sub', 're.sub', ({(27, 71, 27, 79): '"""[ \n]+"""', (27, 81, 27, 84): '""" """', (27, 86, 27, 109): "option['param_options']"}, {}), "('[ \\n]+', ' ', option['param_options'])", False, 'import re\n'), ((39, 27, 42, 17), 're.search', 're.search', ({(40, 20, 40, 97): '"""(?P<param_name>\\\\w+)\\\\s*(?P<param_options>.*)\\\\s*{(\n){0,1}(?P<block>(.|\n)*)}"""', (41, 20, 41, 47): 'config[param_start:pos + 1]'}, {}), '(\n """(?P<param_name>\\\\w+)\\\\s*(?P<param_options>.*)\\\\s*{(\n){0,1}(?P<block>(.|\n)*)}"""\n , config[param_start:pos + 1])', False, 'import re\n'), ((46, 32, 46, 64), 'blocks.Location', 'Location', ({(46, 41, 46, 63): "block['param_options']"}, {}), "(block['param_options'])", False, 'from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location\n'), ((49, 32, 49, 39), 'blocks.Block', 'Block', ({}, {}), '()', False, 'from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location\n')]
triompha/EarthWarrior3D
cocos2d/tools/jenkins-scripts/configs/cocos-2dx-develop-win32.py
d68a347902fa1ca1282df198860f5fb95f326797
import os
import subprocess
import sys

print 'Build Config:'
print ' Host:win7 x86'
print ' Branch:develop'
print ' Target:win32'
print ' "%VS110COMNTOOLS%..\IDE\devenv.com" "build\cocos2d-win32.vc2012.sln" /Build "Debug|Win32"'

if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False):
    node_name = os.environ['NODE_NAME']
    source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name
    source_dir = source_dir.replace("/", os.sep)
    os.system("xcopy " + source_dir + " . /E /Y /H")
os.system('git pull origin develop')
os.system('git submodule update --init --force')
ret = subprocess.call('"%VS110COMNTOOLS%..\IDE\devenv.com" "build\cocos2d-win32.vc2012.sln" /Build "Debug|Win32"', shell=True)
os.system('git clean -xdf -f')
print 'build exit'
print ret
if ret == 0:
    exit(0)
else:
    exit(1)
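
# The script above signals build success or failure through its exit code.
# A sketch of the same ending written against the sys module it already
# imports (build_cmd is a placeholder for the devenv command string above):
#
#     ret = subprocess.call(build_cmd, shell=True)
#     sys.exit(0 if ret == 0 else 1)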
[]
NumberAI/python-bandwidth-iris
iris_sdk/models/data/ord/rate_center_search_order.py
0e05f79d68b244812afb97e00fd65b3f46d00aa3
#!/usr/bin/env python

from iris_sdk.models.base_resource import BaseData
from iris_sdk.models.maps.ord.rate_center_search_order import \
    RateCenterSearchOrderMap


class RateCenterSearchOrder(RateCenterSearchOrderMap, BaseData):
    pass
[]
thanusha22/CEC-1
optimizer.py
02ad9247b006a348cc871a5714cf5abfa4a516af
from pathlib import Path
import optimizers.PSO as pso
import optimizers.MVO as mvo
import optimizers.GWO as gwo
import optimizers.MFO as mfo
import optimizers.CS as cs
import optimizers.BAT as bat
import optimizers.WOA as woa
import optimizers.FFA as ffa
import optimizers.SSA as ssa
import optimizers.GA as ga
import optimizers.HHO as hho
import optimizers.SCA as sca
import optimizers.JAYA as jaya
import optimizers.HYBRID as hybrid
import benchmarks
import csv
import numpy
import time
import warnings
import os
import plot_convergence as conv_plot
import plot_boxplot as box_plot

warnings.simplefilter(action="ignore")


def selector(algo, func_details, popSize, Iter):
    function_name = func_details[0]
    lb = func_details[1]
    ub = func_details[2]
    dim = func_details[3]

    if algo == "SSA":
        x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "PSO":
        x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "GA":
        x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "BAT":
        x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "FFA":
        x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "GWO":
        x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "WOA":
        x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "MVO":
        x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "MFO":
        x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "CS":
        x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "HHO":
        x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "SCA":
        x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "JAYA":
        x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    elif algo == "HYBRID":
        x = hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)
    else:
        # Unknown algorithm name: return None (the original `return null`
        # would raise a NameError).
        return None
    return x


def run(optimizer, objectivefunc, NumOfRuns, params, export_flags):

    """
    It serves as the main interface of the framework for running the experiments.

    Parameters
    ----------
    optimizer : list
        The list of optimizer names
    objectivefunc : list
        The list of benchmark functions
    NumOfRuns : int
        The number of independent runs
    params  : dict
        The set of parameters which are:
        1. Size of population (PopulationSize)
        2. The number of iterations (Iterations)
    export_flags : dict
        The set of Boolean flags which are:
        1. Export (Exporting the results in a file)
        2. Export_details (Exporting the detailed results in files)
        3. Export_convergence (Exporting the convergence plots)
        4. Export_boxplot (Exporting the box plots)

    Returns
    -------
    N/A
    """

    # Select general parameters for all optimizers (population size, number of iterations) ....
    PopulationSize = params["PopulationSize"]
    Iterations = params["Iterations"]

    # Export results ?
    Export = export_flags["Export_avg"]
    Export_details = export_flags["Export_details"]
    Export_convergence = export_flags["Export_convergence"]
    Export_boxplot = export_flags["Export_boxplot"]

    Flag = False
    Flag_details = False

    # CSV header for the convergence data
    CnvgHeader = []

    results_directory = time.strftime("%Y-%m-%d-%H-%M-%S") + "/"
    Path(results_directory).mkdir(parents=True, exist_ok=True)

    for l in range(0, Iterations):
        CnvgHeader.append("Iter" + str(l + 1))

    for i in range(0, len(optimizer)):
        for j in range(0, len(objectivefunc)):
            convergence = [0] * NumOfRuns
            executionTime = [0] * NumOfRuns
            for k in range(0, NumOfRuns):
                func_details = benchmarks.getFunctionDetails(objectivefunc[j])
                x = selector(optimizer[i], func_details, PopulationSize, Iterations)
                convergence[k] = x.convergence
                optimizerName = x.optimizer
                objfname = x.objfname
                if Export_details == True:
                    ExportToFile = results_directory + "experiment_details.csv"
                    with open(ExportToFile, "a", newline="\n") as out:
                        writer = csv.writer(out, delimiter=",")
                        if (
                            Flag_details == False
                        ):  # just one time to write the header of the CSV file
                            header = numpy.concatenate(
                                [["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
                            )
                            writer.writerow(header)
                            Flag_details = True  # at least one experiment
                        executionTime[k] = x.executionTime
                        a = numpy.concatenate(
                            [[x.optimizer, x.objfname, x.executionTime], x.convergence]
                        )
                        writer.writerow(a)
                    out.close()

            if Export == True:
                ExportToFile = results_directory + "experiment.csv"

                with open(ExportToFile, "a", newline="\n") as out:
                    writer = csv.writer(out, delimiter=",")
                    if (
                        Flag == False
                    ):  # just one time to write the header of the CSV file
                        header = numpy.concatenate(
                            [["Optimizer", "objfname", "ExecutionTime"], CnvgHeader]
                        )
                        writer.writerow(header)
                        Flag = True

                    avgExecutionTime = float("%0.2f" % (sum(executionTime) / NumOfRuns))
                    avgConvergence = numpy.around(
                        numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2
                    ).tolist()
                    a = numpy.concatenate(
                        [[optimizerName, objfname, avgExecutionTime], avgConvergence]
                    )
                    writer.writerow(a)
                out.close()

    if Export_convergence == True:
        conv_plot.run(results_directory, optimizer, objectivefunc, Iterations)

    if Export_boxplot == True:
        box_plot.run(results_directory, optimizer, objectivefunc, Iterations)

    if Flag == False:  # Failed to run at least one experiment
        print(
            "No Optimizer or Cost function is selected. Check lists of available optimizers and cost functions"
        )

    print("Execution completed")
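
# A minimal driver sketch for run(), matching the parameter and flag sets
# described in its docstring. The optimizer names must be ones handled by
# selector() above; "F1" is an assumed benchmark-function name and must
# exist in the benchmarks module.
if __name__ == "__main__":
    run(
        optimizer=["PSO", "GWO"],
        objectivefunc=["F1"],
        NumOfRuns=3,
        params={"PopulationSize": 30, "Iterations": 50},
        export_flags={
            "Export_avg": True,
            "Export_details": False,
            "Export_convergence": False,
            "Export_boxplot": False,
        },
    )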
[((26, 0, 26, 38), 'warnings.simplefilter', 'warnings.simplefilter', (), '', False, 'import warnings\n'), ((113, 24, 113, 58), 'time.strftime', 'time.strftime', ({(113, 38, 113, 57): '"""%Y-%m-%d-%H-%M-%S"""'}, {}), "('%Y-%m-%d-%H-%M-%S')", False, 'import time\n'), ((173, 8, 173, 78), 'plot_convergence.run', 'conv_plot.run', ({(173, 22, 173, 39): 'results_directory', (173, 41, 173, 50): 'optimizer', (173, 52, 173, 65): 'objectivefunc', (173, 67, 173, 77): 'Iterations'}, {}), '(results_directory, optimizer, objectivefunc, Iterations)', True, 'import plot_convergence as conv_plot\n'), ((176, 8, 176, 77), 'plot_boxplot.run', 'box_plot.run', ({(176, 21, 176, 38): 'results_directory', (176, 40, 176, 49): 'optimizer', (176, 51, 176, 64): 'objectivefunc', (176, 66, 176, 76): 'Iterations'}, {}), '(results_directory, optimizer, objectivefunc, Iterations)', True, 'import plot_boxplot as box_plot\n'), ((114, 4, 114, 27), 'pathlib.Path', 'Path', ({(114, 9, 114, 26): 'results_directory'}, {}), '(results_directory)', False, 'from pathlib import Path\n'), ((124, 31, 124, 78), 'benchmarks.getFunctionDetails', 'benchmarks.getFunctionDetails', ({(124, 61, 124, 77): 'objectivefunc[j]'}, {}), '(objectivefunc[j])', False, 'import benchmarks\n'), ((152, 29, 152, 59), 'csv.writer', 'csv.writer', (), '', False, 'import csv\n'), ((166, 24, 168, 21), 'numpy.concatenate', 'numpy.concatenate', ({(167, 24, 167, 85): '[[optimizerName, objfname, avgExecutionTime], avgConvergence]'}, {}), '([[optimizerName, objfname, avgExecutionTime], avgConvergence]\n )', False, 'import numpy\n'), ((132, 33, 132, 63), 'csv.writer', 'csv.writer', (), '', False, 'import csv\n'), ((142, 28, 144, 25), 'numpy.concatenate', 'numpy.concatenate', ({(143, 28, 143, 87): '[[x.optimizer, x.objfname, x.executionTime], x.convergence]'}, {}), '([[x.optimizer, x.objfname, x.executionTime], x.convergence])', False, 'import numpy\n'), ((156, 33, 158, 25), 'numpy.concatenate', 'numpy.concatenate', ({(157, 28, 157, 84): "[['Optimizer', 'objfname', 'ExecutionTime'], CnvgHeader]"}, {}), "([['Optimizer', 'objfname', 'ExecutionTime'], CnvgHeader])", False, 'import numpy\n'), ((136, 37, 138, 29), 'numpy.concatenate', 'numpy.concatenate', ({(137, 32, 137, 88): "[['Optimizer', 'objfname', 'ExecutionTime'], CnvgHeader]"}, {}), "([['Optimizer', 'objfname', 'ExecutionTime'], CnvgHeader])", False, 'import numpy\n'), ((164, 24, 164, 76), 'numpy.mean', 'numpy.mean', (), '', False, 'import numpy\n')]
slawak/dataclasses-avroschema
tests/fields/test_primitive_types.py
04e69a176b3e72bfa0acd3edbd044ecd161b1a68
import dataclasses

import pytest

from dataclasses_avroschema import fields

from . import consts


@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types(primitive_type):
    name = "a_field"
    field = fields.Field(name, primitive_type, dataclasses.MISSING)
    avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type]

    assert {"name": name, "type": avro_type} == field.to_dict()


@pytest.mark.parametrize("primitive_type", fields.PYTHON_INMUTABLE_TYPES)
def test_primitive_types_with_default_value_none(primitive_type):
    name = "a_field"
    field = fields.Field(name, primitive_type, None)
    avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]]

    assert {"name": name, "type": avro_type, "default": fields.NULL} == field.to_dict()


@pytest.mark.parametrize("primitive_type,default", consts.PRIMITIVE_TYPES_AND_DEFAULTS)
def test_primitive_types_with_default_value(primitive_type, default):
    name = "a_field"
    field = fields.Field(name, primitive_type, default)
    avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL]

    assert {"name": name, "type": avro_type, "default": default} == field.to_dict()


@pytest.mark.parametrize(
    "primitive_type,invalid_default", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS
)
def test_invalid_default_values(primitive_type, invalid_default):
    name = "a_field"
    field = fields.Field(name, primitive_type, invalid_default)

    msg = f"Invalid default type. Default should be {primitive_type}"
    with pytest.raises(AssertionError, match=msg):
        field.to_dict()
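
# A concrete instance of what the parametrized tests above assert, with the
# avro type looked up from fields.PYTHON_TYPE_TO_AVRO rather than hard-coded;
# the function and field names are illustrative only.
def _sketch_single_primitive_field():
    field = fields.Field("a_number", int, dataclasses.MISSING)
    expected = {"name": "a_number", "type": fields.PYTHON_TYPE_TO_AVRO[int]}
    assert expected == field.to_dict()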
[((10, 1, 10, 73), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(10, 25, 10, 41): '"""primitive_type"""', (10, 43, 10, 72): 'fields.PYTHON_INMUTABLE_TYPES'}, {}), "('primitive_type', fields.PYTHON_INMUTABLE_TYPES)", False, 'import pytest\n'), ((19, 1, 19, 73), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(19, 25, 19, 41): '"""primitive_type"""', (19, 43, 19, 72): 'fields.PYTHON_INMUTABLE_TYPES'}, {}), "('primitive_type', fields.PYTHON_INMUTABLE_TYPES)", False, 'import pytest\n'), ((28, 1, 28, 87), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(28, 25, 28, 49): '"""primitive_type,default"""', (28, 51, 28, 86): 'consts.PRIMITIVE_TYPES_AND_DEFAULTS'}, {}), "('primitive_type,default', consts.\n PRIMITIVE_TYPES_AND_DEFAULTS)", False, 'import pytest\n'), ((37, 1, 39, 1), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(38, 4, 38, 36): '"""primitive_type,invalid_default"""', (38, 38, 38, 81): 'consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS'}, {}), "('primitive_type,invalid_default', consts.\n PRIMITIVE_TYPES_AND_INVALID_DEFAULTS)", False, 'import pytest\n'), ((13, 12, 13, 67), 'dataclasses_avroschema.fields.Field', 'fields.Field', ({(13, 25, 13, 29): 'name', (13, 31, 13, 45): 'primitive_type', (13, 47, 13, 66): 'dataclasses.MISSING'}, {}), '(name, primitive_type, dataclasses.MISSING)', False, 'from dataclasses_avroschema import fields\n'), ((22, 12, 22, 52), 'dataclasses_avroschema.fields.Field', 'fields.Field', ({(22, 25, 22, 29): 'name', (22, 31, 22, 45): 'primitive_type', (22, 47, 22, 51): 'None'}, {}), '(name, primitive_type, None)', False, 'from dataclasses_avroschema import fields\n'), ((31, 12, 31, 55), 'dataclasses_avroschema.fields.Field', 'fields.Field', ({(31, 25, 31, 29): 'name', (31, 31, 31, 45): 'primitive_type', (31, 47, 31, 54): 'default'}, {}), '(name, primitive_type, default)', False, 'from dataclasses_avroschema import fields\n'), ((42, 12, 42, 63), 'dataclasses_avroschema.fields.Field', 'fields.Field', ({(42, 25, 42, 29): 'name', (42, 31, 42, 45): 'primitive_type', (42, 47, 42, 62): 'invalid_default'}, {}), '(name, primitive_type, invalid_default)', False, 'from dataclasses_avroschema import fields\n'), ((45, 9, 45, 49), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n')]
mcx/opensim-core
Bindings/Python/examples/Moco/examplePredictAndTrack.py
c109f8cec3a81c732f335cd39752da6ae573b604
# -------------------------------------------------------------------------- # # OpenSim Moco: examplePredictAndTrack.py # # -------------------------------------------------------------------------- # # Copyright (c) 2018 Stanford University and the Authors # # # # Author(s): Christopher Dembia # # # # Licensed under the Apache License, Version 2.0 (the "License"); you may # # not use this file except in compliance with the License. You may obtain a # # copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law or agreed to in writing, software # # distributed under the License is distributed on an "AS IS" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # See the License for the specific language governing permissions and # # limitations under the License. # # -------------------------------------------------------------------------- # import os import math import opensim as osim """ This file performs the following problems using a double pendulum model: 1. predict an optimal trajectory (and controls), 2. track the states from the optimal trajectory, and 3. track the marker trajectories from the optimal trajectory. """ visualize = True # The following environment variable is set during automated testing. if os.getenv('OPENSIM_USE_VISUALIZER') == '0': visualize = False # Create a model of a double pendulum. # ------------------------------------ def createDoublePendulumModel(): model = osim.Model() model.setName("double_pendulum") # Create two links, each with a mass of 1 kg, center of mass at the body's # origin, and moments and products of inertia of zero. b0 = osim.Body("b0", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b0) b1 = osim.Body("b1", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b1) # Add markers to body origin locations. m0 = osim.Marker("m0", b0, osim.Vec3(0)) m1 = osim.Marker("m1", b1, osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1) # Connect the bodies with pin joints. Assume each body is 1 m long. j0 = osim.PinJoint("j0", model.getGround(), osim.Vec3(0), osim.Vec3(0), b0, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q0 = j0.updCoordinate() q0.setName("q0") j1 = osim.PinJoint("j1", b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q1 = j1.updCoordinate() q1.setName("q1") model.addJoint(j0) model.addJoint(j1) tau0 = osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName("tau0") tau0.setOptimalForce(1) model.addComponent(tau0) tau1 = osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName("tau1") tau1.setOptimalForce(1) model.addComponent(tau1) # Add display geometry. bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1) transform = osim.Transform(osim.Vec3(-0.5, 0, 0)) b0Center = osim.PhysicalOffsetFrame("b0_center", b0, transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center = osim.PhysicalOffsetFrame("b1_center", b1, transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML("double_pendulum.osim") return model def solvePrediction(): # Predict the optimal trajectory for a minimum time swing-up. # In the diagram below, + represents the origin, and ---o represents a link # in the double pendulum. # # o # | # o # | # +---o---o + # # iniital pose final pose # study = osim.MocoStudy() study.setName("double_pendulum_predict") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. 
problem.setTimeBounds(0, [0, 5]) # Arguments are name, [lower bound, upper bound], # initial [lower bound, upper bound], # final [lower bound, upper bound]. problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0) problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0, 0) problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0) problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0, 0) problem.setControlInfo("/tau0", [-100, 100]) problem.setControlInfo("/tau1", [-100, 100]) # Cost: minimize final time and error from desired # end effector position. ftCost = osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost = osim.MocoMarkerFinalGoal() finalCost.setName("final") finalCost.setWeight(1000.0) finalCost.setPointName("/markerset/m1") finalCost.setReferenceLocation(osim.Vec3(0, 2, 0)) problem.addGoal(finalCost) # Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver("ipopt") guess = solver.createGuess() guess.setNumTimes(2) guess.setTime([0, 1]) guess.setState("/jointset/j0/q0/value", [0, -math.pi]) guess.setState("/jointset/j1/q1/value", [0, 2*math.pi]) guess.setState("/jointset/j0/q0/speed", [0, 0]) guess.setState("/jointset/j1/q1/speed", [0, 0]) guess.setControl("/tau0", [0, 0]) guess.setControl("/tau1", [0, 0]) guess.resampleWithNumTimes(10) solver.setGuess(guess) # Save the problem to a setup file for reference. study.printToXML("examplePredictAndTrack_predict.omoco") # Solve the problem. solution = study.solve() solution.write("examplePredictAndTrack_predict_solution.sto") if visualize: study.visualize(solution) return solution def computeMarkersReference(predictedSolution): model = createDoublePendulumModel() model.initSystem() states = predictedSolution.exportToStatesTable() statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories = osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels(["/markerset/m0", "/markerset/m1"]) for state in statesTraj: model.realizePosition(state) m0 = model.getComponent("markerset/m0") m1 = model.getComponent("markerset/m1") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) # Assign a weight to each marker. markerWeights = osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m0", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight("/markerset/m1", 5)) return osim.MarkersReference(markerTrajectories, markerWeights) def solveStateTracking(stateRef): # Predict the optimal trajectory for a minimum time swing-up. study = osim.MocoStudy() study.setName("double_pendulum_track") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. # Arguments are name, [lower bound, upper bound], # initial [lower bound, upper bound], # final [lower bound, upper bound]. finalTime = stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0) problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0) problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0) problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0) problem.setControlInfo("/tau0", [-150, 150]) problem.setControlInfo("/tau1", [-150, 150]) # Cost: track provided state data. 
    stateTracking = osim.MocoStateTrackingGoal()
    stateTracking.setReference(osim.TableProcessor(stateRef))
    problem.addGoal(stateTracking)

    effort = osim.MocoControlGoal()
    effort.setName("effort")
    effort.setWeight(0.001)  # TODO
    problem.addGoal(effort)

    # Configure the solver.
    solver = study.initTropterSolver()
    solver.set_num_mesh_intervals(50)
    solver.set_verbosity(2)
    solver.set_optim_solver("ipopt")
    solver.set_optim_jacobian_approximation("exact")
    solver.set_optim_hessian_approximation("exact")
    solver.set_exact_hessian_block_sparsity_mode("dense")

    # Save the problem to a setup file for reference.
    study.printToXML("examplePredictAndTrack_track_states.omoco")

    # Solve the problem.
    solution = study.solve()
    solution.write("examplePredictAndTrack_track_states_solution.sto")

    if visualize:
        study.visualize(solution)

    return solution


def solveMarkerTracking(markersRef, guess):
    # Track the marker trajectories from the predictive solution computed above.
    study = osim.MocoStudy()
    study.setName("double_pendulum_track")

    problem = study.updProblem()

    # Model (dynamics).
    problem.setModel(createDoublePendulumModel())

    # Bounds.
    # Arguments are name, [lower bound, upper bound],
    #                     initial [lower bound, upper bound],
    #                     final [lower bound, upper bound].
    finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1]
    problem.setTimeBounds(0, finalTime)
    problem.setStateInfo("/jointset/j0/q0/value", [-10, 10], 0)
    problem.setStateInfo("/jointset/j0/q0/speed", [-50, 50], 0)
    problem.setStateInfo("/jointset/j1/q1/value", [-10, 10], 0)
    problem.setStateInfo("/jointset/j1/q1/speed", [-50, 50], 0)
    problem.setControlInfo("/tau0", [-100, 100])
    problem.setControlInfo("/tau1", [-100, 100])

    # Cost: track provided marker data.
    markerTracking = osim.MocoMarkerTrackingGoal()
    markerTracking.setMarkersReference(markersRef)
    problem.addGoal(markerTracking)

    effort = osim.MocoControlGoal()
    effort.setName("effort")
    effort.setWeight(0.0001)
    # problem.addGoal(effort)

    # Configure the solver.
    solver = study.initTropterSolver()
    solver.set_num_mesh_intervals(50)
    solver.set_verbosity(2)
    solver.set_optim_solver("ipopt")
    solver.set_optim_jacobian_approximation("exact")
    solver.set_optim_hessian_approximation("exact")
    solver.set_exact_hessian_block_sparsity_mode("dense")
    solver.setGuess(guess)

    # Save the problem to a setup file for reference.
    study.printToXML("examplePredictAndTrack_track_markers.omoco")

    # Solve the problem.
    solution = study.solve()
    solution.write("examplePredictAndTrack_track_markers_solution.sto")

    if visualize:
        study.visualize(solution)

    return solution


optimalTrajectory = solvePrediction()

markersRef = computeMarkersReference(optimalTrajectory)

trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable())

trackedSolution2 = solveMarkerTracking(markersRef, trackedSolution)
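
# A minimal post-processing sketch: reload the predictive solution written
# above and inspect it. Assumes an OpenSim >= 4.1 build in which
# osim.MocoTrajectory can be constructed from an .sto file path.
reloaded = osim.MocoTrajectory("examplePredictAndTrack_predict_solution.sto")
print(reloaded.getNumTimes())   # number of time points in the solution
print(reloaded.getFinalTime())  # the optimized swing-up duration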
# --------------------------------------------------------------------------
# repo_name: aaronFritz2302/ZoomAuto
# repo_path: StorageSystem.py
# repo_head_hexsha: 41af90dc35104bfea970b6b61694e105a625535c
# --------------------------------------------------------------------------
import sqlite3
from pandas import DataFrame

conn = sqlite3.connect('./data.db', check_same_thread=False)


class DataBase():
    cursor = conn.cursor()

    def __init__(self):
        self.createTable()

    def createTable(self):
        '''
        Creates the MeetingData table if it doesn't exist.
        '''
        conn.execute("""CREATE TABLE IF NOT EXISTS MeetingData (Name text,ID text,Password text, DateTime text,Audio text,Video Text)""")

    def enterData(self, meetingData):
        '''
        Writes the meeting data from the UI table to the database,
        replacing any existing MeetingData rows.
        '''
        meetingData.to_sql('MeetingData', con=conn, if_exists='replace', index=False)

    def readData(self):
        '''
        Reads all meeting rows from the database into a DataFrame.
        '''
        self.cursor.execute('''SELECT * FROM MeetingData''')
        retVal = DataFrame(self.cursor.fetchall(), columns=['Name', 'ID', 'Password', 'DateTime', 'Audio', 'Video'])
        return retVal
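
# A minimal usage sketch for the class above (assumes this module is saved as
# StorageSystem.py and pandas is installed; the sample row is invented and its
# column order mirrors the MeetingData schema created in createTable).
if __name__ == '__main__':
    db = DataBase()
    meetings = DataFrame(
        [['Standup', '123456789', 'pw', '2021-01-01 09:00', 'True', 'False']],
        columns=['Name', 'ID', 'Password', 'DateTime', 'Audio', 'Video'])
    db.enterData(meetings)   # replaces the MeetingData table with these rows
    print(db.readData())     # reads them back as a DataFrame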
# --------------------------------------------------------------------------
# repo_name: mflaxman10/pymapd
# repo_path: pymapd/_parsers.py
# repo_head_hexsha: 00b72ae399a0ff829507ee0b3a2b7404f3a06c26
# --------------------------------------------------------------------------
""" Utility methods for parsing data returned from MapD """ import datetime from collections import namedtuple from sqlalchemy import text import mapd.ttypes as T from ._utils import seconds_to_time Description = namedtuple("Description", ["name", "type_code", "display_size", "internal_size", "precision", "scale", "null_ok"]) ColumnDetails = namedtuple("ColumnDetails", ["name", "type", "nullable", "precision", "scale", "comp_param"]) _typeattr = { 'SMALLINT': 'int', 'INT': 'int', 'BIGINT': 'int', 'TIME': 'int', 'TIMESTAMP': 'int', 'DATE': 'int', 'BOOL': 'int', 'FLOAT': 'real', 'DECIMAL': 'real', 'DOUBLE': 'real', 'STR': 'str', } _thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc, val): # type: (T.TColumnType, T.TDatum) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null: return None val = getattr(val.val, _typeattr[typename] + '_val') base = datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': val = (base + datetime.timedelta(seconds=val)) elif typename == 'DATE': val = (base + datetime.timedelta(seconds=val)).date() elif typename == 'TIME': val = seconds_to_time(val) return val def _extract_col_vals(desc, val): # type: (T.TColumnType, T.TColumn) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls = val.nulls vals = getattr(val.data, _typeattr[typename] + '_col') vals = [None if null else v for null, v in zip(nulls, vals)] base = datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': vals = [None if v is None else base + datetime.timedelta(seconds=v) for v in vals] elif typename == 'DATE': vals = [None if v is None else (base + datetime.timedelta(seconds=v)).date() for v in vals] elif typename == 'TIME': vals = [None if v is None else seconds_to_time(v) for v in vals] return vals def _extract_description(row_desc): # type: (List[T.TColumnType]) -> List[Description] """ Return a tuple of (name, type_code, display_size, internal_size, precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description """ return [Description(col.col_name, col.col_type.type, None, None, None, None, col.col_type.nullable) for col in row_desc] def _extract_column_details(row_desc): # For Connection.get_table_details return [ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision, x.col_type.scale, x.col_type.comp_param) for x in row_desc ] def _is_columnar(data): # type: (T.TQueryResult) -> bool return data.row_set.is_columnar def _load_schema(buf): """ Load a `pyarrow.Schema` from a buffer written to shared memory Parameters ---------- buf : pyarrow.Buffer Returns ------- schema : pyarrow.Schema """ import pyarrow as pa reader = pa.RecordBatchStreamReader(buf) return reader.schema def _load_data(buf, schema): """ Load a `pandas.DataFrame` from a buffer written to shared memory Parameters ---------- buf : pyarrow.Buffer shcema : pyarrow.Schema Returns ------- df : pandas.DataFrame """ import pyarrow as pa message = pa.read_message(buf) rb = pa.read_record_batch(message, schema) return rb.to_pandas() def _parse_tdf_gpu(tdf): """ Parse the results of a select ipc_gpu into a GpuDataFrame Parameters ---------- tdf : TDataFrame Returns ------- gdf : GpuDataFrame """ import numpy as np from pygdf.gpuarrow import GpuArrowReader from pygdf.dataframe import DataFrame from numba import cuda from numba.cuda.cudadrv import drvapi from .shm import load_buffer ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle) 
ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx = cuda.current_context() dptr = ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size) # TODO: extra copy. schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype = np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader = GpuArrowReader(schema_buffer, darr) df = DataFrame() for k, v in reader.to_dict().items(): df[k] = v return df def _bind_parameters(operation, parameters): return (text(operation) .bindparams(**parameters) .compile(compile_kwargs={"literal_binds": True}))
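
# A minimal sketch of the parameter-binding helper above; the query and the
# parameter values are invented for illustration. `_bind_parameters` inlines
# the literals via SQLAlchemy's literal_binds compilation.
def _example_bind_parameters():  # hypothetical helper, not part of pymapd
    compiled = _bind_parameters("SELECT * FROM t WHERE x = :x AND s = :s",
                                {"x": 10, "s": "a"})
    print(compiled)  # SELECT * FROM t WHERE x = 10 AND s = 'a'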
# --------------------------------------------------------------------------
# repo_name: rohit901/featuretools
# repo_path: featuretools/entityset/entity.py
# repo_head_hexsha: 20bee224782acf94909c2bf33239fd5332a8c1de
# --------------------------------------------------------------------------
import logging
import warnings

import dask.dataframe as dd
import numpy as np
import pandas as pd

from featuretools import variable_types as vtypes
from featuretools.utils.entity_utils import (
    col_is_datetime,
    convert_all_variable_data,
    convert_variable_data,
    get_linked_vars,
    infer_variable_types
)
from featuretools.utils.gen_utils import import_or_none, is_instance
from featuretools.utils.wrangle import _check_time_type, _dataframes_equal
from featuretools.variable_types import Text, find_variable_types

ks = import_or_none('databricks.koalas')

logger = logging.getLogger('featuretools.entityset')

_numeric_types = vtypes.PandasTypes._pandas_numerics
_categorical_types = [vtypes.PandasTypes._categorical]
_datetime_types = vtypes.PandasTypes._pandas_datetimes


class Entity(object):
    """Represents an entity in a Entityset, and stores relevant metadata and data

    An Entity is analogous to a table in a relational database

    See Also:
        :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet`

    """

    def __init__(self, id, df, entityset, variable_types=None,
                 index=None, time_index=None, secondary_time_index=None,
                 last_time_index=None, already_sorted=False, make_index=False,
                 verbose=False):
        """ Create Entity

        Args:
            id (str): Id of Entity.
            df (pd.DataFrame): Dataframe providing the data for the
                entity.
            entityset (EntitySet): Entityset for this Entity.
            variable_types (dict[str -> type/str/dict[str -> type]]) : An entity's
                variable_types dict maps string variable ids to types (:class:`.Variable`)
                or type_string (str) or (type, kwargs) to pass keyword arguments to the Variable.
            index (str): Name of id column in the dataframe.
            time_index (str): Name of time column in the dataframe.
            secondary_time_index (dict[str -> str]): Dictionary mapping columns
                in the dataframe to the time index column they are associated with.
            last_time_index (pd.Series): Time index of the last event for each
                instance across all child entities.
            make_index (bool, optional) : If True, assume index does not exist as a column in
                dataframe, and create a new column of that name using the integers 0 to
                len(dataframe) - 1. Otherwise, assume index exists in dataframe.
""" _validate_entity_params(id, df, time_index) created_index, index, df = _create_index(index, make_index, df) self.id = id self.entityset = entityset self.data = {'df': df, 'last_time_index': last_time_index} self.created_index = created_index self._verbose = verbose secondary_time_index = secondary_time_index or {} self._create_variables(variable_types, index, time_index, secondary_time_index) self.df = df[[v.id for v in self.variables]] self.set_index(index) self.time_index = None if time_index: self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def __repr__(self): repr_out = u"Entity: {}\n".format(self.id) repr_out += u" Variables:" for v in self.variables: repr_out += u"\n {} (dtype: {})".format(v.id, v.type_string) shape = self.shape repr_out += u"\n Shape:\n (Rows: {}, Columns: {})".format( shape[0], shape[1]) return repr_out @property def shape(self): '''Shape of the entity's dataframe''' return self.df.shape def __eq__(self, other, deep=False): if self.index != other.index: return False if self.time_index != other.time_index: return False if self.secondary_time_index != other.secondary_time_index: return False if len(self.variables) != len(other.variables): return False if set(self.variables) != set(other.variables): return False if deep: if self.last_time_index is None and other.last_time_index is not None: return False elif self.last_time_index is not None and other.last_time_index is None: return False elif self.last_time_index is not None and other.last_time_index is not None: if not self.last_time_index.equals(other.last_time_index): return False if not _dataframes_equal(self.df, other.df): return False variables = {variable: (variable, ) for variable in self.variables} for variable in other.variables: variables[variable] += (variable, ) for self_var, other_var in variables.values(): if not self_var.__eq__(other_var, deep=True): return False return True def __sizeof__(self): return sum([value.__sizeof__() for value in self.data.values()]) @property def df(self): '''Dataframe providing the data for the entity.''' return self.data["df"] @df.setter def df(self, _df): self.data["df"] = _df @property def last_time_index(self): ''' Time index of the last event for each instance across all child entities. ''' return self.data["last_time_index"] @last_time_index.setter def last_time_index(self, lti): self.data["last_time_index"] = lti def __hash__(self): return id(self.id) def __getitem__(self, variable_id): return self._get_variable(variable_id) def _get_variable(self, variable_id): """Get variable instance Args: variable_id (str) : Id of variable to get. Returns: :class:`.Variable` : Instance of variable. Raises: RuntimeError : if no variable exist with provided id """ for v in self.variables: if v.id == variable_id: return v raise KeyError("Variable: %s not found in entity" % (variable_id)) @property def variable_types(self): '''Dictionary mapping variable id's to variable types''' return {v.id: type(v) for v in self.variables} def convert_variable_type(self, variable_id, new_type, convert_data=True, **kwargs): """Convert variable in dataframe to different type Args: variable_id (str) : Id of variable to convert. new_type (subclass of `Variable`) : Type of variable to convert to. entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity. convert_data (bool) : If True, convert underlying data in the EntitySet. 
        Raises:
            RuntimeError : Raises if it cannot convert the underlying data

        Examples:
            >>> from featuretools.tests.testing_utils import make_ecommerce_entityset
            >>> es = make_ecommerce_entityset()
            >>> es["customers"].convert_variable_type("engagement_level", vtypes.Categorical)
        """
        if convert_data:
            # first, convert the underlying data (or at least try to)
            self.df = convert_variable_data(df=self.df,
                                            column_id=variable_id,
                                            new_type=new_type,
                                            **kwargs)

        # replace the old variable with the new one, maintaining order
        variable = self._get_variable(variable_id)
        new_variable = new_type.create_from(variable)
        self.variables[self.variables.index(variable)] = new_variable

    def _create_variables(self, variable_types, index, time_index, secondary_time_index):
        """Extracts the variables from a dataframe

        Args:
            variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's
                variable_types dict maps string variable ids to types (:class:`.Variable`)
                or type_strings (str) or (type, kwargs) to pass keyword arguments to the Variable.
            index (str): Name of index column
            time_index (str or None): Name of time_index column
            secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns
                that each map to a list of columns that depend on that secondary time
        """
        variables = []
        # guard against variable_types=None before copying
        variable_types = (variable_types or {}).copy()
        string_to_class_map = find_variable_types()
        # TODO: Remove once Text has been removed from variable types
        string_to_class_map[Text.type_string] = Text
        for vid in variable_types.copy():
            vtype = variable_types[vid]
            if isinstance(vtype, str):
                if vtype in string_to_class_map:
                    variable_types[vid] = string_to_class_map[vtype]
                else:
                    variable_types[vid] = string_to_class_map['unknown']
                    warnings.warn("Variable type {} was unrecognized, Unknown variable type was used instead".format(vtype))

        if index not in variable_types:
            variable_types[index] = vtypes.Index

        link_vars = get_linked_vars(self)
        inferred_variable_types = infer_variable_types(self.df,
                                                       link_vars,
                                                       variable_types,
                                                       time_index,
                                                       secondary_time_index)
        inferred_variable_types.update(variable_types)

        for v in inferred_variable_types:
            # TODO document how vtype can be tuple
            vtype = inferred_variable_types[v]
            if isinstance(vtype, tuple):
                # vtype is (ft.Variable, dict_of_kwargs)
                _v = vtype[0](v, self, **vtype[1])
            else:
                _v = inferred_variable_types[v](v, self)
            variables += [_v]

        # convert data once we've inferred
        self.df = convert_all_variable_data(df=self.df,
                                            variable_types=inferred_variable_types)
        # make sure index is at the beginning
        index_variable = [v for v in variables
                          if v.id == index][0]
        self.variables = [index_variable] + [v for v in variables
                                             if v.id != index]

    def update_data(self, df, already_sorted=False,
                    recalculate_last_time_indexes=True):
        '''Update entity's internal dataframe, optionally making sure data is sorted,
        reference indexes to other entities are consistent, and
        last_time_indexes are consistent.
''' if len(df.columns) != len(self.variables): raise ValueError("Updated dataframe contains {} columns, expecting {}".format(len(df.columns), len(self.variables))) for v in self.variables: if v.id not in df.columns: raise ValueError("Updated dataframe is missing new {} column".format(v.id)) # Make sure column ordering matches variable ordering self.df = df[[v.id for v in self.variables]] self.set_index(self.index) if self.time_index is not None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and self.last_time_index is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self, max_values=5, verbose=False): """ Find interesting values for categorical variables, to be used to generate "where" clauses Args: max_values (int) : Maximum number of values per variable to add. verbose (bool) : If True, print summary of interesting values found. Returns: None """ for variable in self.variables: # some heuristics to find basic 'where'-able variables if isinstance(variable, vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO - consider removing this constraints # don't add interesting values for entities in relationships skip = False for r in self.entityset.relationships: if variable in [r.child_variable, r.parent_variable]: skip = True break if skip: continue counts = self.df[variable.id].value_counts() # find how many of each unique value there are; sort by count, # and add interesting values to each variable total_count = np.sum(counts) counts[:] = counts.sort_values()[::-1] for i in range(min(max_values, len(counts.index))): idx = counts.index[i] # add the value to interesting_values if it represents more than # 25% of the values we have not seen so far if len(counts.index) < 25: if verbose: msg = "Variable {}: Marking {} as an " msg += "interesting value" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else: fraction = counts[idx] / total_count if fraction > 0.05 and fraction < 0.95: if verbose: msg = "Variable {}: Marking {} as an " msg += "interesting value" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) # total_count -= counts[idx] else: break self.entityset.reset_data_description() def delete_variables(self, variable_ids): """ Remove variables from entity's dataframe and from self.variables Args: variable_ids (list[str]): Variables to delete Returns: None """ # check if variable is not a list if not isinstance(variable_ids, list): raise TypeError('variable_ids must be a list of variable names') if len(variable_ids) == 0: return self.df = self.df.drop(variable_ids, axis=1) for v_id in variable_ids: v = self._get_variable(v_id) self.variables.remove(v) def set_time_index(self, variable_id, already_sorted=False): # check time type if not isinstance(self.df, pd.DataFrame) or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check = self.df[variable_id].iloc[0] time_type = _check_time_type(time_to_check) if time_type is None: raise TypeError("%s time index not recognized as numeric or" " datetime" % (self.id)) if self.entityset.time_type is None: self.entityset.time_type = time_type elif self.entityset.time_type != time_type: 
            raise TypeError("%s time index is %s type which differs from"
                            " other entityset time indexes" % (self.id, time_type))

        if is_instance(self.df, (dd, ks), 'DataFrame'):
            t = time_type  # skip checking values
            already_sorted = True  # skip sorting
        else:
            t = vtypes.NumericTimeIndex
            if col_is_datetime(self.df[variable_id]):
                t = vtypes.DatetimeTimeIndex

            # use stable sort
            if not already_sorted:
                # sort by time variable, then by index
                self.df = self.df.sort_values([variable_id, self.index])

        self.convert_variable_type(variable_id, t, convert_data=False)
        self.time_index = variable_id

    def set_index(self, variable_id, unique=True):
        """
        Args:
            variable_id (string) : Name of an existing variable to set as index.
            unique (bool) : Whether to assert that the index is unique.
        """
        if isinstance(self.df, pd.DataFrame):
            self.df = self.df.set_index(self.df[variable_id], drop=False)
            self.df.index.name = None
            if unique:
                assert self.df.index.is_unique, "Index is not unique on dataframe " \
                    "(Entity {})".format(self.id)

        self.convert_variable_type(variable_id, vtypes.Index, convert_data=False)
        self.index = variable_id

    def set_secondary_time_index(self, secondary_time_index):
        for time_index, columns in secondary_time_index.items():
            if is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty:
                time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype]
            else:
                time_to_check = self.df[time_index].head(1).iloc[0]
            time_type = _check_time_type(time_to_check)
            if time_type is None:
                raise TypeError("%s time index not recognized as numeric or"
                                " datetime" % (self.id))
            if self.entityset.time_type != time_type:
                raise TypeError("%s time index is %s type which differs from"
                                " other entityset time indexes" % (self.id, time_type))
            if time_index not in columns:
                columns.append(time_index)

        self.secondary_time_index = secondary_time_index


def _create_index(index, make_index, df):
    '''Handles index creation logic based on user input'''
    created_index = None

    if index is None:
        # Case 1: user wanted to make index but did not specify column name
        assert not make_index, "Must specify an index name if make_index is True"
        # Case 2: make_index not specified but no index supplied, use first column
        warnings.warn(("Using first column as index. "
                       "To change this, specify the index parameter"))
        index = df.columns[0]
    elif make_index and index in df.columns:
        # Case 3: user wanted to make index but column already exists
        raise RuntimeError("Cannot make index: index variable already present")
    elif index not in df.columns:
        if not make_index:
            # Case 4: user names index, it is not in df. does not specify
            # make_index.  Make new index column and warn
            warnings.warn("index {} not found in dataframe, creating new "
                          "integer column".format(index))
        # Case 5: make_index with no errors or warnings
        # (Case 4 also uses this code path)
        if isinstance(df, dd.DataFrame):
            df[index] = 1
            df[index] = df[index].cumsum() - 1
        elif is_instance(df, ks, 'DataFrame'):
            df = df.koalas.attach_id_column('distributed-sequence', index)
        else:
            df.insert(0, index, range(len(df)))
        created_index = index
    # Case 6: user specified index, which is already in df. No action needed.
return created_index, index, df def _validate_entity_params(id, df, time_index): '''Validation checks for Entity inputs''' assert isinstance(id, str), "Entity id must be a string" assert len(df.columns) == len(set(df.columns)), "Duplicate column names" for c in df.columns: if not isinstance(c, str): raise ValueError("All column names must be strings (Column {} " "is not a string)".format(c)) if time_index is not None and time_index not in df.columns: raise LookupError('Time index not found in dataframe')
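
# A minimal sketch of the index-handling cases in _create_index above; the
# frames are invented for illustration.
def _example_create_index():  # hypothetical helper, not part of featuretools
    df = pd.DataFrame({"a": [10, 20]})
    # Case 2: no index given -> warns and falls back to the first column.
    print(_create_index(None, False, df.copy())[1])   # 'a'
    # Case 5: make_index=True -> inserts a fresh integer column 'idx'.
    print(_create_index("idx", True, df.copy())[0])   # 'idx'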
# --------------------------------------------------------------------------
# repo_name: wilvk/githubdl
# repo_path: githubdl/url_helpers.py
# repo_head_hexsha: 1dc8c1c0d93a8e4b8155aecf4f5e73e2931ed920
# --------------------------------------------------------------------------
import re
from urllib.parse import urlparse
import logging


def check_url_is_http(repo_url):
    '''Returns True if the url looks like an http(s) repository url.'''
    predicate = re.compile(r'^https?://.*$')
    match = predicate.search(repo_url)
    return False if match is None else True


def check_url_is_ssh(repo_url):
    '''Returns True if the url looks like an ssh repository url (git@...git).'''
    predicate = re.compile(r'^git\@.*\.git$')
    match = predicate.search(repo_url)
    return False if match is None else True


def get_domain_name_from_http_url(repo_url):
    '''Extracts the host name from an http(s) repository url.'''
    site_object = urlparse(repo_url)
    return site_object.netloc


def get_repo_name_from_http_url(repo_url):
    '''Extracts the "owner/repo" path from an http(s) repository url.'''
    site_object = urlparse(repo_url)
    parsed_string = re.sub(r'\.git$', '', site_object.path)
    if parsed_string[0] == '/':
        return parsed_string[1:]
    return parsed_string


def get_repo_name_from_ssh_url(repo_url):
    '''Extracts the "owner/repo" path from an ssh repository url.'''
    predicate = re.compile(r'(?<=\:)(.*)(?=\.)')
    match = predicate.search(repo_url)
    return match.group()


def get_domain_name_from_ssh_url(repo_url):
    '''Extracts the host name from an ssh repository url.'''
    predicate = re.compile(r'(?<=\@)(.*)(?=\:)')
    match = predicate.search(repo_url)
    return match.group()


def validate_protocol_exists(is_ssh, is_http):
    '''Raises a RuntimeError if the url matched neither protocol.'''
    if not is_ssh and not is_http:
        err_message = "Error: repository url provided is not http(s) or ssh"
        logging.critical(err_message)
        raise RuntimeError(err_message)


def check_url_protocol(repo_url):
    '''Classifies a repository url, returning an (is_ssh, is_http) tuple.'''
    is_ssh = check_url_is_ssh(repo_url)
    is_http = check_url_is_http(repo_url)
    validate_protocol_exists(is_ssh, is_http)
    return (is_ssh, is_http)
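
# A minimal usage sketch of the helpers above; the URLs are invented.
def _example_check_urls():  # hypothetical helper, not part of githubdl
    http_url = 'https://github.com/wilvk/githubdl.git'
    ssh_url = '[email protected]:wilvk/githubdl.git'
    print(check_url_protocol(http_url))           # (False, True)
    print(get_repo_name_from_http_url(http_url))  # wilvk/githubdl
    print(get_domain_name_from_ssh_url(ssh_url))  # github.com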
# --------------------------------------------------------------------------
# repo_name: AlsikeE/Ez
# repo_path: RECOVERED_FILES/root/ez-segway/simulator/ez_lib/cen_scheduler.py
# repo_head_hexsha: 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad
# --------------------------------------------------------------------------
import itertools from ez_lib import ez_flow_tool from collections import defaultdict from ez_scheduler import EzScheduler from ez_lib.ez_ob import CenUpdateInfo, UpdateNext from misc import constants, logger from domain.message import * from collections import deque from misc import global_vars import time import eventlet mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class CenCtrlScheduler(EzScheduler): def __init__(self, switches_, log_): self.switches = switches_ super(CenCtrlScheduler, self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict() ########## Begin three properties are used for parallel processes ########## self.no_of_pending_msgs = {} self.notification_queues = {x: deque([]) for x in self.switches} self.current_notification_time = {x: -1 for x in self.switches} self.current_processing_time = {x: -1 for x in self.switches} ########### End three properties are used for parallel processes ########### self.to_sames = defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True def reset(self): super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict() ########## Begin three properties are used for parallel processes ########## self.no_of_pending_msgs = {} self.notification_queues = {x: deque([]) for x in self.switches} self.current_notification_time = {x: -1 for x in self.switches} self.current_processing_time = {x: -1 for x in self.switches} ########### End three properties are used for parallel processes ########### self.to_sames = defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True def __str__(self): return "Centralized Controller" @staticmethod def init_logger(): return logger.getLogger("Centralized Controller", constants.LOG_LEVEL) def create_dependency_graph(self, old_flows, new_flows): time_start_computing = time.time() * 1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames, do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info("links by endpoints %s segs_by_segpath_id %s" % (self.links_by_endpoints,self.segments_by_seg_path_id)) # self.log.info("time to compute dependency graph: %s" % str(time() * 1000 - time_start_computing)) def process_coherent(self): send_to_sames = set() for key in self.to_sames.keys(): to_same = self.to_sames[key] for sw in to_same: send_to_sames.add(sw) # for sw in send_to_sames: # msg = NotificationMessage(0, sw, constants.COHERENT_MSG, 0) # self.send_to_switch(msg, sw) def compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0 for add_op in link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \ += self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id): # pool = eventlet.GreenPool() mulog.info("start finding dependency loop and sort updates") mulog.info(links_by_endpoints) for sw in self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, # links_by_endpoints, segments_by_seg_path_id) 
            self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id)
        # pool.waitall()
        # for link in links_by_endpoints.values():
        #     ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id)
        # global_vars.finish_prioritizing_time = time.clock()

    def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id):
        for link in links_by_endpoints.values():
            if link.src == sw:
                ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id)
        for link in links_by_endpoints.values():
            if link.src == sw:
                self.compute_required_vol_for_dependency_loop(link)
        current_time = time.clock()
        if global_vars.finish_computation_time < current_time:
            global_vars.finish_computation_time = time.clock()

    def execute_all_remove_only_updates(self, update_infos):
        for l_segment in self.segments_by_seg_path_id.values():
            old_sws = set(l_segment.old_seg)
            old_sws.add(l_segment.init_sw)
            seg_path_id = l_segment.seg_path_id
            self.received_removed_msg[seg_path_id] = set()
            if l_segment.remove_only:
                if not update_infos.has_key(seg_path_id):
                    update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
                                                              l_segment.flow_src,
                                                              l_segment.flow_dst)
                for sw in old_sws:
                    update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id,
                                                                            sw, constants.REMOVE_NEXT)
                l_segment.update_status = constants.SENT_REMOVING

    def update_message_queues(self, update_infos, process_update_info_func):
        increased = set()
        related_sws = set([])
        for key in update_infos.keys():
            update_info = update_infos[key]
            # self.logger.info("Process update info %s at %d ms from starting" % (update_info, (time() - self.current_start_time)*1000))
            assert update_info, CenUpdateInfo
            for sw in update_infos[key].update_nexts.keys():
                if sw not in increased:
                    self.current_notification_time[sw] += 1
                    increased.add(sw)
                    self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0
                # update_next = update_info.update_nexts[sw]
                process_update_info_func(sw, update_info)
                self.log.debug("add message in processing update_info: %s" % update_info)
                self.log.debug("pending messages: %s" % str(self.no_of_pending_msgs))
                related_sws.add(sw)  # self.datapaths[sw + 1])
        return related_sws

    def increase_processing_time(self, sw):
        self.current_processing_time[sw] += 1

    def enque_msg_to_notification_queue(self, sw, msg):
        self.notification_queues[sw].append(msg)
        self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1

    def deque_msg_from_notification_queue(self, sw):
        msg = self.notification_queues[sw].popleft()
        self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1
        return msg

    def has_pending_msg_of_sw(self, sw):
        return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0

    # def check_all_capable_for_link(self, link, executable_segments_by_link):
    #     capable_segments = []
    #     done_loop = True
    #     endpoints = (link.src, link.dst)
    #     total_vol = 0
    #     for op in link.to_adds_loop:
    #         l_segment = self.segments_by_seg_path_id[op.seg_path_id]
    #         if l_segment.update_status == constants.NOTHING:
    #             done_loop = False
    #         total_vol += l_segment.vol
    #
    # def check_and_send_possible_update_by_link(self, update_infos):
    #     executable_segments_by_link = {}
    #     executable_link_by_segments = {}
    #     for link in self.links_by_endpoints.values():
    #         self.check_all_capable_for_link(link, executable_segments_by_link)

    def total_pending_cycle_vol(self, link):
        total_vol = 0
        for add_op in link.to_adds + link.to_adds_loop + link.to_adds_only:
            total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol
        return total_vol

    def check_to_split(self, link, l_segment):
        # Unfinished stub in the original source; return a (splittable, split_vol)
        # pair so splittable_vol() can unpack the result without raising.
        return False, 0

    def splittable_vol(self, seg_path_id):
        # TODO: Update remaining_vol_of_loop when adding or removing segment
        final_split_vol = 0
        l_segment = self.segments_by_seg_path_id[seg_path_id]
        for endpoints in l_segment.new_link_seg:
            link = self.links_by_endpoints[endpoints]
            is_add_only = False
            for op in link.to_adds_only:
                if op.seg_path_id == seg_path_id:
                    return 0
            splittable, split_vol = self.check_to_split(link, l_segment)
            if splittable and final_split_vol > split_vol > 0:
                final_split_vol = split_vol
        self.log.debug("capable %s" % l_segment)
        return final_split_vol

    def check_and_send_possible_split_updates(self, update_infos):
        has_execution = True
        while has_execution:
            has_execution = False
            for l_segment in self.segments_by_seg_path_id.values():
                if l_segment.update_status != constants.NOTHING:
                    continue
                seg_path_id = l_segment.seg_path_id
                self.log.debug(l_segment)
                split_vol = self.splittable_vol(l_segment.seg_path_id)
                if split_vol > 0:
                    if not update_infos.has_key(seg_path_id):
                        update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
                                                                  l_segment.flow_src,
                                                                  l_segment.flow_dst)
                    update_info = update_infos[seg_path_id]
                    update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,
                                                                             l_segment.new_seg[0],
                                                                             constants.UPDATE_NEXT)
                    for i in range(len(l_segment.new_seg) - 1):
                        # self.log.debug("send to sw%s" % str(l_segment.new_seg[i]))
                        next_sw = l_segment.new_seg[i + 1]
                        update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,
                                                                                    next_sw, constants.ADD_NEXT)
                    self.received_updated_msg[l_segment.seg_path_id] = set()
                    l_segment.update_status = constants.SENT_ADDING
                    l_segment.is_splitting = True
                    for pair in l_segment.new_link_seg:
                        self.log.info("avail_cap of link %s: %f, "
                                      "give %f to segment %s" % (str(pair),
                                                                 self.links_by_endpoints[pair].avail_cap,
                                                                 l_segment.vol, str(l_segment.seg_path_id)))
                        self.links_by_endpoints[pair].avail_cap -= split_vol
                        for u_op in self.links_by_endpoints[pair].to_adds_loop:
                            if u_op.seg_path_id == l_segment.seg_path_id:
                                self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol

        count = 0
        for l_segment in self.segments_by_seg_path_id.values():
            if l_segment.update_status == constants.NOTHING:
                count += 1
        self.log.debug("number of flows that is not done anything %d" % count)

    def check_possible_update_by_links(self, update_infos):
        has_execution = True
        while has_execution:
            has_execution = False
            for l_segment in self.segments_by_seg_path_id.values():
                if l_segment.update_status != constants.NOTHING:
                    continue
                seg_path_id = l_segment.seg_path_id
                self.log.debug(l_segment)
                if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock:
                    if not update_infos.has_key(seg_path_id):
                        update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
                                                                  l_segment.flow_src,
                                                                  l_segment.flow_dst)
                    update_info = update_infos[seg_path_id]
                    update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,
                                                                             l_segment.new_seg[0],
                                                                             constants.UPDATE_NEXT)
                    for i in range(len(l_segment.new_seg) - 1):
                        next_sw = l_segment.new_seg[i + 1]
                        update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,
                                                                                    next_sw, constants.ADD_NEXT)
                    self.received_updated_msg[l_segment.seg_path_id] = set()
                    l_segment.update_status = constants.SENT_ADDING
                    for pair in l_segment.new_link_seg:
                        self.links_by_endpoints[pair].avail_cap -= l_segment.vol
                        for u_op in self.links_by_endpoints[pair].to_adds_loop:
                            if u_op.seg_path_id == l_segment.seg_path_id:
                                self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol
        count = 0
        for l_segment in self.segments_by_seg_path_id.values():
            if l_segment.update_status == constants.NOTHING:
                count += 1
        self.log.debug("number of flows that is not done anything %d" % count)

    def check_and_send_possible_updates(self, update_infos):
        has_execution = True
        while has_execution:
            has_execution = False
            for l_segment in self.segments_by_seg_path_id.values():
                if l_segment.update_status != constants.NOTHING:
                    continue
                seg_path_id = l_segment.seg_path_id
                self.log.debug(l_segment)
                mulog.info("chk&send psb_uds for linksegment %s" % l_segment)
                if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock:
                    if not update_infos.has_key(seg_path_id):
                        update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
                                                                  l_segment.flow_src,
                                                                  l_segment.flow_dst)
                    update_info = update_infos[seg_path_id]
                    update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,
                                                                             l_segment.new_seg[0],
                                                                             constants.UPDATE_NEXT)
                    for i in range(len(l_segment.new_seg) - 1):
                        next_sw = l_segment.new_seg[i + 1]
                        update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,
                                                                                    next_sw, constants.ADD_NEXT)
                    self.received_updated_msg[l_segment.seg_path_id] = set()
                    l_segment.update_status = constants.SENT_ADDING
                    for pair in l_segment.new_link_seg:
                        self.links_by_endpoints[pair].avail_cap -= l_segment.vol
                        for u_op in self.links_by_endpoints[pair].to_adds_loop:
                            if u_op.seg_path_id == l_segment.seg_path_id:
                                self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol

        count = 0
        for l_segment in self.segments_by_seg_path_id.values():
            if l_segment.update_status == constants.NOTHING:
                count += 1
        self.log.debug("number of flows that is not done anything %d" % count)

    def check_and_do_next_update(self, msg):
        update_infos = defaultdict(CenUpdateInfo)
        if not self.received_updated_msg.has_key(msg.seg_path_id):
            self.received_updated_msg[msg.seg_path_id] = set()
        self.received_updated_msg[msg.seg_path_id].add(msg.src_id)
        self.log.debug("handle updated msg %s" % msg)
        assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True
        link_segment = self.segments_by_seg_path_id[msg.seg_path_id]
        # self.log.info("receive updated msgs for segment %s, new_seg_length = %d"
        #               % (str(link_segment.seg_path_id), len(link_segment.new_seg)))
        if link_segment.update_status == constants.SENT_ADDING \
                and len(self.received_updated_msg[msg.seg_path_id]) == \
                len(link_segment.new_seg):
            self.finish_adding_new_path(link_segment, update_infos)
        return update_infos

    def finish_adding_new_path(self, link_segment, update_infos):
        self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000
        if len(link_segment.old_seg) < 1:
            link_segment.update_status = constants.FINISH_ALL
        else:
            # self.log.info("receive enough updated msgs for segment %s" % str(link_segment.seg_path_id))
            link_segment.update_status = constants.FINISH_ADDING
            self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment)

    def remove_segment_and_check_to_update(self, msg):
        assert isinstance(msg, NotificationMessage)
        update_infos = defaultdict(CenUpdateInfo)
        self.log.debug("handle removed msg %s" % msg)
        self.received_removed_msg[msg.seg_path_id].add(msg.src_id)
        link_segment = self.segments_by_seg_path_id[msg.seg_path_id]
        next_idx = 0
        if msg.src_id != link_segment.init_sw:
            next_idx = link_segment.old_seg.index(msg.src_id) + 1
        if next_idx < len(link_segment.old_seg):
            dst = link_segment.old_seg[next_idx]
            pair = (msg.src_id, dst)
            self.links_by_endpoints[pair].avail_cap += link_segment.vol
            # self.log.info("avail_cap of link %d->%d: %f, "
            #               "get from segment %s" % (msg.src_id, dst,
            #                                        self.links_by_endpoints[pair].avail_cap,
            #                                        str(link_segment.seg_path_id)))
        if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1:
            link_segment.update_status = constants.FINISH_ALL
            self.log.debug("finish %s" % str(link_segment.seg_path_id))
            self.check_and_send_possible_updates(update_infos)
        return update_infos

    def check_finish_update(self):
        count = 0
        finished = True
        for link_segment in self.segments_by_seg_path_id.values():
            if link_segment.update_status != constants.FINISH_ALL:
                update_status = ''
                if link_segment.update_status == constants.NOTHING:
                    count += 1
                    update_status = "NOTHING"
                if link_segment.update_status == constants.SENT_ADDING:
                    self.log.debug("must receive %d more UPDATED msgs" % (len(link_segment.new_seg) - 1))
                    self.log.debug("received from: %s" % self.received_updated_msg[link_segment.seg_path_id])
                    update_status = "SENT_ADDING"
                elif link_segment.update_status == constants.SENT_REMOVING:
                    self.log.debug("must receive %d more REMOVED msgs" % (len(link_segment.old_seg) - 1))
                    self.log.debug("received from: %s" % self.received_removed_msg[link_segment.seg_path_id])
                    update_status = "SENT_REMOVING"
                elif link_segment.update_status == constants.FINISH_ADDING:
                    update_status = "FINISH_ADDING"
                elif link_segment.update_status == constants.FINISH_REMOVING:
                    update_status = "FINISH_REMOVING"
                self.log.debug("segment %s is not finished! update_status %s." % (str(link_segment.seg_path_id), update_status))
                # return False
                finished = False
                break
        has_no_pending_barrier = self.has_not_pending_msg()
        if not has_no_pending_barrier:
            return constants.ON_GOING
        elif not finished:
            self.log.debug("number of flows that is not done anything %d" % count)
            self.scheduling_mode = constants.CONGESTION_MODE
            return constants.ENCOUNTER_DEADLOCK
        else:
            current_mode = self.scheduling_mode
            self.scheduling_mode = constants.NORMAL_MODE
            if current_mode == constants.CONGESTION_MODE:
                return constants.FINISHED_WITH_DEADLOCK
            else:
                return constants.FINISHED_WITHOUT_DEADLOCK

    def has_not_pending_msg(self):
        self.log.debug("pending queue: %s" % str(self.no_of_pending_msgs))
        for queue_len in self.no_of_pending_msgs.values():
            if queue_len > 0:
                return False
        return True

    def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment):
        seg_path_id = l_segment.seg_path_id
        if not update_infos.has_key(seg_path_id):
            update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,
                                                      l_segment.flow_src,
                                                      l_segment.flow_dst)
        pair = (l_segment.init_sw, l_segment.old_seg[0])
        self.links_by_endpoints[pair].avail_cap += l_segment.vol
        # self.log.info("avail_cap of link %d->%d: %f, "
        #               "get from segment %s" % (l_segment.init_sw,
        #                                        l_segment.old_seg[0],
        #                                        self.links_by_endpoints[pair].avail_cap,
        #                                        str(l_segment.seg_path_id)))
        if len(l_segment.old_seg) > 1:
            for i in range(len(l_segment.old_seg) - 1):
                # self.log.debug("send to: %s" % l_segment.old_seg[i])
                next_sw = l_segment.old_seg[i + 1]
                update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id,
                                                                                          next_sw,
                                                                                          constants.REMOVE_NEXT)
            self.received_removed_msg[l_segment.seg_path_id] = set()
            l_segment.update_status = constants.SENT_REMOVING
        else:
            l_segment.update_status = constants.FINISH_ALL

    def are_all_moving_in_ops_finished(self, link):
        for u_op in link.to_adds + link.to_adds_loop:
            current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status
            if current_state == constants.NOTHING \
                    or current_state == constants.SENT_ADDING:
                return False
        return True

    def is_capable(self, seg_path_id):
        # TODO: Update remaining_vol_of_loop when adding or removing segment
        l_segment = self.segments_by_seg_path_id[seg_path_id]
        for endpoints in l_segment.new_link_seg:
            link = self.links_by_endpoints[endpoints]
            is_dependency_loop_op = False
            for op in link.to_adds_loop:
                if op.seg_path_id == seg_path_id:
                    is_dependency_loop_op = True
                    break
            is_add_only = False
            for op in link.to_adds_only:
                if op.seg_path_id == seg_path_id:
                    is_add_only = True
                    break
            if (not is_dependency_loop_op
                and (link.avail_cap - l_segment.vol
                     < self.remaining_vol_of_dependency_loop_on_link[endpoints])) \
                    or (is_dependency_loop_op and link.avail_cap < l_segment.vol) \
                    or (is_add_only and (not self.are_all_moving_in_ops_finished(link)
                                         or link.avail_cap < l_segment.vol)):
                return False
        self.log.debug("capable %s" % l_segment)
        return True
[((14, 8, 14, 62), 'misc.logger.getLogger', 'logger.getLogger', ({(14, 25, 14, 40): '"""cen_scheduler"""', (14, 42, 14, 61): 'constants.LOG_LEVEL'}, {}), "('cen_scheduler', constants.LOG_LEVEL)", False, 'from misc import constants, logger\n'), ((21, 36, 21, 49), 'collections.defaultdict', 'defaultdict', ({}, {}), '()', False, 'from collections import defaultdict\n'), ((22, 36, 22, 49), 'collections.defaultdict', 'defaultdict', ({}, {}), '()', False, 'from collections import defaultdict\n'), ((29, 24, 29, 41), 'collections.defaultdict', 'defaultdict', ({(29, 36, 29, 40): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((36, 36, 36, 49), 'collections.defaultdict', 'defaultdict', ({}, {}), '()', False, 'from collections import defaultdict\n'), ((37, 36, 37, 49), 'collections.defaultdict', 'defaultdict', ({}, {}), '()', False, 'from collections import defaultdict\n'), ((44, 24, 44, 41), 'collections.defaultdict', 'defaultdict', ({(44, 36, 44, 40): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((53, 15, 53, 78), 'misc.logger.getLogger', 'logger.getLogger', ({(53, 32, 53, 56): '"""Centralized Controller"""', (53, 58, 53, 77): 'constants.LOG_LEVEL'}, {}), "('Centralized Controller', constants.LOG_LEVEL)", False, 'from misc import constants, logger\n'), ((57, 8, 59, 97), 'ez_lib.ez_flow_tool.create_dependency_graph', 'ez_flow_tool.create_dependency_graph', (), '', False, 'from ez_lib import ez_flow_tool\n'), ((104, 23, 104, 35), 'time.clock', 'time.clock', ({}, {}), '()', False, 'import time\n'), ((321, 23, 321, 49), 'collections.defaultdict', 'defaultdict', ({(321, 35, 321, 48): 'CenUpdateInfo'}, {}), '(CenUpdateInfo)', False, 'from collections import defaultdict\n'), ((348, 23, 348, 49), 'collections.defaultdict', 'defaultdict', ({(348, 35, 348, 48): 'CenUpdateInfo'}, {}), '(CenUpdateInfo)', False, 'from collections import defaultdict\n'), ((25, 39, 25, 48), 'collections.deque', 'deque', ({(25, 45, 25, 47): '[]'}, {}), '([])', False, 'from collections import deque\n'), ((40, 39, 40, 48), 'collections.deque', 'deque', ({(40, 45, 40, 47): '[]'}, {}), '([])', False, 'from collections import deque\n'), ((56, 31, 56, 42), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((106, 50, 106, 62), 'time.clock', 'time.clock', ({}, {}), '()', False, 'import time\n'), ((338, 82, 338, 93), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((423, 40, 424, 83), 'ez_lib.ez_ob.CenUpdateInfo', 'CenUpdateInfo', ({(423, 54, 423, 65): 'seg_path_id', (423, 67, 423, 85): 'l_segment.flow_src', (424, 64, 424, 82): 'l_segment.flow_dst'}, {}), '(seg_path_id, l_segment.flow_src, l_segment.flow_dst)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((100, 16, 100, 109), 'ez_lib.ez_flow_tool.find_dependency_loop_for_link', 'ez_flow_tool.find_dependency_loop_for_link', ({(100, 59, 100, 63): 'link', (100, 65, 100, 83): 'links_by_endpoints', (100, 85, 100, 108): 'segments_by_seg_path_id'}, {}), '(link, links_by_endpoints,\n segments_by_seg_path_id)', False, 'from ez_lib import ez_flow_tool\n'), ((437, 79, 439, 112), 'ez_lib.ez_ob.UpdateNext', 'UpdateNext', ({(437, 90, 437, 101): 'seg_path_id', (438, 90, 438, 97): 'next_sw', (439, 90, 439, 111): 'constants.REMOVE_NEXT'}, {}), '(seg_path_id, next_sw, constants.REMOVE_NEXT)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((117, 48, 119, 81), 'ez_lib.ez_ob.CenUpdateInfo', 'CenUpdateInfo', ({(117, 62, 117, 73): 'seg_path_id', (118, 62, 118, 80): 'l_segment.flow_src', 
(119, 62, 119, 80): 'l_segment.flow_dst'}, {}), '(seg_path_id, l_segment.flow_src, l_segment.flow_dst)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((121, 65, 122, 102), 'ez_lib.ez_ob.UpdateNext', 'UpdateNext', ({(121, 76, 121, 97): 'l_segment.seg_path_id', (122, 76, 122, 78): 'sw', (122, 80, 122, 101): 'constants.REMOVE_NEXT'}, {}), '(l_segment.seg_path_id, sw, constants.REMOVE_NEXT)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((218, 66, 220, 99), 'ez_lib.ez_ob.UpdateNext', 'UpdateNext', ({(218, 77, 218, 88): 'seg_path_id', (219, 77, 219, 97): 'l_segment.new_seg[0]', (220, 77, 220, 98): 'constants.UPDATE_NEXT'}, {}), '(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((262, 66, 264, 99), 'ez_lib.ez_ob.UpdateNext', 'UpdateNext', ({(262, 77, 262, 88): 'seg_path_id', (263, 77, 263, 97): 'l_segment.new_seg[0]', (264, 77, 264, 98): 'constants.UPDATE_NEXT'}, {}), '(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((299, 66, 301, 99), 'ez_lib.ez_ob.UpdateNext', 'UpdateNext', ({(299, 77, 299, 88): 'seg_path_id', (300, 77, 300, 97): 'l_segment.new_seg[0]', (301, 77, 301, 98): 'constants.UPDATE_NEXT'}, {}), '(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((214, 52, 216, 85), 'ez_lib.ez_ob.CenUpdateInfo', 'CenUpdateInfo', ({(214, 66, 214, 77): 'seg_path_id', (215, 66, 215, 84): 'l_segment.flow_src', (216, 66, 216, 84): 'l_segment.flow_dst'}, {}), '(seg_path_id, l_segment.flow_src, l_segment.flow_dst)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((224, 73, 226, 103), 'ez_lib.ez_ob.UpdateNext', 'UpdateNext', ({(224, 84, 224, 95): 'seg_path_id', (225, 84, 225, 91): 'next_sw', (226, 84, 226, 102): 'constants.ADD_NEXT'}, {}), '(seg_path_id, next_sw, constants.ADD_NEXT)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((258, 52, 260, 85), 'ez_lib.ez_ob.CenUpdateInfo', 'CenUpdateInfo', ({(258, 66, 258, 77): 'seg_path_id', (259, 66, 259, 84): 'l_segment.flow_src', (260, 66, 260, 84): 'l_segment.flow_dst'}, {}), '(seg_path_id, l_segment.flow_src, l_segment.flow_dst)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((267, 73, 269, 103), 'ez_lib.ez_ob.UpdateNext', 'UpdateNext', ({(267, 84, 267, 95): 'seg_path_id', (268, 84, 268, 91): 'next_sw', (269, 84, 269, 102): 'constants.ADD_NEXT'}, {}), '(seg_path_id, next_sw, constants.ADD_NEXT)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((295, 52, 297, 85), 'ez_lib.ez_ob.CenUpdateInfo', 'CenUpdateInfo', ({(295, 66, 295, 77): 'seg_path_id', (296, 66, 296, 84): 'l_segment.flow_src', (297, 66, 297, 84): 'l_segment.flow_dst'}, {}), '(seg_path_id, l_segment.flow_src, l_segment.flow_dst)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n'), ((304, 73, 306, 103), 'ez_lib.ez_ob.UpdateNext', 'UpdateNext', ({(304, 84, 304, 95): 'seg_path_id', (305, 84, 305, 91): 'next_sw', (306, 84, 306, 102): 'constants.ADD_NEXT'}, {}), '(seg_path_id, next_sw, constants.ADD_NEXT)', False, 'from ez_lib.ez_ob import CenUpdateInfo, UpdateNext\n')]
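The handlers above drive each rerouted segment through a two-phase update: install the new path, collect UPDATED acknowledgements from every switch on it, then release capacity and tear down the old path while collecting REMOVED acknowledgements. A minimal, self-contained sketch of that lifecycle, with hypothetical stand-ins for the states in misc.constants and for the message plumbing:

# Condensed model of the segment update lifecycle (names are stand-ins).
NOTHING, SENT_ADDING, FINISH_ADDING, SENT_REMOVING, FINISH_ALL = range(5)


class SegmentLifecycle(object):
    def __init__(self, new_seg, old_seg):
        self.new_seg = new_seg      # switch ids on the new path
        self.old_seg = old_seg      # switch ids on the old path
        self.status = NOTHING
        self.acked = set()

    def send_adds(self):
        # mirrors check_and_send_possible_updates: push ADD_NEXT/UPDATE_NEXT
        self.status, self.acked = SENT_ADDING, set()

    def on_updated(self, sw):
        # mirrors check_and_do_next_update: every switch on the new path must ack
        self.acked.add(sw)
        if self.status == SENT_ADDING and self.acked == set(self.new_seg):
            self.status = FINISH_ADDING if self.old_seg else FINISH_ALL
            if self.status == FINISH_ADDING:
                # mirrors release_capacity_send_remove_msg_to_old_segment
                self.status, self.acked = SENT_REMOVING, set()

    def on_removed(self, sw):
        # mirrors remove_segment_and_check_to_update: the old path needs
        # len(old_seg) - 1 acknowledgements, as in the code above
        self.acked.add(sw)
        if self.status == SENT_REMOVING and len(self.acked) >= len(self.old_seg) - 1:
            self.status = FINISH_ALL


seg = SegmentLifecycle(new_seg=[1, 2, 3], old_seg=[1, 4, 3])
seg.send_adds()
for sw in (1, 2, 3):
    seg.on_updated(sw)
seg.on_removed(1)
seg.on_removed(4)
assert seg.status == FINISH_ALL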
clovadev/opencv-python
src/trackbar.py
f9c685f8dc658f630a9742f4dd55663bde03fe7d
import numpy as np
import cv2 as cv


def nothing(x):
    pass


# Create a black image, a window
img = np.zeros((300, 512, 3), np.uint8)
cv.namedWindow('image')

# create trackbars for color change
cv.createTrackbar('R', 'image', 0, 255, nothing)
cv.createTrackbar('G', 'image', 0, 255, nothing)
cv.createTrackbar('B', 'image', 0, 255, nothing)

# create switch for ON/OFF functionality
switch = 'OFF/ON'
cv.createTrackbar(switch, 'image', 0, 1, nothing)

while True:
    # get current positions of four trackbars
    r = cv.getTrackbarPos('R', 'image')
    g = cv.getTrackbarPos('G', 'image')
    b = cv.getTrackbarPos('B', 'image')
    s = cv.getTrackbarPos(switch, 'image')

    # black while the switch is off, the selected color while it is on
    if s == 0:
        img[:] = 0
    else:
        img[:] = [b, g, r]

    # display the image
    cv.imshow('image', img)
    if cv.waitKey(10) > 0:
        break

cv.destroyAllWindows()
[((10, 6, 10, 39), 'numpy.zeros', 'np.zeros', ({(10, 15, 10, 28): '(300, 512, 3)', (10, 30, 10, 38): 'np.uint8'}, {}), '((300, 512, 3), np.uint8)', True, 'import numpy as np\n'), ((11, 0, 11, 23), 'cv2.namedWindow', 'cv.namedWindow', ({(11, 15, 11, 22): '"""image"""'}, {}), "('image')", True, 'import cv2 as cv\n'), ((14, 0, 14, 48), 'cv2.createTrackbar', 'cv.createTrackbar', ({(14, 18, 14, 21): '"""R"""', (14, 23, 14, 30): '"""image"""', (14, 32, 14, 33): '(0)', (14, 35, 14, 38): '(255)', (14, 40, 14, 47): 'nothing'}, {}), "('R', 'image', 0, 255, nothing)", True, 'import cv2 as cv\n'), ((15, 0, 15, 48), 'cv2.createTrackbar', 'cv.createTrackbar', ({(15, 18, 15, 21): '"""G"""', (15, 23, 15, 30): '"""image"""', (15, 32, 15, 33): '(0)', (15, 35, 15, 38): '(255)', (15, 40, 15, 47): 'nothing'}, {}), "('G', 'image', 0, 255, nothing)", True, 'import cv2 as cv\n'), ((16, 0, 16, 48), 'cv2.createTrackbar', 'cv.createTrackbar', ({(16, 18, 16, 21): '"""B"""', (16, 23, 16, 30): '"""image"""', (16, 32, 16, 33): '(0)', (16, 35, 16, 38): '(255)', (16, 40, 16, 47): 'nothing'}, {}), "('B', 'image', 0, 255, nothing)", True, 'import cv2 as cv\n'), ((20, 0, 20, 49), 'cv2.createTrackbar', 'cv.createTrackbar', ({(20, 18, 20, 24): 'switch', (20, 26, 20, 33): '"""image"""', (20, 35, 20, 36): '(0)', (20, 38, 20, 39): '(1)', (20, 41, 20, 48): 'nothing'}, {}), "(switch, 'image', 0, 1, nothing)", True, 'import cv2 as cv\n'), ((39, 0, 39, 22), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ({}, {}), '()', True, 'import cv2 as cv\n'), ((24, 8, 24, 39), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', ({(24, 26, 24, 29): '"""R"""', (24, 31, 24, 38): '"""image"""'}, {}), "('R', 'image')", True, 'import cv2 as cv\n'), ((25, 8, 25, 39), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', ({(25, 26, 25, 29): '"""G"""', (25, 31, 25, 38): '"""image"""'}, {}), "('G', 'image')", True, 'import cv2 as cv\n'), ((26, 8, 26, 39), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', ({(26, 26, 26, 29): '"""B"""', (26, 31, 26, 38): '"""image"""'}, {}), "('B', 'image')", True, 'import cv2 as cv\n'), ((27, 8, 27, 42), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', ({(27, 26, 27, 32): 'switch', (27, 34, 27, 41): '"""image"""'}, {}), "(switch, 'image')", True, 'import cv2 as cv\n'), ((36, 4, 36, 27), 'cv2.imshow', 'cv.imshow', ({(36, 14, 36, 21): '"""image"""', (36, 23, 36, 26): 'img'}, {}), "('image', img)", True, 'import cv2 as cv\n'), ((37, 7, 37, 21), 'cv2.waitKey', 'cv.waitKey', ({(37, 18, 37, 20): '(10)'}, {}), '(10)', True, 'import cv2 as cv\n')]
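The demo above polls the sliders on every frame; cv.createTrackbar also passes the new position to its callback, so a variant can repaint only on change. A small sketch (not part of the original file):

import numpy as np
import cv2 as cv

img = np.zeros((300, 512, 3), np.uint8)
cv.namedWindow('image')


def on_change(_pos):
    # read all three sliders whenever any one of them moves
    b = cv.getTrackbarPos('B', 'image')
    g = cv.getTrackbarPos('G', 'image')
    r = cv.getTrackbarPos('R', 'image')
    img[:] = [b, g, r]


for channel in ('R', 'G', 'B'):
    cv.createTrackbar(channel, 'image', 0, 255, on_change)

while cv.waitKey(10) < 0:  # loop until any key is pressed
    cv.imshow('image', img)
cv.destroyAllWindows()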
ambertests/adventofcode
aoc_2015/src/day20.py
140ed1d71ed647d30d1e6572964cab1e89dfd105
from functools import reduce


# https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python
def factors(n):
    step = 2 if n % 2 else 1
    return set(reduce(list.__add__,
                      ([i, n // i] for i in range(1, int(n ** 0.5) + 1, step)
                       if not n % i)))


def solve(target):
    house_count = 0
    deliveries = {}
    complete = set()
    pt1 = 0
    pt2 = 0
    while pt1 == 0 or pt2 == 0:
        house_count += 1
        gifts1 = 0
        gifts2 = 0
        elves = factors(house_count)
        if pt1 == 0:
            gifts1 = sum(elves) * 10
            if gifts1 >= target:
                pt1 = house_count
        if pt2 == 0:
            working = elves.difference(complete)
            for elf in working:
                if elf in deliveries:
                    deliveries[elf] += 1
                    if deliveries[elf] == 50:
                        complete.add(elf)
                else:
                    deliveries[elf] = 1
            gifts2 = sum(working) * 11
            if gifts2 >= target:
                pt2 = house_count

    return pt1, pt2


# takes around 20s
pt1, pt2 = solve(29000000)
print("Part 1:", pt1)
print("Part 2:", pt2)
[]
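A quick sanity check of the divisor logic (Advent of Code 2015 day 20: elf e visits every e-th house, delivering 10 presents in part 1 and 11 in part 2, where each elf retires after 50 houses). House 8 is visited by elves {1, 2, 4, 8}:

assert factors(8) == {1, 2, 4, 8}
assert sum(factors(8)) * 10 == 150   # part 1 scoring for house 8
assert sum(factors(8)) * 11 == 165   # part 2 scoring, before any elf retires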
jean/labels
setup.py
dcb6f40fb4e222068e302202dd5d7d98b4771e4b
import pathlib

import setuptools


def read(*args: str) -> str:
    file_path = pathlib.Path(__file__).parent.joinpath(*args)
    return file_path.read_text("utf-8")


setuptools.setup(
    name="labels",
    version="0.3.0.dev0",
    author="Raphael Pierzina",
    author_email="[email protected]",
    maintainer="Raphael Pierzina",
    maintainer_email="[email protected]",
    license="MIT",
    url="https://github.com/hackebrot/labels",
    project_urls={
        "Repository": "https://github.com/hackebrot/labels",
        "Issues": "https://github.com/hackebrot/labels/issues",
    },
    description="CLI app for managing GitHub labels for Python 3.6 and newer. 📝",
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages("src"),
    package_dir={"": "src"},
    include_package_data=True,
    zip_safe=False,
    python_requires=">=3.6",
    install_requires=["click", "requests", "pytoml", "attrs"],
    entry_points={"console_scripts": ["labels = labels.cli:labels"]},
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Utilities",
    ],
    keywords=["github", "command-line"],
)
[((26, 13, 26, 44), 'setuptools.find_packages', 'setuptools.find_packages', ({(26, 38, 26, 43): '"""src"""'}, {}), "('src')", False, 'import setuptools\n'), ((6, 16, 6, 38), 'pathlib.Path', 'pathlib.Path', ({(6, 29, 6, 37): '__file__'}, {}), '(__file__)', False, 'import pathlib\n')]
caseywstark/colab
colab/__init__.py
e05293e45a657eda19d733bf05624a1613a7a9b7
# -*- coding: utf-8 -*-

__about__ = """
This project demonstrates a social networking site. It provides profiles,
friends, photos, blogs, tribes, wikis, tweets, bookmarks, swaps,
locations and user-to-user messaging.

In 0.5 this was called "complete_project".
"""
[]
quamilek/ralph
src/ralph/ui/forms/util.py
bf7231ea096924332b874718b33cd1f43f9c783b
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from ralph.business.models import Venture, VentureRole


def all_ventures():
    yield '', '---------'
    for v in Venture.objects.filter(show_in_ralph=True).order_by('path'):
        yield (
            v.id,
            "%s[%s] %s" % (
                '\u00A0' * 4 * v.path.count('/'),  # u00A0 == 'no-break space'
                v.symbol,
                v.name,
            )
        )


def all_roles():
    yield '', '---------'
    for r in VentureRole.objects.order_by(
        '-venture__is_infrastructure', 'venture__name',
        'parent__parent__name', 'parent__name', 'name'
    ):
        yield r.id, '{} / {}'.format(r.venture.name, r.full_name)
[((25, 13, 28, 13), 'ralph.business.models.VentureRole.objects.order_by', 'VentureRole.objects.order_by', ({(26, 16, 26, 45): '"""-venture__is_infrastructure"""', (26, 47, 26, 62): '"""venture__name"""', (27, 16, 27, 38): '"""parent__parent__name"""', (27, 40, 27, 54): '"""parent__name"""', (27, 56, 27, 62): '"""name"""'}, {}), "('-venture__is_infrastructure', 'venture__name',\n 'parent__parent__name', 'parent__name', 'name')", False, 'from ralph.business.models import Venture, VentureRole\n'), ((12, 13, 12, 55), 'ralph.business.models.Venture.objects.filter', 'Venture.objects.filter', (), '', False, 'from ralph.business.models import Venture, VentureRole\n')]
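Both generators yield (value, label) pairs with a blank placeholder first, which is exactly the shape Django choice fields expect. A plausible consumer looks like this (a sketch; the form class itself is hypothetical):

from django import forms

from ralph.ui.forms.util import all_ventures, all_roles


class AssignForm(forms.Form):
    # list(...) materializes the generators once, when the form class is defined
    venture = forms.ChoiceField(choices=list(all_ventures()))
    role = forms.ChoiceField(choices=list(all_roles()))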
matan-h/friendly
tests/syntax/missing_in_with_for.py
3ab0fc6541c837271e8865e247750007acdd18fb
for x range(4):
    print(x)
[]
eventprotocol/event-protocol-webapp
services/users/manage.py
38ccdc63bc744576ebb3631b7e17cfd4a09216b6
""" manage.py for flask application """ import unittest import coverage import os from flask.cli import FlaskGroup from project import create_app, db from project.api.models import User # Code coverage COV = coverage.Coverage( branch=True, include='project/*', omit=[ 'project/tests/*', 'project/config.py', ] ) COV.start() app = create_app() cli = FlaskGroup(create_app=create_app) @cli.command() def cov(): """ Runs the unit tests with coverage """ tests = unittest.TestLoader().discover('project/tests') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): COV.stop() COV.save() print('Coverage Summary:') COV.report() basedir = os.path.abspath(os.path.dirname(__file__)) covdir = os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir) print('HTML version: file://%s/index.html' % covdir) COV.erase() return 0 return -1 @cli.command() def recreate_db(): """ Destroys all db and recreates a new db """ db.drop_all() db.create_all() db.session.commit() @cli.command() def test(): """ Runs test without code coverage """ tests = unittest.TestLoader().discover( 'project/tests', pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return 0 else: return -1 @cli.command() def seed_db(): """ Seeds the database with some initial data """ user1 = User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username = "Meeting Room Of The Century" user1.email = "[email protected]" user1.city_country = "Singapore, SG" user1.tags = "Meeting Spaces" user1.about = '''This is the best meeting space you will ever see''' user1.seller_detail = '''We sell space''' user1.buyer_detail = '''We are not buying''' user2 = User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username = "Makeup Till You Breakup" user2.email = "[email protected]" user2.city_country = "Singapore, SG" user2.tags = "Stylist" user2.about = '''Reimagine your looks with us''' user2.seller_detail = '''We are serving looks tonight''' user2.buyer_detail = '''We are not buying''' user3 = User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username = "Heart Attack Buffet" user3.email = "[email protected]" user3.city_country = "Singapore, SG" user3.tags = "Buffet" user3.about = '''Eat till you get a heart attack''' user3.seller_detail = '''We sell food''' user3.buyer_detail = '''We are not buying''' user4 = User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username = "Pleasant Photography" user4.email = "[email protected]" user4.city_country = "Singapore, SG" user4.tags = "Photography" user4.about = ('We are a group of photographers specialized in wedding' 'photography. ' 'We have won numerous awards for our photos. ' 'We will capture your ' 'memories in ways you cannot imagine.') user4.seller_detail = '''We sell photos''' user4.buyer_detail = '''We are not buying''' user5 = User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username = "Epic Winebar" user5.email = "[email protected]" user5.city_country = "Singapore, SG" user5.tags = "Bar, Restaurant" user5.about = ('Award winnning winebar with the best selection of alcohol.' 'We serve delicious international cuisine, with fusion' 'dishes inspired from our travels. 
We are always ready for' 'your craziest events.') user5.seller_detail = '''We sell wine''' user5.buyer_detail = '''We are not buying''' user6 = User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username = "Dancers Who Dance" user6.email = "[email protected]" user6.city_country = "Singapore, SG" user6.tags = "Performer" user6.about = ('Dancers who dance are people who like to dance alot.' 'Give us music and we will dance for you.') user6.seller_detail = '''We sell dance''' user6.buyer_detail = '''We are not buying''' db.session.add(user1) db.session.add(user2) db.session.add(user3) db.session.add(user4) db.session.add(user5) db.session.add(user6) db.session.commit() if __name__ == '__main__': cli()
[((14, 6, 21, 1), 'coverage.Coverage', 'coverage.Coverage', (), '', False, 'import coverage\n'), ((25, 6, 25, 18), 'project.create_app', 'create_app', ({}, {}), '()', False, 'from project import create_app, db\n'), ((26, 6, 26, 39), 'flask.cli.FlaskGroup', 'FlaskGroup', (), '', False, 'from flask.cli import FlaskGroup\n'), ((56, 4, 56, 17), 'project.db.drop_all', 'db.drop_all', ({}, {}), '()', False, 'from project import create_app, db\n'), ((57, 4, 57, 19), 'project.db.create_all', 'db.create_all', ({}, {}), '()', False, 'from project import create_app, db\n'), ((58, 4, 58, 23), 'project.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from project import create_app, db\n'), ((149, 4, 149, 25), 'project.db.session.add', 'db.session.add', ({(149, 19, 149, 24): 'user1'}, {}), '(user1)', False, 'from project import create_app, db\n'), ((150, 4, 150, 25), 'project.db.session.add', 'db.session.add', ({(150, 19, 150, 24): 'user2'}, {}), '(user2)', False, 'from project import create_app, db\n'), ((151, 4, 151, 25), 'project.db.session.add', 'db.session.add', ({(151, 19, 151, 24): 'user3'}, {}), '(user3)', False, 'from project import create_app, db\n'), ((152, 4, 152, 25), 'project.db.session.add', 'db.session.add', ({(152, 19, 152, 24): 'user4'}, {}), '(user4)', False, 'from project import create_app, db\n'), ((153, 4, 153, 25), 'project.db.session.add', 'db.session.add', ({(153, 19, 153, 24): 'user5'}, {}), '(user5)', False, 'from project import create_app, db\n'), ((154, 4, 154, 25), 'project.db.session.add', 'db.session.add', ({(154, 19, 154, 24): 'user6'}, {}), '(user6)', False, 'from project import create_app, db\n'), ((156, 4, 156, 23), 'project.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from project import create_app, db\n'), ((42, 17, 42, 54), 'os.path.join', 'os.path.join', ({(42, 30, 42, 37): 'basedir', (42, 39, 42, 53): '"""tmp/coverage"""'}, {}), "(basedir, 'tmp/coverage')", False, 'import os\n'), ((34, 12, 34, 33), 'unittest.TestLoader', 'unittest.TestLoader', ({}, {}), '()', False, 'import unittest\n'), ((35, 13, 35, 49), 'unittest.TextTestRunner', 'unittest.TextTestRunner', (), '', False, 'import unittest\n'), ((41, 34, 41, 59), 'os.path.dirname', 'os.path.dirname', ({(41, 50, 41, 58): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((66, 12, 66, 33), 'unittest.TestLoader', 'unittest.TestLoader', ({}, {}), '()', False, 'import unittest\n'), ((68, 13, 68, 49), 'unittest.TextTestRunner', 'unittest.TextTestRunner', (), '', False, 'import unittest\n')]
erelcan/keras-transformer
keras_transformer/keras_transformer/training/custom_callbacks/CustomCheckpointer.py
ae88985dd4f1b5f91737e80c7e9c3157b60b4c4f
import os

from keras.callbacks import ModelCheckpoint

from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC
from keras_transformer.utils.io_utils import save_to_pickle


class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC):

    def __init__(self, workspace_path, artifacts, callbacks, **kwargs):
        super().__init__(os.path.join(workspace_path, "model-{epoch:01d}.h5"), **kwargs)

        self._workspace_path = workspace_path
        self._artifacts = artifacts
        self._completed_epoch = 0
        self._callbacks = callbacks

    def on_epoch_end(self, epoch, logs=None):
        super().on_epoch_end(epoch, logs)
        self._completed_epoch += 1

        self.update_artifacts()

        should_save = False
        if self.epochs_since_last_save == 0:
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current == self.best:
                    should_save = True
            else:
                should_save = True
        if should_save:
            save_to_pickle(self._artifacts,
                           os.path.join(self._workspace_path,
                                        "artifacts-" + str(epoch + 1) + ".pkl"))

    def update_artifacts(self):
        for callback in self._callbacks:
            self._artifacts["callbacks"][callback.get_name()] = callback.get_artifacts()
        self._artifacts["callbacks"][self.get_name()] = self.get_artifacts()

    def get_name(self):
        return self.__class__.__name__

    def get_artifacts(self):
        return {"best_score": self.best, "completed_epoch": self._completed_epoch}

    def prepare_from_artifacts(self, artifacts):
        self.best = artifacts["best_score"]
        self._completed_epoch = artifacts["completed_epoch"]
[((10, 25, 10, 77), 'os.path.join', 'os.path.join', ({(10, 38, 10, 52): 'workspace_path', (10, 54, 10, 76): '"""model-{epoch:01d}.h5"""'}, {}), "(workspace_path, 'model-{epoch:01d}.h5')", False, 'import os\n')]
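A plausible wiring of the checkpointer into a training run (a sketch: the toy model, the workspace path, and the {"callbacks": {}} artifact layout are assumptions based on how update_artifacts indexes the dict; extra kwargs pass straight through to ModelCheckpoint):

import os

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

os.makedirs("runs/exp1", exist_ok=True)  # hypothetical workspace directory

model = Sequential([Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

artifacts = {"callbacks": {}}            # update_artifacts() writes under this key
checkpointer = CustomCheckpointer(
    "runs/exp1",
    artifacts,
    callbacks=[],                        # other CustomCallbackABC instances, if any
    monitor="val_loss",                  # forwarded to ModelCheckpoint
    save_best_only=True,
)

x, y = np.random.rand(64, 4), np.random.rand(64, 1)
model.fit(x, y, validation_split=0.25, epochs=3, callbacks=[checkpointer])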
arashk7/Yolo5_Dataset_Generator
train_test_val.py
aeba58b51201b8521478c777b40c4d31f0c60be9
import os
import shutil

input_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5'
output_dir = r'E:\Dataset\zhitang\Dataset_Zhitang_Yolo5\ZhitangYolo5'

in_img_dir = os.path.join(input_dir, 'Images')
in_label_dir = os.path.join(input_dir, 'Labels')
out_img_dir = os.path.join(output_dir, 'images')
out_label_dir = os.path.join(output_dir, 'labels')

splits = {'train', 'test', 'valid'}

files = os.listdir(in_img_dir)
count = len(files)

for f in files:
    print(f)
    # copy each image from the source Images folder into the output images folder
    src = os.path.join(in_img_dir, f)
    dst = os.path.join(out_img_dir, f)
    shutil.copyfile(src, dst)
[]
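The script defines splits = {'train', 'test', 'valid'} but never uses it. A minimal sketch of the presumable intent, continuing with the variables defined above; the 80/10/10 ratios and the per-split images/labels subfolders are assumptions, not part of the original:

import random

random.shuffle(files)
n = len(files)
buckets = {
    'train': files[:int(n * 0.8)],
    'valid': files[int(n * 0.8):int(n * 0.9)],
    'test': files[int(n * 0.9):],
}

for split, names in buckets.items():
    img_dst_dir = os.path.join(out_img_dir, split)
    lbl_dst_dir = os.path.join(out_label_dir, split)
    os.makedirs(img_dst_dir, exist_ok=True)
    os.makedirs(lbl_dst_dir, exist_ok=True)
    for name in names:
        # YOLOv5 pairs each image with a same-named .txt label file
        label = os.path.splitext(name)[0] + '.txt'
        shutil.copyfile(os.path.join(in_img_dir, name), os.path.join(img_dst_dir, name))
        shutil.copyfile(os.path.join(in_label_dir, label), os.path.join(lbl_dst_dir, label))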
dauden1184/home-assistant
homeassistant/components/media_player/pjlink.py
f4c6d389b77d0efa86644e76604eaea5d21abdb5
""" Support for controlling projector via the PJLink protocol. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/media_player.pjlink/ """ import logging import voluptuous as vol from homeassistant.components.media_player import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__) CONF_ENCODING = 'encoding' DEFAULT_PORT = 4352 DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def setup_platform(hass, config, add_entities, discovery_info=None): """Set up the PJLink platform.""" host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if 'pjlink' not in hass.data: hass.data['pjlink'] = {} hass_data = hass.data['pjlink'] device_label = "{}:{}".format(host, port) if device_label in hass_data: return device = PjLinkDevice(host, port, name, encoding, password) hass_data[device_label] = device add_entities([device], True) def format_input_source(input_source_name, input_source_number): """Format input source for display in UI.""" return "{} {}".format(input_source_name, input_source_number) class PjLinkDevice(MediaPlayerDevice): """Representation of a PJLink device.""" def __init__(self, host, port, name, encoding, password): """Iinitialize the PJLink device.""" self._host = host self._port = port self._name = name self._password = password self._encoding = encoding self._muted = False self._pwstate = STATE_OFF self._current_source = None with self.projector() as projector: if not self._name: self._name = projector.get_name() inputs = projector.get_inputs() self._source_name_mapping = \ {format_input_source(*x): x for x in inputs} self._source_list = sorted(self._source_name_mapping.keys()) def projector(self): """Create PJLink Projector instance.""" from pypjlink import Projector projector = Projector.from_address( self._host, self._port, self._encoding) projector.authenticate(self._password) return projector def update(self): """Get the latest state from the device.""" with self.projector() as projector: pwstate = projector.get_power() if pwstate == 'off': self._pwstate = STATE_OFF else: self._pwstate = STATE_ON self._muted = projector.get_mute()[1] self._current_source = \ format_input_source(*projector.get_input()) @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the device.""" return self._pwstate @property def is_volume_muted(self): """Return boolean indicating mute status.""" return self._muted @property def source(self): """Return current input source.""" return self._current_source @property def source_list(self): """Return all available input sources.""" return self._source_list @property def supported_features(self): """Return projector supported features.""" return SUPPORT_PJLINK def turn_off(self): 
"""Turn projector off.""" with self.projector() as projector: projector.set_power('off') def turn_on(self): """Turn projector on.""" with self.projector() as projector: projector.set_power('on') def mute_volume(self, mute): """Mute (true) of unmute (false) media player.""" with self.projector() as projector: from pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source): """Set the input source.""" source = self._source_name_mapping[source] with self.projector() as projector: projector.set_input(*source)
[((20, 10, 20, 37), 'logging.getLogger', 'logging.getLogger', ({(20, 28, 20, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((28, 4, 28, 27), 'voluptuous.Required', 'vol.Required', ({(28, 17, 28, 26): 'CONF_HOST'}, {}), '(CONF_HOST)', True, 'import voluptuous as vol\n'), ((29, 4, 29, 49), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((30, 4, 30, 27), 'voluptuous.Optional', 'vol.Optional', ({(30, 17, 30, 26): 'CONF_NAME'}, {}), '(CONF_NAME)', True, 'import voluptuous as vol\n'), ((31, 4, 31, 57), 'voluptuous.Optional', 'vol.Optional', (), '', True, 'import voluptuous as vol\n'), ((32, 4, 32, 31), 'voluptuous.Optional', 'vol.Optional', ({(32, 17, 32, 30): 'CONF_PASSWORD'}, {}), '(CONF_PASSWORD)', True, 'import voluptuous as vol\n'), ((89, 20, 90, 51), 'pypjlink.Projector.from_address', 'Projector.from_address', ({(90, 12, 90, 22): 'self._host', (90, 24, 90, 34): 'self._port', (90, 36, 90, 50): 'self._encoding'}, {}), '(self._host, self._port, self._encoding)', False, 'from pypjlink import Projector\n')]
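Outside Home Assistant, the same pypjlink calls this platform relies on can drive a projector directly. A sketch using only the calls visible in the file above; the host, port, and password are placeholders:

from pypjlink import Projector

proj = Projector.from_address('192.168.1.50', 4352, 'utf-8')
proj.authenticate('secret')  # password may be None for unprotected devices
with proj as projector:
    print(projector.get_name(), projector.get_power())
    if projector.get_power() == 'off':
        projector.set_power('on')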
Kaushalya/algo_journal
leetcode/regex_matching.py
bcea8afda0dc86b36452378e3bcff9b0f57d6856
# Level: Hard
# Greedy single-pass attempt at regular-expression matching ('.' and '*').
# Note: without backtracking, '*' is handled heuristically as "repeats of
# the previous character", so this does not cover every valid pattern.


def isMatch(s: str, p: str) -> bool:
    if not p:
        return not s
    n_s = len(s)
    n_p = len(p)
    j = 0
    i = -1
    while i < n_s - 1:
        i = i + 1
        if j >= n_p:
            return False
        if p[j] == '*':
            # consume the run of repeated characters, staying in bounds
            while i < n_s - 1 and s[i] == s[i - 1]:
                i += 1
            j += 1
        if j >= n_p:
            # pattern exhausted before the string
            return False
        if p[j] == '.' or s[i] == p[j]:
            j += 1
            # continue
        elif s[i] != p[j] and j < n_p - 1:
            j += 2
        else:
            return False

    return True


if __name__ == "__main__":
    ss = 'abbbbbc'
    p = 'a*'
    print(isMatch(ss, p))
[]
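Even with bounds guards, the greedy scan above cannot backtrack, so patterns such as 'a*a' or 'c*a*b' are handled only by luck or not at all. For comparison, the standard O(m*n) dynamic-programming formulation of this problem (a separate helper name, to avoid clashing with the original):

def is_match_dp(s: str, p: str) -> bool:
    m, n = len(s), len(p)
    # dp[i][j] is True when s[:i] matches p[:j]
    dp = [[False] * (n + 1) for _ in range(m + 1)]
    dp[0][0] = True
    # patterns like a*, a*b* can match the empty string
    for j in range(2, n + 1):
        if p[j - 1] == '*':
            dp[0][j] = dp[0][j - 2]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if p[j - 1] == '*':
                # zero occurrences of p[j-2], or one more occurrence of it
                dp[i][j] = dp[i][j - 2] or \
                    (dp[i - 1][j] and (p[j - 2] == '.' or p[j - 2] == s[i - 1]))
            else:
                dp[i][j] = dp[i - 1][j - 1] and \
                    (p[j - 1] == '.' or p[j - 1] == s[i - 1])
    return dp[m][n]


assert is_match_dp('aaa', 'a*a')
assert is_match_dp('aab', 'c*a*b')
assert not is_match_dp('mississippi', 'mis*is*p*.')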
luzik/waliki
tests/factories.py
b7db696075ceebb5676be61f44e2d806cc472255
import factory
from django.contrib.auth.models import User, Group, Permission
from waliki.models import ACLRule, Page, Redirect


class UserFactory(factory.django.DjangoModelFactory):
    username = factory.Sequence(lambda n: u'user{0}'.format(n))
    password = factory.PostGenerationMethodCall('set_password', 'pass')
    email = factory.LazyAttribute(lambda o: '%[email protected]' % o.username)

    class Meta:
        model = User

    @factory.post_generation
    def groups(self, create, extracted, **kwargs):
        if not create:
            # Simple build, do nothing.
            return
        if extracted:
            # A list of groups was passed in, use them
            for group in extracted:
                self.groups.add(group)


class GroupFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = Group
    name = factory.Sequence(lambda n: "Group #%s" % n)

    @factory.post_generation
    def users(self, create, extracted, **kwargs):
        if not create:
            # Simple build, do nothing.
            return
        if extracted:
            # A list of users was passed in, use them
            for user in extracted:
                self.user_set.add(user)


class ACLRuleFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = ACLRule
    name = factory.Sequence(lambda n: u'Rule {0}'.format(n))
    slug = factory.Sequence(lambda n: u'page{0}'.format(n))

    @factory.post_generation
    def permissions(self, create, extracted, **kwargs):
        if not create:
            # Simple build, do nothing.
            return
        if extracted:
            # A list of permissions was passed in, use them
            for perm in extracted:
                if not isinstance(perm, Permission):
                    perm = Permission.objects.get(content_type__app_label='waliki',
                                                  codename=perm)
                self.permissions.add(perm)

    @factory.post_generation
    def users(self, create, extracted, **kwargs):
        if not create:
            # Simple build, do nothing.
            return
        if extracted:
            # A list of users was passed in, use them
            for user in extracted:
                self.users.add(user)

    @factory.post_generation
    def groups(self, create, extracted, **kwargs):
        if not create:
            # Simple build, do nothing.
            return
        if extracted:
            # A list of groups was passed in, use them
            for group in extracted:
                self.groups.add(group)


class PageFactory(factory.django.DjangoModelFactory):
    title = factory.Sequence(lambda n: u'Page {0}'.format(n))
    slug = factory.Sequence(lambda n: u'page{0}'.format(n))

    @factory.post_generation
    def raw(self, create, extracted, **kwargs):
        if not create:
            # Simple build, do nothing.
            return
        if extracted:
            self.raw = extracted

    class Meta:
        model = Page


class RedirectFactory(factory.django.DjangoModelFactory):
    old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n))
    new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n))

    class Meta:
        model = Redirect
[((8, 15, 8, 71), 'factory.PostGenerationMethodCall', 'factory.PostGenerationMethodCall', ({(8, 48, 8, 62): '"""set_password"""', (8, 64, 8, 70): '"""pass"""'}, {}), "('set_password', 'pass')", False, 'import factory\n'), ((9, 12, 9, 74), 'factory.LazyAttribute', 'factory.LazyAttribute', ({(9, 34, 9, 73): "lambda o: '%[email protected]' % o.username"}, {}), "(lambda o: '%[email protected]' % o.username)", False, 'import factory\n'), ((30, 11, 30, 54), 'factory.Sequence', 'factory.Sequence', ({(30, 28, 30, 53): "lambda n: 'Group #%s' % n"}, {}), "(lambda n: 'Group #%s' % n)", False, 'import factory\n'), ((61, 27, 61, 98), 'django.contrib.auth.models.Permission.objects.get', 'Permission.objects.get', (), '', False, 'from django.contrib.auth.models import User, Group, Permission\n')]
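Because the post_generation hooks accept an extracted list, related objects can be attached at call time. A typical test usage sketch (the test class and the 'view_page' codename are placeholders, not part of the original file):

from django.test import TestCase

from tests.factories import ACLRuleFactory, GroupFactory, UserFactory


class ACLRuleFactoryTest(TestCase):
    def test_rule_links_groups_and_permissions(self):
        user = UserFactory()
        group = GroupFactory(users=[user])
        rule = ACLRuleFactory(slug='home',
                              permissions=['view_page'],
                              groups=[group])
        self.assertIn(group, rule.groups.all())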
dalteocraft/nxt_editor
nxt_editor/commands.py
18992da7cfa89769568434ec08d787510e09f1c4
# Built-in import copy import logging import time # External from Qt.QtWidgets import QUndoCommand # Internal from nxt_editor import colors from nxt_editor import user_dir from nxt import nxt_path from nxt.nxt_layer import LAYERS, SAVE_KEY from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger) from nxt import nxt_io from nxt import GRID_SIZE import nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func): def wrapper(self): self.model.processing.emit(True) func(self) self.model.processing.emit(False) return wrapper class NxtCommand(QUndoCommand): def __init__(self, model): super(NxtCommand, self).__init__() self.model = model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me = {} def _get_effects(self, layer_path): """Gets the effected state for a given layer with context to this command. Since a single command can effect layers in different ways. :param layer_path: string of layer real path :return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo) """ first_eff_by_undo = False first_eff_by_redo = False try: first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo'] except KeyError: pass try: first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo'] except KeyError: pass return first_eff_by_undo, first_eff_by_redo def reset_layer_effected(self, layer_just_saved): """When the model marks a layer as saved we reset the class attr `_first_effected_by_redo` to False. This makes sure the layer is properly marked as unsaved even if we undo an action after saving it. :param layer_just_saved: string of layer real path :return: None """ eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved) where_were_at = self.model.undo_stack.index() cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1)) if cur_cmd is self: return if layer_just_saved in self._layers_effected_by_me: if eff_by_undo: # This command has already been marked as undo effects the # layer, meaning the layer has been saved and the undo queue # was moved to an index before this command and the same # layer was saved again. eff_by_redo = True eff_by_undo = False else: # Now the undo of this command effects the layer not the redo eff_by_redo = False eff_by_undo = True self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo, 'redo': eff_by_redo} def redo_effected_layer(self, layer_path): """Adds layer to the model's set of effected (unsaved) layers. If this command was the first to effect the layer we mark it as such by setting the class attr `_first_effected_by_redo` to True. :param layer_path: string of layer real path :return: None """ layer_unsaved = layer_path in self.model.effected_layers eff_by_undo, eff_by_redo = self._get_effects(layer_path) if not eff_by_undo and layer_unsaved: return if not eff_by_undo: self._layers_effected_by_me[layer_path] = {'undo': False, 'redo': True} self.model.effected_layers.add(layer_path) else: # Layer was saved and then undo was called, thus this redo has a # net zero effect on the layer try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a save action pass def undo_effected_layer(self, layer_path): """Removes layer from the model's set of effected (unsaved) layers. If the layer is not marked as effected in the model we mark it as effected. This case happens when undo is called after a layer is saved. 
:param layer_path: string of layer real path :return: None """ eff_by_undo, eff_by_redo = self._get_effects(layer_path) layer_saved = layer_path not in self.model.effected_layers if layer_saved: eff_by_undo = True # Set redo to False since now its been saved & the undo effects it eff_by_redo = False self.model.effected_layers.add(layer_path) elif eff_by_redo: try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a save action pass self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo, 'redo': eff_by_redo} class AddNode(NxtCommand): """Add a node to the graph""" def __init__(self, name, data, parent_path, pos, model, layer_path): super(AddNode, self).__init__(model) self.name = name self.data = data self.parent_path = parent_path self.layer_path = layer_path self.stage = model.stage # command data self.pos = pos or [0.0, 0.0] self.prev_selection = self.model.selection # resulting node self.node_path = None self.created_node_paths = [] @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) dirty_nodes = [] # delete any created nodes for node_path in self.created_node_paths: node = layer.lookup(node_path) if node is not None: _, dirty = self.stage.delete_node(node, layer, remove_layer_data=False) dirty_nodes += dirty node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if source_layer.layer_idx() > 0: rm_layer_data = True else: rm_layer_data = False comp_layer = self.model.comp_layer if node is not None: # delete node _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes += dirty dirty_nodes += self.created_node_paths dirty_nodes += [self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = self.prev_selection @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.created_node_paths = [] dirty_nodes = [] nodes, dirty = self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes += dirty self.node_path = layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = [self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added node: {}'.format(self.node_path)) class DeleteNode(NxtCommand): def __init__(self, node_path, model, layer_path, other_removed_nodes): """Delete node from the layer at the layer path and the comp layer. It is important to note that the other_removed_nodes list must be shared by other DeleteNode commands in a command macro. The list will be mutated by the stage as it deletes node, this behavior is depended upon! :param node_path: String of node path :param model: StageModel :param layer_path: String of layer realpath :param other_removed_nodes: list of node paths that will be deleted in this event loop. 
""" super(DeleteNode, self).__init__(model) self.layer_path = layer_path self.stage = model.stage # get undo data self.prev_selection = self.model.selection self.prev_starts = [] self.prev_breaks = {} self.node_path = node_path self.node_data = {} self.others = other_removed_nodes @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer parent = self.node_data['parent'] # We don't want to fix names because we know this node should be # named what it was named when it was deleted new_nodes, dirty = self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(), comp_layer=comp_layer, fix_names=False) if self.node_data['break']: self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._add_start_node(self.node_path, layer) # restore layer data pos = self.node_data.get('pos') if pos: self.model.top_layer.positions[self.node_path] = pos # This might be a bug? We don't touch the top layer in redo... self.undo_effected_layer(self.stage.top_layer.real_path) attr_display = self.node_data.get('attr_display') if attr_display is not None: self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints = self.prev_breaks ancestor_tuple = self.node_data.get('ancestor_child_order') if ancestor_tuple: ancestor_path, ancestor_child_order = ancestor_tuple ancestor = layer.lookup(ancestor_path) if ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection = self.prev_selection # Fixme: Does not account for rebuilding proxy nodes for the dirty nodes dirty_set = tuple(set(dirty)) self.undo_effected_layer(self.layer_path) if dirty_set != (self.node_path,): self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer self.node_data = {} self.prev_starts = self.model.get_start_nodes(layer) self.prev_breaks = user_dir.breakpoints dirty_nodes = [] node = layer.lookup(self.node_path) # get node info parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH) name = getattr(node, INTERNAL_ATTRS.NAME) is_break = self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data = {'parent': parent, 'name': name, 'pos': self.model.get_node_pos(self.node_path), 'break': is_break} closest_ancestor = layer.ancestors(self.node_path) if closest_ancestor: closest_ancestor = closest_ancestor[0] else: closest_ancestor = None closest_ancestor_path = layer.get_node_path(closest_ancestor) if closest_ancestor_path: ancestor_child_order = getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] = (closest_ancestor_path, ancestor_child_order[:]) # Attr display data attr_display = self.model.get_attr_display_state(self.node_path) if attr_display is not None: self.node_data['attr_display'] = attr_display # get layer data is_start = self.model.get_is_node_start(self.node_path, layer) self.node_data['start'] = is_start self.node_data['save_dict'] = get_node_as_dict(node) if self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._remove_start_node(self.node_path, layer) node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if 
source_layer.layer_idx() > 0: rm_layer_data = True else: rm_layer_data = False for p in self.others[:]: self.others += comp_layer.get_node_dirties(p) _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes += dirty + [self.node_path] if self.node_path in self.model.selection: fix_selection = self.model.selection[:] fix_selection.remove(self.node_path) self.model.selection = fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText("Delete node: {}".format(self.node_path)) class SetNodeAttributeData(NxtCommand): """Set attribute value""" def __init__(self, node_path, attr_name, data, model, layer_path): super(SetNodeAttributeData, self).__init__(model) self.node_path = node_path self.nice_attr_name = attr_name self.attr_name = attr_name self.data = data self.stage = model.stage self.layer_path = layer_path self.created_node_paths = [] self.remove_attr = False self.prev_data = {} self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value = None self.prev_selection = model.selection @processing def undo(self): start = time.time() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp = self.model.comp_layer dirties = [self.node_path] # delete any created nodes for node_path in self.created_node_paths: n = layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=layer, comp_layer=comp, remove_layer_data=False) n = layer.lookup(self.node_path) if n is not None: if self.remove_attr: self.stage.delete_node_attr(n, self.attr_name) dirties += comp.get_node_dirties(self.node_path) else: result = self.stage.node_setattr_data(node=n, attr=self.attr_name, layer=layer, create=False, comp_layer=comp, **self.prev_data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += result if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) changed_attrs = () for dirty in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs) if not self.recomp: changed = tuple([self.node_path] + self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection = self.prev_selection # undo_debug(self, start) @processing def redo(self): start = time.time() created_node = False self.prev_selection = self.model.selection layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp = self.model.comp_layer self.remove_attr = False self.created_node_paths = [] # get the node node = layer.lookup(self.node_path) dirties = [self.node_path] if node is None: parent_path = nxt_path.get_parent_path(self.node_path) name = nxt_path.node_name_from_node_path(self.node_path) if self.attr_name in INTERNAL_ATTRS.ALL: self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)} else: attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}} self.return_value = self.attr_name _, dirties = self.stage.add_node(name=name, data=attr_data, parent=parent_path, layer=layer.layer_idx(), comp_layer=comp, fix_names=False) # Fixme: 
Targeted parenting would avoid the need for a recomp if layer.descendants(self.node_path): self.recomp = True created_node = True self.created_node_paths += [self.node_path] node = layer.lookup(self.node_path) self.prev_data = self.stage.get_node_attr_data(node, self.attr_name, layer, quiet=True) if self.prev_data: self.prev_data = copy.deepcopy(self.prev_data) # set attribute value this also adds the attribute if it does not exist if not self.stage.node_attr_exists(node, self.attr_name): self.remove_attr = True if not created_node: self.return_value = self.stage.node_setattr_data(node, self.attr_name, layer=layer, create=True, comp_layer=comp, **self.data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += self.return_value if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: changed_attrs = () for dirty in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val = str(self.data.get(META_ATTRS.VALUE)) self.setText("Set {} to {}".format(attr_path, val)) # redo_debug(self, start) class SetNodeAttributeValue(SetNodeAttributeData): def __init__(self, node_path, attr_name, value, model, layer_path): data = {META_ATTRS.VALUE: value} super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data, model, layer_path) class RenameNode(SetNodeAttributeValue): """Rename node""" def __init__(self, node_path, name, model, layer_path): self.old_node_path = node_path layer = model.lookup_layer(layer_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path, layer_only=True) super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name, model, layer_path) def undo(self): self.model.about_to_rename.emit() self.prev_data['force'] = True super(RenameNode, self).undo() self.node_path = self.old_node_path self.model.selection = [self.node_path] def redo(self): self.model.about_to_rename.emit() super(RenameNode, self).redo() self.node_path = self.return_value self.model.selection = [self.node_path] if self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText("{} renamed to {}".format(self.old_node_path, self.return_value)) class DuplicateNodes(NxtCommand): """Duplicate nodes on this graph""" def __init__(self, node_paths, descendants, model, source_layer_path, target_layer_path): # TODO: We should make another base command class that can be used to # set multiple attr's data. That way duplicate can just be a # setattr. The way it works now we can only set one attr's data at a # time and duplicate needs to get local + INTERNAL number of attrs. 
super(DuplicateNodes, self).__init__(model) self.node_paths = node_paths self.descendants = descendants self.source_layer_path = source_layer_path self.target_layer_path = target_layer_path self.stage = model.stage # get undo data self.prev_selection = self.model.selection # resulting nodes self.new_node_paths = [] @processing def undo(self): target_layer = self.model.lookup_layer(self.target_layer_path) # delete duplicated nodes for node_path in self.new_node_paths: n = target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, target_layer, remove_layer_data=True) self.model.selection = self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing def redo(self): new_selection = [] self.new_node_paths = [] source_layer = self.model.lookup_layer(self.source_layer_path) target_layer = self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for node_path in self.node_paths: node = source_layer.lookup(node_path) # duplicate node new, dirty = self.stage.duplicate_node(node=node, layer=target_layer, descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) # process new nodes for new_node in new: # add new node path to the list and emit model signal new_node_path = target_layer.get_node_path(new_node) self.new_node_paths += [new_node_path] # self.model.node_added.emit(new_node_path) # set position has_parent = self.model.node_has_parent(new_node_path, target_layer) if not has_parent and new_node_path != node_path: pos = self.model.get_node_pos(node_path) pos = [pos[0] + 20, pos[1] + 20] self.model._set_node_pos(new_node_path, pos, layer=target_layer) self.model.selection = new_selection self.model.update_comp_layer(rebuild=True) if len(self.node_paths) == 1: nodes_str = self.node_paths[0] else: nodes_str = 'nodes' self.setText('Duplicated {}'.format(nodes_str)) class InstanceNode(SetNodeAttributeValue): """Instance nodes on this graph""" def __init__(self, node_path, model, source_layer_path, target_layer_path): src_name = nxt_path.node_name_from_node_path(node_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path) new_path = nxt_path.join_node_paths(parent_path, new_name) self.new_path = new_path super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path, model, target_layer_path) def redo(self): node_path = self.data.get(META_ATTRS.VALUE) layer = self.model.lookup_layer(self.layer_path) new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0), layer) self.model._set_node_pos(self.new_path, new_pos, layer) super(InstanceNode, self).redo() self.return_value = self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand): """Move nodes""" def __init__(self, node_positions, model, layer_path): super(SetNodesPosition, self).__init__(model) self.model = model self.layer_path = layer_path self.new_positions = node_positions self.old_positions = {} for path in self.new_positions.keys(): self.old_positions[path] = model.get_node_pos(path) @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) for node_path, old_pos in self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path) @processing def redo(self): delta_str = None layer = 
self.model.lookup_layer(self.layer_path) for node_path, new_pos in self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer) if not delta_str: pos = new_pos prev_pos = self.old_positions[node_path] # Only letting it set text once, relying on consistent delta. x_delta = pos[0] - prev_pos[0] y_delta = pos[1] - prev_pos[1] delta_str = '{}, {}'.format(x_delta, y_delta) if len(self.new_positions) == 1: nodes_str = node_path else: nodes_str = 'nodes' self.setText('Move {} {}'.format(nodes_str, delta_str)) self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand): """Select Nodes and Connections""" def __init__(self, paths, model): super(SetSelection, self).__init__() self.new_paths = paths self.model = model self.prev_paths = self.model.selection def undo(self): self.model.selection = self.prev_paths def redo(self): self.model.selection = self.new_paths self.setText('Set selection: {}'.format(str(self.new_paths))) class AddSelection(SetSelection): def __init__(self, paths, model): self.added_paths = paths curr_selection = model.selection new_paths = curr_selection + paths super(AddSelection, self).__init__(new_paths, model) def redo(self): super(AddSelection, self).redo() self.setText('Add {} to selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection): def __init__(self, paths, model): self.rem_paths = paths new_selection = model.selection[:] for path in paths: try: new_selection.remove(path) except ValueError: continue super(RemoveFromSelection, self).__init__(new_selection, model) def redo(self): super(RemoveFromSelection, self).redo() self.setText('Remove {} from selection'.format(self.rem_paths)) class LocalizeNodes(NxtCommand): """Localize nodes""" def __init__(self, node_paths, model): super(LocalizeNodes, self).__init__(model) self.node_paths = node_paths self.model = model self.stage = model.stage self.prev_selection = self.model.selection self.prev_node_data = {} self.created_node_paths = [] @processing def undo(self): for node_path in self.created_node_paths: n = self.model.target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False) layers = [self.model.target_layer] for node_path, all_data in self.prev_node_data.items(): apply_data = {} node = self.model.target_layer.lookup(node_path) if not node: continue data = all_data['data'] child_order = all_data['data'].get('child_order', []) apply_data['child_order'] = child_order apply_data['attributes'] = data.get('attributes', {}) attrs_to_keep = apply_data['attributes'].keys() apply_data['enabled'] = data.get('enabled') if data.get('instance'): apply_data['instance'] = data['instance'] self.stage.transfer_node_data(node, self.model.target_layer, apply_data, self.model.comp_layer) local_attrs = self.stage.get_node_local_attr_names(node_path, layers) for attr in local_attrs: if attr not in attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data = {} self.created_node_paths = [] layer = self.model.target_layer for node_path in self.node_paths: node_data = {} display_node = self.model.comp_layer.lookup(node_path) if not display_node: continue # add node if it doesn't exist on the target layer target_node = self.model.target_layer.lookup(node_path) if not target_node: new_nodes, new_paths, dirty = 
_add_node_hierarchy(node_path, self.model, layer)
                target_node = new_nodes[-1]
                self.created_node_paths += new_paths
                # self.model.node_added.emit(node_path)
            # preserve original data
            node_data['data'] = get_node_as_dict(target_node)
            # localize source node
            self.stage.transfer_node_data(target_node, self.model.target_layer,
                                          display_node, self.model.comp_layer)
            self.prev_node_data[node_path] = node_data
        self.model.update_comp_layer(rebuild=bool(self.created_node_paths))
        self.redo_effected_layer(layer.real_path)
        self.model.selection = self.prev_selection
        if len(self.node_paths) == 1:
            path_str = self.node_paths[0]
        else:
            path_str = str(self.node_paths)
        self.setText('Localize {}'.format(str(path_str)))


class LocalizeUserAttr(SetNodeAttributeData):
    """Localize a user attribute"""
    def __init__(self, node_path, attr_name, model, layer_path):
        node = model.comp_layer.lookup(node_path)
        data = model.stage.get_node_attr_data(node, attr_name,
                                              model.comp_layer)
        if META_ATTRS.SOURCE in data:
            data.pop(META_ATTRS.SOURCE)
        super(LocalizeUserAttr, self).__init__(node_path, attr_name, data,
                                               model, layer_path)


class LocalizeCompute(SetNodeAttributeValue):
    """Localize compute"""
    def __init__(self, node_path, model, layer_path):
        comp_layer = model.comp_layer
        display_node = comp_layer.lookup(node_path)
        code_lines = model.stage.get_node_code_lines(display_node, comp_layer)
        super(LocalizeCompute, self).__init__(node_path,
                                              INTERNAL_ATTRS.COMPUTE,
                                              code_lines, model, layer_path)

    def redo(self):
        super(LocalizeCompute, self).redo()
        self.setText("Localize compute on {}".format(self.node_path))


class LocalizeInstancePath(SetNodeAttributeValue):
    def __init__(self, node_path, model, layer_path):
        inst_path = model.get_node_instance_path(node_path, model.comp_layer,
                                                 expand=False)
        super(LocalizeInstancePath, self).__init__(node_path,
                                                   INTERNAL_ATTRS.INSTANCE_PATH,
                                                   inst_path, model,
                                                   layer_path)

    def redo(self):
        super(LocalizeInstancePath, self).redo()
        self.setText("Localize instance path to {}".format(self.node_path))


class RevertInstancePath(SetNodeAttributeValue):
    def __init__(self, node_path, model, layer_path):
        super(RevertInstancePath, self).__init__(node_path,
                                                 INTERNAL_ATTRS.INSTANCE_PATH,
                                                 None, model, layer_path)

    def redo(self):
        super(RevertInstancePath, self).redo()
        self.setText("Revert instance path on {}".format(self.node_path))


class LocalizeExecPath(SetNodeAttributeValue):
    def __init__(self, node_path, model, layer_path):
        exec_path = model.get_node_exec_in(node_path)
        super(LocalizeExecPath, self).__init__(node_path,
                                               INTERNAL_ATTRS.EXECUTE_IN,
                                               exec_path, model, layer_path)

    def redo(self):
        super(LocalizeExecPath, self).redo()
        self.setText("Localize exec input on {}".format(self.node_path))


class RevertExecPath(SetNodeAttributeValue):
    def __init__(self, node_path, model, layer_path):
        super(RevertExecPath, self).__init__(node_path,
                                             INTERNAL_ATTRS.EXECUTE_IN, None,
                                             model, layer_path)

    def redo(self):
        # Without the super call the attribute value is never actually reset.
        super(RevertExecPath, self).redo()
        self.setText("Revert exec input on {}".format(self.node_path))


class RevertNode(DeleteNode):
    """Revert node"""
    def __init__(self, node_path, model, layer_path, others):
        super(RevertNode, self).__init__(node_path, model, layer_path, others)
        self.rebuild = False  # Tells the delete command not to re-comp
        self.created_node_paths = []
        self.node_path = node_path

    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        # Remove our created empty nodes
        for node_path in self.created_node_paths:
            n = layer.lookup(node_path)
            if n is not None:
                self.stage.delete_node(n, layer, remove_layer_data=False)
        super(RevertNode, self).undo()
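        # Recomp so the restored node data is visible before the previous
        # selection is put back.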
self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection def redo(self): self.created_node_paths = [] super(RevertNode, self).redo() layer = self.model.lookup_layer(self.layer_path) # Re-create the node as an empty node new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path, self.model, layer) self.created_node_paths += new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection = self.prev_selection self.setText('Revert {}'.format(self.node_path)) class ParentNodes(NxtCommand): """Parent Nodes""" def __init__(self, node_paths, parent_node_path, model): super(ParentNodes, self).__init__(model) self.parent_node_path = parent_node_path self.parent_node = None self.model = model self.stage = model.stage self.node_paths = node_paths # resulting nodes self.node_path_data = {} self.new_node_paths = [] self.created_node_paths = [] # get node selection for undo self.prev_selection = self.model.selection # get previous node data for all child nodes for undo self.prev_node_data = {} @processing def undo(self): layer = self.model.target_layer self.undo_effected_layer(layer.real_path) # undo parent common_parent_nodes = {} for old_path, node_data in self.prev_node_data.items(): prev_parent_path = node_data['parent'] prev_parent_node = layer.lookup(prev_parent_path) new_path = self.node_path_data[old_path] node = layer.lookup(new_path) if prev_parent_path not in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] = {node: old_path} else: common_parent_nodes[prev_parent_path][node] = old_path child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple: ancestor_path, child_order = child_order_tuple ancestor = layer.lookup(ancestor_path) if ancestor: self.stage.set_node_child_order(ancestor, child_order, layer) if new_path in list(self.model.top_layer.positions.keys()): source_layer = self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for parent_path, nodes_dict in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer) for parent_path, nodes_dict in common_parent_nodes.items(): for node, old_path in nodes_dict.items(): node_data = self.prev_node_data[old_path] # restore name prev_name = node_data['name'] name = getattr(node, INTERNAL_ATTRS.NAME) if name != prev_name: self.stage.set_node_name(node, name=prev_name, layer=layer, force=True) # restore position if self.parent_node_path != nxt_path.WORLD: prev_pos = node_data['pos'] source_layer = self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos, layer=source_layer) # delete any created nodes for node_path in self.created_node_paths: node = layer.lookup(node_path) if node is not None: self.stage.delete_node(node, layer) idx = 0 for old_node_path in self.node_paths: new_node_path = self.new_node_paths[idx] attr_state = self.model.remove_attr_display_state(new_node_path) if attr_state is not None: self.model._set_attr_display_state(old_node_path, attr_state) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data = {} self.node_path_data = {} self.new_node_paths = [] self.created_node_paths = [] nodes = [] layer = self.model.target_layer self.redo_effected_layer(layer.real_path) for node_path in self.node_paths: node = layer.lookup(node_path) name = getattr(node, INTERNAL_ATTRS.NAME) parent_path = getattr(node, 
INTERNAL_ATTRS.PARENT_PATH)
            node_data = self.stage.get_node_data(node, layer)
            node_data['pos'] = self.model.get_node_pos(node_path)
            node_data['name'] = name
            node_data['parent'] = parent_path
            parent_node = layer.lookup(parent_path)
            ancestor_path = parent_path
            child_order = []
            if parent_node:
                child_order = getattr(parent_node,
                                      INTERNAL_ATTRS.CHILD_ORDER)
            else:
                ancestors = layer.ancestors(node_path)
                if ancestors:
                    ancestor = ancestors[0]
                    ancestor_path = layer.get_node_path(ancestor)
                    child_order = self.stage.get_node_child_order(ancestor)
            node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path,
                                                     child_order]
            self.prev_node_data[node_path] = node_data
            nodes += [node]
        # get current node hierarchy information for each node. each node
        # path is placed in a list of descendants for each top node so when
        # they are un-parented each node can be placed visually beside its
        # original top node.
        node_hierarchy_data = {}
        if self.parent_node_path == nxt_path.WORLD:
            for node_path in self.node_paths:
                node = layer.lookup(node_path)
                top_node = self.stage.get_top_node(node,
                                                   self.model.target_layer)
                if top_node is None:
                    top_node = node
                top_node_path = layer.get_node_path(top_node)
                top_node_descendant_list = node_hierarchy_data.get(
                    top_node_path, [])
                top_node_descendant_list += [node]
                node_hierarchy_data[top_node_path] = top_node_descendant_list
            if not node_hierarchy_data:
                return
        # parent
        self.node_path_data = self.stage.parent_nodes(nodes,
                                                      self.parent_node_path,
                                                      layer)
        self.new_node_paths = list(self.node_path_data.values())
        idx = 0
        for new_node_path in self.new_node_paths:
            old_node_path = self.node_paths[idx]
            attr_state = self.model.remove_attr_display_state(old_node_path)
            if attr_state is not None:
                self.model._set_attr_display_state(new_node_path, attr_state)
            # set position for un-parent
            if self.parent_node_path == nxt_path.WORLD:
                old_root = nxt_path.get_root_path(old_node_path)
                new_pos = self.model.get_pos_offset(old_root,
                                                    (GRID_SIZE * 14,
                                                     GRID_SIZE),
                                                    self.model.top_layer)
                self.model._set_node_pos(new_node_path, new_pos, layer)
            idx += 1
        self.model.update_comp_layer(rebuild=True)
        self.model.selection = list(self.node_path_data.values())
        if len(self.node_paths) == 1:
            path_str = self.node_paths[0]
        else:
            path_str = str(self.node_paths)
        self.setText("Parent {} to {}".format(path_str,
                                              self.parent_node_path))


class AddAttribute(SetNodeAttributeData):
    """Add an attribute to a node."""
    def __init__(self, node_path, attr_name, value, model, layer_path):
        data = {META_ATTRS.VALUE: value}
        super(AddAttribute, self).__init__(node_path, attr_name, data,
                                           model, layer_path)

    def redo(self):
        super(AddAttribute, self).redo()
        self.remove_attr = True
        self.setText("Add {} attr to {}".format(self.attr_name,
                                                self.node_path))


class DeleteAttribute(AddAttribute):
    """Delete attribute on a node"""
    def __init__(self, node_path, attr_name, model, layer_path):
        super(DeleteAttribute, self).__init__(node_path, attr_name, None,
                                              model, layer_path)
        # Get the data to be set if undo is called
        layer = self.model.lookup_layer(self.layer_path)
        node = layer.lookup(self.node_path)
        self.data = self.stage.get_node_attr_data(node, self.attr_name,
                                                  layer)

    def undo(self):
        super(DeleteAttribute, self).redo()
        layer = self.model.lookup_layer(self.layer_path)
        self.undo_effected_layer(layer.real_path)

    def redo(self):
        # Overload remove attr here to ensure attr is deleted
        self.remove_attr = True
        super(DeleteAttribute, self).undo()
        layer = self.model.lookup_layer(self.layer_path)
        self.redo_effected_layer(layer.real_path)
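        # DeleteAttribute reuses AddAttribute by swapping the inherited
        # undo/redo, so redo() removes the attribute and undo() re-adds it.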
self.setText("Remove {} attr from {}".format(self.attr_name, self.node_path)) class RevertCompute(SetNodeAttributeValue): """Revert compute""" def __init__(self, node_path, model, layer_path): super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [], model, layer_path) def redo(self): super(RevertCompute, self).redo() self.setText("Revert compute on {}".format(self.node_path)) class RenameAttribute(NxtCommand): """Rename attribute""" def __init__(self, node_path, attr_name, new_attr_name, model, layer_path): super(RenameAttribute, self).__init__(model) self.node_path = node_path self.attr_name = attr_name self.new_attr_name = new_attr_name self.model = model self.stage = model.stage self.layer_path = layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path) def rename_attribute(self, layer, attr_name, new_attr_name): node = layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name, new_attr_name, layer) self.model.update_comp_layer() old_name = nxt_path.make_attr_path(self.node_path, attr_name) new_name = nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText("Rename {} to {}".format(old_name, new_name)) class SetAttributeComment(SetNodeAttributeData): """Set attribute comment""" def __init__(self, node_path, attr_name, comment, model, layer_path): data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment, self).__init__(node_path, attr_name, data, model, layer_path) def redo(self): super(SetAttributeComment, self).redo() attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText("Changed comment on {}".format(attr_path)) class SetCompute(SetNodeAttributeValue): """Set node code value""" def __init__(self, node_path, code_lines, model, layer_path): super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(SetCompute, self).redo() self.setText("Changed compute on {}".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue): """Set node comment""" def __init__(self, node_path, comment, model, layer_path): super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment, model, layer_path) def redo(self): super(SetNodeComment, self).redo() self.setText("Changed comment on {}".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue): """Set node instance""" def __init__(self, node_path, instance_path, model, layer_path): super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model, layer_path) def redo(self): super(SetNodeInstance, self).redo() txt = ("Set inst path on " "{} to {}".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue): """Set node enabled state""" def __init__(self, node_path, value, model, layer_path): super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value, model, layer_path) def redo(self): super(SetNodeEnabledState, self).redo() if self.data.get(META_ATTRS.VALUE): self.setText("Enabled {}".format(self.node_path)) else: self.setText("Disabled {}".format(self.node_path)) class SetNodeCollapse(NxtCommand): """Set the node 
collapse state""" def __init__(self, node_paths, value, model, layer_path): super(SetNodeCollapse, self).__init__(model) self.node_paths = node_paths self.value = value self.model = model self.stage = model.stage self.layer_path = layer_path self.prev_values = {} @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) for node_path, prev_value in self.prev_values.items(): layer.collapse[node_path] = prev_value self.model.comp_layer.collapse[node_path] = prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values = {} for np in self.node_paths: self.prev_values[np] = self.model.get_node_collapse(np, layer) for node_path in self.node_paths: layer.collapse[node_path] = self.value self.model.comp_layer.collapse[node_path] = self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText("Collapsed {}".format(path_str)) else: self.setText("Expanded {}".format(path_str)) class SetNodeExecuteSources(SetNodeAttributeValue): """Set node execute sources""" def __init__(self, node_path, exec_source, model, layer_path): super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_source, model, layer_path) def redo(self): super(SetNodeExecuteSources, self).redo() val = self.data.get(META_ATTRS.VALUE) if val is None: self.setText("Removed exec input for {}".format(self.node_path)) return self.setText("Set {} exec input to {}".format(self.node_path, val)) class SetNodeBreakPoint(QUndoCommand): """Set node as a break point""" def __init__(self, node_paths, value, model, layer_path): super(SetNodeBreakPoint, self).__init__() self.node_paths = node_paths self.value = value self.model = model self.layer_path = layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if not self.value: func = self.model._add_breakpoint else: func = self.model._remove_breakpoint for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if self.value: func = self.model._add_breakpoint else: func = self.model._remove_breakpoint for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText("Add breakpoint to {}".format(path_str)) else: self.setText("Remove breakpoint from {}".format(path_str)) class ClearBreakpoints(QUndoCommand): """Clear all the breakpoints for a given layer""" def __init__(self, model, layer_path): super(ClearBreakpoints, self).__init__() self.model = model self.layer_path = layer_path self.prev_breaks = [] @processing def undo(self): user_dir.breakpoints[self.layer_path] = self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def redo(self): self.prev_breaks = user_dir.breakpoints.get(self.layer_path, []) if self.layer_path in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText("Clear all breakpoints") class SetNodeStartPoint(SetNodeAttributeValue): """Set this node as the execution start 
point""" def __init__(self, node_path, value, model, layer_path): super(SetNodeStartPoint, self).__init__(node_path, INTERNAL_ATTRS.START_POINT, value, model, layer_path) class SetNodeChildOrder(SetNodeAttributeValue): """Set node child order""" def __init__(self, node_path, child_order, model, layer_path): super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order, model, layer_path) def redo(self): super(SetNodeChildOrder, self).redo() self.setText("Change child order on {}".format(self.node_path)) class SetLayerAlias(NxtCommand): """Set Layer Alias""" def __init__(self, alias, layer_path, model): super(SetLayerAlias, self).__init__(model) self.layer_path = layer_path self.alias = alias self.old_alias = '' self.model = model self.stage = model.stage @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_alias = layer.get_alias(local=True) layer.set_alias(self.alias) else: self.old_alias = layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) self.setText("Set {} alias to {}".format(layer.filepath, self.alias)) class NewLayer(NxtCommand): """Add new layer""" def __init__(self, file_path, file_name, idx, model, chdir): super(NewLayer, self).__init__(model) self.new_layer_path = None self.model = model self.stage = model.stage self.insert_idx = idx self.file_path = file_path self.file_name = file_name self.chdir = chdir @processing def undo(self): new_layer = self.model.lookup_layer(self.new_layer_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0 < self.insert_idx <= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS] open_layer_colors = [] for layer in self.stage._sub_layers: color = layer.color if color: color = color.lower() open_layer_colors += [color] layer_color = layer_color_index[0] for c in layer_color_index: if c not in open_layer_colors: layer_color = c break real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data = {"parent_layer": parent_layer, SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS: self.file_name } new_layer = self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path = new_layer.real_path self.redo_effected_layer(new_layer.real_path) # Fixme: The next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText("New layer {}".format(self.new_layer_path)) class ReferenceLayer(NxtCommand): """Refernce existing layer""" 
def __init__(self, file_path, idx, model, chdir): super(ReferenceLayer, self).__init__(model) self.model = model self.stage = model.stage self.insert_idx = idx self.file_path = file_path self.real_path = nxt_path.full_file_expand(self.file_path, chdir) @processing def undo(self): new_layer = self.model.lookup_layer(self.real_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0 < self.insert_idx <= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_data = nxt_io.load_file_data(self.real_path) extra_data = {"parent_layer": parent_layer, "filepath": self.file_path, "real_path": self.real_path, "alias": layer_data['name'] } layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) # Fixme: The next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText("Added reference to {}".format(self.real_path)) class RemoveLayer(ReferenceLayer): """Remove existing layer""" def __init__(self, layer_path, model): idx = model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path, idx, model, None) self.text = "Removed reference to {}".format(layer_path) @processing def undo(self): super(RemoveLayer, self).redo() self.setText(self.text) @processing def redo(self): super(RemoveLayer, self).undo() self.setText(self.text) class MuteToggleLayer(NxtCommand): """Toggles muting an existing layer""" def __init__(self, layer_path, model): super(MuteToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model self.layer_paths = [] def undo(self): self.toggle_state() for layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths = [] self.toggle_state() for layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state = not layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText("Toggle {} muted.".format(layer.get_alias())) class SoloToggleLayer(NxtCommand): """Toggles soloing an existing layer""" def __init__(self, layer_path, model): super(SoloToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model self.layer_paths = [] def undo(self): self.toggle_state() for layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths = [] self.toggle_state() for layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state = not layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_soloed(local=False) 
self.model.top_layer.set_solo_over(layer.filepath, state)
            self.layer_paths.append(self.model.top_layer.real_path)
        self.model.update_comp_layer(rebuild=True)
        self.model.layer_solo_changed.emit((self.layer_path,))
        self.setText("Toggle {} soloed.".format(layer.get_alias()))


class SetLayerColor(NxtCommand):
    def __init__(self, color, layer_path, model):
        """Sets the color for a given layer; if the layer is not a top layer
        the top layer stores an override.
        :param color: string of the new layer color
        :param layer_path: real path of layer
        :param model: StageModel
        """
        super(SetLayerColor, self).__init__(model)
        self.layer_path = layer_path
        self.color = color
        self.old_color = ''
        self.model = model
        self.stage = model.stage

    @processing
    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        if layer is self.model.top_layer:
            layer.color = self.old_color
        else:
            layer.set_color_over(self.old_color)
        self.undo_effected_layer(self.model.top_layer.real_path)
        self.model.layer_color_changed.emit(self.layer_path)

    @processing
    def redo(self):
        layer = self.model.lookup_layer(self.layer_path)
        if layer is self.model.top_layer:
            self.old_color = layer.get_color(local=True)
            layer.color = self.color
        else:
            self.old_color = layer.get_color(fallback_to_local=False)
            layer.set_color_over(self.color)
        self.redo_effected_layer(self.model.top_layer.real_path)
        self.model.layer_color_changed.emit(self.layer_path)
        self.setText("Set {} color to {}".format(layer.filepath, self.color))


def _add_node_hierarchy(base_node_path, model, layer):
    stage = model.stage
    comp_layer = model.comp_layer
    new_node_paths = []
    new_nodes = []
    node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path)
    new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy,
                                                     parent=None, layer=layer,
                                                     comp_layer=comp_layer)
    for nn_p, n in new_node_table:
        display_node = comp_layer.lookup(nn_p)
        if display_node is not None:
            display_child_order = getattr(display_node,
                                          INTERNAL_ATTRS.CHILD_ORDER)
            old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER)
            new_child_order = list_merger(display_child_order,
                                          old_child_order)
            setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order)
        new_node_paths += [nn_p]
        new_nodes += [n]
    return new_nodes, new_node_paths, dirty


def undo_debug(cmd, start):
    update_time = str(int(round((time.time() - start) * 1000)))
    logger.debug("Undo " + cmd.text() + " | " + update_time + "ms")


def redo_debug(cmd, start):
    update_time = str(int(round((time.time() - start) * 1000)))
    logger.debug(cmd.text() + " | " + update_time + "ms")
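# --- Illustrative sketch (an assumption, not part of the original module) ---
# A minimal command following the pattern used throughout this file: subclass
# NxtCommand, wrap undo/redo in @processing, record the layers you touch via
# undo_effected_layer/redo_effected_layer, and call setText() so the undo
# stack gets a readable label. "TouchNodeExample" and its no-op behavior are
# hypothetical.
class TouchNodeExample(NxtCommand):
    def __init__(self, node_path, model, layer_path):
        super(TouchNodeExample, self).__init__(model)
        self.node_path = node_path
        self.layer_path = layer_path

    @processing
    def undo(self):
        layer = self.model.lookup_layer(self.layer_path)
        self.undo_effected_layer(layer.real_path)

    @processing
    def redo(self):
        layer = self.model.lookup_layer(self.layer_path)
        self.redo_effected_layer(layer.real_path)
        self.setText('Touched {}'.format(self.node_path))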
[((20, 9, 20, 50), 'logging.getLogger', 'logging.getLogger', ({(20, 27, 20, 49): 'nxt_editor.LOGGER_NAME'}, {}), '(nxt_editor.LOGGER_NAME)', False, 'import logging\n'), ((1657, 21, 1657, 72), 'nxt.nxt_path.str_path_to_node_namespace', 'nxt_path.str_path_to_node_namespace', ({(1657, 57, 1657, 71): 'base_node_path'}, {}), '(base_node_path)', False, 'from nxt import nxt_path\n'), ((307, 38, 307, 60), 'nxt.nxt_node.get_node_as_dict', 'get_node_as_dict', ({(307, 55, 307, 59): 'node'}, {}), '(node)', False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((357, 16, 357, 27), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((404, 16, 404, 27), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((466, 20, 466, 80), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', ({(466, 44, 466, 58): 'self.node_path', (466, 60, 466, 79): 'self.nice_attr_name'}, {}), '(self.node_path, self.nice_attr_name)', False, 'from nxt import nxt_path\n'), ((485, 22, 485, 57), 'nxt.nxt_path.get_parent_path', 'nxt_path.get_parent_path', ({(485, 47, 485, 56): 'node_path'}, {}), '(node_path)', False, 'from nxt import nxt_path\n'), ((592, 19, 592, 63), 'nxt.nxt_path.node_name_from_node_path', 'nxt_path.node_name_from_node_path', ({(592, 53, 592, 62): 'node_path'}, {}), '(node_path)', False, 'from nxt import nxt_path\n'), ((593, 22, 593, 57), 'nxt.nxt_path.get_parent_path', 'nxt_path.get_parent_path', ({(593, 47, 593, 56): 'node_path'}, {}), '(node_path)', False, 'from nxt import nxt_path\n'), ((597, 19, 597, 66), 'nxt.nxt_path.join_node_paths', 'nxt_path.join_node_paths', ({(597, 44, 597, 55): 'parent_path', (597, 57, 597, 65): 'new_name'}, {}), '(parent_path, new_name)', False, 'from nxt import nxt_path\n'), ((1142, 19, 1142, 69), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', ({(1142, 43, 1142, 57): 'self.node_path', (1142, 59, 1142, 68): 'attr_name'}, {}), '(self.node_path, attr_name)', False, 'from nxt import nxt_path\n'), ((1143, 19, 1143, 73), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', ({(1143, 43, 1143, 57): 'self.node_path', (1143, 59, 1143, 72): 'new_attr_name'}, {}), '(self.node_path, new_attr_name)', False, 'from nxt import nxt_path\n'), ((1158, 20, 1158, 80), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', ({(1158, 44, 1158, 58): 'self.node_path', (1158, 60, 1158, 79): 'self.nice_attr_name'}, {}), '(self.node_path, self.nice_attr_name)', False, 'from nxt import nxt_path\n'), ((1347, 27, 1347, 72), 'nxt_editor.user_dir.breakpoints.get', 'user_dir.breakpoints.get', ({(1347, 52, 1347, 67): 'self.layer_path', (1347, 69, 1347, 71): '[]'}, {}), '(self.layer_path, [])', False, 'from nxt_editor import user_dir\n'), ((1459, 20, 1459, 79), 'nxt.nxt_path.full_file_expand', 'nxt_path.full_file_expand', (), '', False, 'from nxt import nxt_path\n'), ((1485, 25, 1485, 73), 'nxt.nxt_path.full_file_expand', 'nxt_path.full_file_expand', ({(1485, 51, 1485, 65): 'self.file_path', (1485, 67, 1485, 72): 'chdir'}, {}), '(self.file_path, chdir)', False, 'from nxt import nxt_path\n'), ((1505, 21, 1505, 58), 'nxt.nxt_io.load_file_data', 'nxt_io.load_file_data', ({(1505, 43, 1505, 57): 'self.real_path'}, {}), '(self.real_path)', False, 'from nxt import nxt_io\n'), ((385, 24, 385, 70), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', ({(385, 48, 385, 53): 'dirty', (385, 55, 385, 69): 'self.attr_name'}, {}), '(dirty, self.attr_name)', False, 'from nxt import nxt_path\n'), ((416, 26, 416, 66), 
'nxt.nxt_path.get_parent_path', 'nxt_path.get_parent_path', ({(416, 51, 416, 65): 'self.node_path'}, {}), '(self.node_path)', False, 'from nxt import nxt_path\n'), ((417, 19, 417, 68), 'nxt.nxt_path.node_name_from_node_path', 'nxt_path.node_name_from_node_path', ({(417, 53, 417, 67): 'self.node_path'}, {}), '(self.node_path)', False, 'from nxt import nxt_path\n'), ((438, 29, 438, 58), 'copy.deepcopy', 'copy.deepcopy', ({(438, 43, 438, 57): 'self.prev_data'}, {}), '(self.prev_data)', False, 'import copy\n'), ((767, 32, 767, 61), 'nxt.nxt_node.get_node_as_dict', 'get_node_as_dict', ({(767, 49, 767, 60): 'target_node'}, {}), '(target_node)', False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((1152, 16, 1152, 58), 'nxt.nxt_node.META_ATTRS.as_save_key', 'META_ATTRS.as_save_key', ({(1152, 39, 1152, 57): 'META_ATTRS.COMMENT'}, {}), '(META_ATTRS.COMMENT)', False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((1349, 12, 1349, 53), 'nxt_editor.user_dir.breakpoints.pop', 'user_dir.breakpoints.pop', ({(1349, 37, 1349, 52): 'self.layer_path'}, {}), '(self.layer_path)', False, 'from nxt_editor import user_dir\n'), ((1668, 30, 1669, 58), 'nxt.nxt_node.list_merger', 'list_merger', ({(1668, 42, 1668, 61): 'display_child_order', (1669, 42, 1669, 57): 'old_child_order'}, {}), '(display_child_order, old_child_order)', False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((419, 36, 419, 78), 'nxt.nxt_node.INTERNAL_ATTRS.as_save_key', 'INTERNAL_ATTRS.as_save_key', ({(419, 63, 419, 77): 'self.attr_name'}, {}), '(self.attr_name)', False, 'from nxt.nxt_node import INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger\n'), ((1039, 27, 1039, 64), 'nxt.nxt_path.get_root_path', 'nxt_path.get_root_path', ({(1039, 50, 1039, 63): 'old_node_path'}, {}), '(old_node_path)', False, 'from nxt import nxt_path\n'), ((1348, 35, 1348, 62), 'nxt_editor.user_dir.breakpoints.keys', 'user_dir.breakpoints.keys', ({}, {}), '()', False, 'from nxt_editor import user_dir\n'), ((463, 32, 463, 78), 'nxt.nxt_path.make_attr_path', 'nxt_path.make_attr_path', ({(463, 56, 463, 61): 'dirty', (463, 63, 463, 77): 'self.attr_name'}, {}), '(dirty, self.attr_name)', False, 'from nxt import nxt_path\n'), ((1677, 33, 1677, 44), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((1682, 33, 1682, 44), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')]
libracore/mietrechtspraxis
mietrechtspraxis/mietrechtspraxis/doctype/arbitration_authority/arbitration_authority.py
7b2320a70b98b086be136a86b1ab4fadfce215ff
# -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from datetime import datetime
from PyPDF2 import PdfFileWriter
from frappe.utils.file_manager import save_file

class ArbitrationAuthority(Document):
    pass

def _get_sb(**kwargs):
    '''
    call on [IP]/api/method/mietrechtspraxis.api.get_sb

    Mandatory Parameters:
    - token
    - plz_city
    '''
    # check that token is present
    try:
        token = kwargs['token']
    except:
        # 400 Bad Request (Missing Token)
        return raise_4xx(400, 'Bad Request', 'Token Required')

    # check that token is correct
    if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'):
        # 401 Unauthorized (Invalid Token)
        return raise_4xx(401, 'Unauthorized', 'Invalid Token')

    # check that plz_city is present
    try:
        plz_city = kwargs['plz_city']
    except:
        # 400 Bad Request (Missing PLZ/City)
        return raise_4xx(400, 'Bad Request', 'PLZ/City Required')

    answer = []
    # lookup by pincode; `pincode` must be selected so it can be returned
    # below as data['plz']
    city_results = frappe.db.sql("""
                                 SELECT
                                    `pincode`,
                                    `city`,
                                    `municipality`,
                                    `district`,
                                    `canton`
                                 FROM `tabPincode`
                                 WHERE `pincode` = '{plz_city}'
                                 ORDER BY `city` ASC
                                 """.format(plz_city=plz_city), as_dict=True)
    if len(city_results) < 1:
        # no pincode match: lookup by city name
        city_results = frappe.db.sql("""
                                     SELECT
                                        `pincode`,
                                        `city`,
                                        `municipality`,
                                        `district`,
                                        `canton`
                                     FROM `tabPincode`
                                     WHERE `city` LIKE '%{plz_city}%'
                                     ORDER BY `city` ASC
                                     """.format(plz_city=plz_city), as_dict=True)
    if len(city_results) > 0:
        for city in city_results:
            data = {}
            data['plz'] = city.pincode
            data['ort'] = city.city
            data['gemeinde'] = city.municipality
            data['bezirk'] = city.district
            data['kanton'] = city.canton
            data['allgemein'] = get_informations(city.canton)
            data['schlichtungsbehoerde'] = frappe.db.sql("""
                SELECT
                    `schlichtungsbehoerde`.`titel` AS `Titel`,
                    `schlichtungsbehoerde`.`telefon` AS `Telefon`,
                    `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`,
                    `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`,
                    `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`,
                    `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`,
                    `schlichtungsbehoerde`.`homepage` AS `Homepage`
                FROM `tabArbitration Authority` AS `schlichtungsbehoerde`
                LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent`
                WHERE `geminendentbl`.`municipality` = '{municipality}'
                """.format(municipality=city.municipality), as_dict=True)
            answer.append(data)
        if len(answer) > 0:
            return raise_200(answer)
        else:
            # 404 Not Found
            return raise_4xx(404, 'Not Found', 'No results')
    else:
        # 404 Not Found
        return raise_4xx(404, 'Not Found', 'No results')

def get_informations(kanton):
    search = frappe.db.sql("""
                           SELECT
                              `informationen`,
                              `homepage`,
                              `gesetzessammlung`,
                              `formulare`
                           FROM `tabKantonsinformationen`
                           WHERE `kanton` = '{kanton}'
                           """.format(kanton=kanton), as_dict=True)
    if len(search) > 0:
        result = search[0]
    else:
        result = {}
    return result

def raise_4xx(code, title, message):
    # 4xx Bad Request / Unauthorized / Not Found
    return ['{code} {title}'.format(code=code, title=title), {
        "error": {
            "code": code,
            "message": "{message}".format(message=message)
        }
    }]

def raise_200(answer):
    return ['200 OK', answer]

@frappe.whitelist()
def get_sammel_pdf(no_letterhead=1):
    frappe.enqueue(method=_get_sammel_pdf, queue='long',
                   job_name='Schlichtungsbehörden Sammel-PDF',
                   **{'no_letterhead': no_letterhead})
    return

def _get_sammel_pdf(no_letterhead=1):
    output = PdfFileWriter()
    schlichtungsbehoerden = frappe.db.sql("""SELECT `name` FROM `tabArbitration Authority`""", as_dict=True)
    for schlichtungsbehoerde in schlichtungsbehoerden:
        output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf=True, output=output, no_letterhead=no_letterhead)
        output = frappe.get_print("Arbitration Authority", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf=True, output=output, no_letterhead=no_letterhead)
    pdf = frappe.utils.pdf.get_file_data_from_writer(output)
    now = datetime.now()
    ts = "{0:04d}-{1:02d}-{2:02d}".format(now.year, now.month, now.day)
    file_name = "{0}_{1}.pdf".format('SB_Sammel-PDF', ts)
    save_file(file_name, pdf, '', '', is_private=1)
    return
[((131, 1, 131, 19), 'frappe.whitelist', 'frappe.whitelist', ({}, {}), '()', False, 'import frappe\n'), ((133, 4, 133, 137), 'frappe.enqueue', 'frappe.enqueue', (), '', False, 'import frappe\n'), ((137, 13, 137, 28), 'PyPDF2.PdfFileWriter', 'PdfFileWriter', ({}, {}), '()', False, 'from PyPDF2 import PdfFileWriter\n'), ((138, 28, 138, 108), 'frappe.db.sql', 'frappe.db.sql', (), '', False, 'import frappe\n'), ((143, 10, 143, 60), 'frappe.utils.pdf.get_file_data_from_writer', 'frappe.utils.pdf.get_file_data_from_writer', ({(143, 53, 143, 59): 'output'}, {}), '(output)', False, 'import frappe\n'), ((145, 10, 145, 24), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((148, 4, 148, 51), 'frappe.utils.file_manager.save_file', 'save_file', (), '', False, 'from frappe.utils.file_manager import save_file\n'), ((140, 17, 140, 170), 'frappe.get_print', 'frappe.get_print', (), '', False, 'import frappe\n'), ((141, 17, 141, 189), 'frappe.get_print', 'frappe.get_print', (), '', False, 'import frappe\n'), ((32, 20, 32, 79), 'frappe.db.get_single_value', 'frappe.db.get_single_value', ({(32, 47, 32, 69): '"""mietrechtspraxis API"""', (32, 71, 32, 78): '"""token"""'}, {}), "('mietrechtspraxis API', 'token')", False, 'import frappe\n')]
Matthias1590/EasySockets
easysockets/client_socket.py
70d33a04e862b682b87bdf2103bcc1d7da06994e
from .connection import Connection

import socket


class ClientSocket:
    """A thin client-side wrapper around a TCP socket."""

    def __init__(self) -> None:
        # Create an IPv4 (AF_INET) TCP (SOCK_STREAM) socket.
        self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self, host: str, port: int) -> Connection:
        """Connect to host:port and hand the socket off to a Connection."""
        self.__socket.connect((host, port))

        return Connection(self.__socket)
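# Hypothetical usage sketch (assumes a server is listening on localhost:8080;
# what Connection offers beyond construction lives in .connection and is not
# shown in this file):
def _example_connect() -> Connection:
    client = ClientSocket()
    return client.connect("localhost", 8080)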
[((7, 24, 7, 73), 'socket.socket', 'socket.socket', ({(7, 38, 7, 52): 'socket.AF_INET', (7, 54, 7, 72): 'socket.SOCK_STREAM'}, {}), '(socket.AF_INET, socket.SOCK_STREAM)', False, 'import socket\n')]
yurivict/USD
pxr/usd/usdGeom/testenv/testUsdGeomSchemata.py
3b097e3ba8fabf1777a1256e241ea15df83f3065
#!/pxrpythonsubst # # Copyright 2017 Pixar # # Licensed under the Apache License, Version 2.0 (the "Apache License") # with the following modification; you may not use this file except in # compliance with the Apache License and the following modification to it: # Section 6. Trademarks. is deleted and replaced with: # # 6. Trademarks. This License does not grant permission to use the trade # names, trademarks, service marks, or product names of the Licensor # and its affiliates, except as required to comply with Section 4(c) of # the License and to reproduce the content of the NOTICE file. # # You may obtain a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the Apache License with the above modification is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the Apache License for the specific # language governing permissions and limitations under the Apache License. # pylint: disable=map-builtin-not-iterating import sys, unittest from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf class TestUsdGeomSchemata(unittest.TestCase): def test_Basic(self): l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) p = stage.DefinePrim("/Mesh", "Mesh") self.assertTrue(p) mesh = UsdGeom.Mesh(p) self.assertTrue(mesh) self.assertTrue(mesh.GetPrim()) self.assertTrue(not mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) # # Make sure uniform access behaves as expected. # ori = p.GetAttribute("orientation") # The generic orientation attribute should be automatically defined because # it is a registered attribute of a well known schema. However, it's not # yet authored at the current edit target. self.assertTrue(ori.IsDefined()) self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) # Author a value, and check that it's still defined, and now is in fact # authored at the current edit target. ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10) # "leftHanded" should have been authored at Usd.TimeCode.Default, so reading the # attribute at Default should return lh, not rh. self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded) # The value "rightHanded" was set at t=10, so reading *any* time should # return "rightHanded" self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded) # # Attribute name sanity check. We expect the names returned by the schema # to match the names returned via the generic API. 
# self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False)) for n in mesh.GetSchemaAttributeNames(): # apiName overrides if n == "primvars:displayColor": n = "displayColor" elif n == "primvars:displayOpacity": n = "displayOpacity" name = n[0].upper() + n[1:] self.assertTrue(("Get" + name + "Attr") in dir(mesh), ("Get" + name + "Attr() not found in: " + str(dir(mesh)))) def test_IsA(self): # Author Scene and Compose Stage l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) # For every prim schema type in this module, validate that: # 1. We can define a prim of its type # 2. Its type and inheritance matches our expectations # 3. At least one of its builtin properties is available and defined # BasisCurves Tests schema = UsdGeom.BasisCurves.Define(stage, "/BasisCurves") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not a Cylinder self.assertTrue(schema.GetBasisAttr()) # Camera Tests schema = UsdGeom.Camera.Define(stage, "/Camera") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not a Cylinder self.assertTrue(schema.GetFocalLengthAttr()) # Capsule Tests schema = UsdGeom.Capsule.Define(stage, "/Capsule") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cone Tests schema = UsdGeom.Cone.Define(stage, "/Cone") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cube Tests schema = UsdGeom.Cube.Define(stage, "/Cube") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not a Cylinder self.assertTrue(schema.GetSizeAttr()) # Cylinder Tests schema = UsdGeom.Cylinder.Define(stage, "/Cylinder") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a Cylinder self.assertTrue(schema.GetAxisAttr()) # Mesh Tests schema = UsdGeom.Mesh.Define(stage, "/Mesh") self.assertTrue(schema) prim = schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not a Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr()) # NurbsCurves Tests schema = UsdGeom.NurbsCurves.Define(stage, "/NurbsCurves") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not a Mesh 
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not a Cylinder self.assertTrue(schema.GetKnotsAttr()) # NurbsPatch Tests schema = UsdGeom.NurbsPatch.Define(stage, "/NurbsPatch") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not a Cylinder self.assertTrue(schema.GetUKnotsAttr()) # Points Tests schema = UsdGeom.Points.Define(stage, "/Points") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not a Cylinder self.assertTrue(schema.GetWidthsAttr()) # Scope Tests schema = UsdGeom.Scope.Define(stage, "/Scope") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not a Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not a Cylinder # Scope has no builtins! # Sphere Tests schema = UsdGeom.Sphere.Define(stage, "/Sphere") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not a Cylinder self.assertTrue(schema.GetRadiusAttr()) # Xform Tests schema = UsdGeom.Xform.Define(stage, "/Xform") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not a Cylinder self.assertTrue(schema.GetXformOpOrderAttr()) def test_Fallbacks(self): # Author Scene and Compose Stage stage = Usd.Stage.CreateInMemory() # Xformable Tests identity = Gf.Matrix4d(1) origin = Gf.Vec3f(0, 0, 0) xform = UsdGeom.Xform.Define(stage, "/Xform") # direct subclass xformOpOrder = xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue()) # xformOpOrder has no fallback value self.assertEqual(xformOpOrder.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) # Try authoring and reverting... 
xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(), None) opOrderVal = ["xformOp:transform"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(), None) self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) mesh = UsdGeom.Mesh.Define(stage, "/Mesh") # multiple ancestor hops # PointBased and Curves curves = UsdGeom.BasisCurves.Define(stage, "/Curves") self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex) # Before we go, test that CreateXXXAttr performs as we expect in various # scenarios # Number 1: Sparse and non-sparse authoring on def'd prim mesh.CreateDoubleSidedAttr(False, True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) mesh.CreateDoubleSidedAttr(False, False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) # Number 2: Sparse authoring demotes to dense for non-defed prim overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False, True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False) overMesh.CreateDoubleSidedAttr(True, True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # make it a defined mesh, and sanity check it still evals the same mesh2 = UsdGeom.Mesh.Define(stage, "/overMesh") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # Check querying of fallback values. sphere = UsdGeom.Sphere.Define(stage, "/Sphere") radius = sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue()) radiusQuery = Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue()) def test_DefineSchema(self): s = Usd.Stage.CreateInMemory() parent = s.OverridePrim('/parent') self.assertTrue(parent) # Make a subscope. scope = UsdGeom.Scope.Define(s, '/parent/subscope') self.assertTrue(scope) # Assert that a simple find or create gives us the scope back. self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim()) # Try to make a mesh at subscope's path. This transforms the scope into a # mesh, since Define() always authors typeName. mesh = UsdGeom.Mesh.Define(s, '/parent/subscope') self.assertTrue(mesh) self.assertTrue(not scope) # Make a mesh at a different path, should work. mesh = UsdGeom.Mesh.Define(s, '/parent/mesh') self.assertTrue(mesh) def test_BasicMetadataCases(self): s = Usd.Stage.CreateInMemory() spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim() radius = spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined()) self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetTypeName(), 'double') allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # Author a custom property spec. 
        layer = s.GetRootLayer()
        sphereSpec = layer.GetPrimAtPath('/sphere')
        radiusSpec = Sdf.AttributeSpec(
            sphereSpec, 'radius', Sdf.ValueTypeNames.Double,
            variability=Sdf.VariabilityUniform, declaresCustom=True)
        self.assertTrue(radiusSpec.custom)
        self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform)

        # Definition should win.
        self.assertTrue(not radius.IsCustom())
        self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying)

        allMetadata = radius.GetAllMetadata()
        self.assertEqual(allMetadata['typeName'], 'double')
        self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying)
        self.assertEqual(allMetadata['custom'], False)

        # List fields on 'visibility' attribute -- should include 'allowedTokens',
        # provided by the property definition.
        visibility = spherePrim.GetAttribute('visibility')
        self.assertTrue(visibility.IsDefined())
        self.assertTrue('allowedTokens' in visibility.GetAllMetadata())

        # Assert that attribute fallback values are returned for builtin attributes.
        do = spherePrim.GetAttribute('primvars:displayOpacity')
        self.assertTrue(do.IsDefined())
        self.assertTrue(do.Get() is None)

    def test_Camera(self):
        from pxr import Gf

        stage = Usd.Stage.CreateInMemory()

        camera = UsdGeom.Camera.Define(stage, "/Camera")

        self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable))  # Camera is Xformable

        self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective')
        camera.GetProjectionAttr().Set('orthographic')
        self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic')

        self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(),
                                   0.825 * 25.4, 1e-5))
        camera.GetHorizontalApertureAttr().Set(3.0)
        self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0)

        self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(),
                                   0.602 * 25.4, 1e-5))
        camera.GetVerticalApertureAttr().Set(2.0)
        self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0)

        self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0)
        camera.GetFocalLengthAttr().Set(35.0)
        self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5))

        self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000))
        camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10))
        self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(),
                                   Gf.Vec2f(5, 10), 1e-5))

        self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray())

        cp = Vt.Vec4fArray([(1, 2, 3, 4), (8, 7, 6, 5)])
        camera.GetClippingPlanesAttr().Set(cp)
        self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)

        cp = Vt.Vec4fArray()
        camera.GetClippingPlanesAttr().Set(cp)
        self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)

        self.assertEqual(camera.GetFStopAttr().Get(), 0.0)
        camera.GetFStopAttr().Set(2.8)
        self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5))

        self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0)
        camera.GetFocusDistanceAttr().Set(10.0)
        self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0)

    def test_Points(self):
        stage = Usd.Stage.CreateInMemory()

        # Points Tests

        schema = UsdGeom.Points.Define(stage, "/Points")
        self.assertTrue(schema)

        # Test that ids round-trip properly, for big numbers and negative numbers
        ids = [8589934592, 1099511627776, 0, -42]
        schema.CreateIdsAttr(ids)
        resolvedIds = list(schema.GetIdsAttr().Get())  # convert VtArray to list
        self.assertEqual(ids, resolvedIds)

    def test_Revert_Bug111239(self):
        # This used to test a change for Bug111239, but now tests that this
        # fix has been reverted. We no longer allow the C++ typename to be used
        # as a prim's typename.
        s = Usd.Stage.CreateInMemory()
        sphere = s.DefinePrim('/sphere', typeName='Sphere')
        tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName
        self.assertEqual(tfTypeName, 'UsdGeomSphere')
        usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName=tfTypeName)
        self.assertTrue(UsdGeom.Sphere(sphere))
        self.assertTrue('radius' in [a.GetName() for a in sphere.GetAttributes()])
        self.assertFalse(UsdGeom.Sphere(usdGeomSphere))
        self.assertFalse('radius' in [a.GetName()
                                      for a in usdGeomSphere.GetAttributes()])

    def test_ComputeExtent(self):
        from pxr import Gf

        # Create some simple test cases
        allPoints = [
            [(1, 1, 0)],                # Zero-Volume Extent Test
            [(0, 0, 0)],                # Simple Width Test
            [(-1, -1, -1), (1, 1, 1)],  # Multiple Width Test
            [(-1, -1, -1), (1, 1, 1)],  # Erroneous Widths/Points Test
            # Complex Test, Many Points/Widths
            [(3, -1, 5), (-1.5, 0, 3), (1, 3, -2), (2, 2, -4)],
        ]

        allWidths = [
            [0],            # Zero-Volume Extent Test
            [2],            # Simple Width Test
            [2, 4],         # Multiple Width Test
            [2, 4, 5],      # Erroneous Widths/Points Test
            [1, 2, 2, 1]    # Complex Test, Many Points/Widths
        ]

        pointBasedSolutions = [
            [(1, 1, 0), (1, 1, 0)],     # Zero-Volume Extent Test
            [(0, 0, 0), (0, 0, 0)],     # Simple Width Test
            [(-1, -1, -1), (1, 1, 1)],  # Multiple Width Test
            # Erroneous Widths/Points Test -> Ok For Point-Based
            [(-1, -1, -1), (1, 1, 1)],
            [(-1.5, -1, -4), (3, 3, 5)]  # Complex Test, Many Points/Widths
        ]

        pointsSolutions = [
            [(1, 1, 0), (1, 1, 0)],     # Zero-Volume Extent Test
            [(-1, -1, -1), (1, 1, 1)],  # Simple Width Test
            [(-2, -2, -2), (3, 3, 3)],  # Multiple Width Test
            # Erroneous Widths/Points Test -> Returns None
            None,
            [(-2.5, -1.5, -4.5), (3.5, 4, 5.5)]  # Complex Test, Many Points/Widths
        ]

        # Perform the correctness tests for PointBased and Points

        # Test for empty points prims
        emptyPoints = []
        extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints)

        # We need to map the contents of extremeExtentArr to floats from
        # num.float32s due to the way Gf.Vec3f is wrapped out
        # XXX: This is awful, it'd be nice to not do it
        extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])),
                                        Gf.Vec3f(*map(float, extremeExtentArr[1])))
        self.assertTrue(extremeExtentRange.IsEmpty())

        # PointBased Test
        numDataSets = len(allPoints)
        for i in range(numDataSets):
            pointsData = allPoints[i]

            expectedExtent = pointBasedSolutions[i]
            actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData)

            for a, b in zip(expectedExtent, actualExtent):
                self.assertTrue(Gf.IsClose(a, b, 1e-5))

        # Points Test
        for i in range(numDataSets):
            pointsData = allPoints[i]
            widthsData = allWidths[i]

            expectedExtent = pointsSolutions[i]
            actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData)

            if actualExtent is not None and expectedExtent is not None:
                for a, b in zip(expectedExtent, actualExtent):
                    self.assertTrue(Gf.IsClose(a, b, 1e-5))

            # Compute extent via generic UsdGeom.Boundable API
            s = Usd.Stage.CreateInMemory()
            pointsPrim = UsdGeom.Points.Define(s, "/Points")
            pointsPrim.CreatePointsAttr(pointsData)
            pointsPrim.CreateWidthsAttr(widthsData)
            actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
                pointsPrim, Usd.TimeCode.Default())

            if actualExtent is not None and expectedExtent is not None:
                for a, b in zip(expectedExtent, list(actualExtent)):
                    self.assertTrue(Gf.IsClose(a, b, 1e-5))

        # Mesh Test
        for i in range(numDataSets):
            pointsData = allPoints[i]
            expectedExtent = pointBasedSolutions[i]

            # Compute extent via generic UsdGeom.Boundable API.
            # UsdGeom.Mesh does not have its own compute extent function, so
            # it should fall back to the extent for PointBased prims.
            s = Usd.Stage.CreateInMemory()
            meshPrim = UsdGeom.Mesh.Define(s, "/Mesh")
            meshPrim.CreatePointsAttr(pointsData)
            actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
                meshPrim, Usd.TimeCode.Default())

            for a, b in zip(expectedExtent, actualExtent):
                self.assertTrue(Gf.IsClose(a, b, 1e-5))

        # Test UsdGeomCurves
        curvesPoints = [
            [(0,0,0), (1,1,1), (2,1,1), (3,0,0)],   # Test Curve with 1 width
            [(0,0,0), (1,1,1), (2,1,1), (3,0,0)],   # Test Curve with 2 widths
            [(0,0,0), (1,1,1), (2,1,1), (3,0,0)]    # Test Curve with no width
        ]

        curvesWidths = [
            [1],        # Test Curve with 1 width
            [.5, .1],   # Test Curve with 2 widths
            []          # Test Curve with no width
        ]

        curvesSolutions = [
            [(-.5,-.5,-.5), (3.5,1.5,1.5)],        # Test Curve with 1 width
            [(-.25,-.25,-.25), (3.25,1.25,1.25)],  # Test Curve with 2 widths (MAX)
            [(0,0,0), (3,1,1)],                    # Test Curve with no width
        ]

        # Perform the actual v. expected comparison
        numDataSets = len(curvesPoints)
        for i in range(numDataSets):
            pointsData = curvesPoints[i]
            widths = curvesWidths[i]
            expectedExtent = curvesSolutions[i]
            actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths)

            for a, b in zip(expectedExtent, actualExtent):
                self.assertTrue(Gf.IsClose(a, b, 1e-5))

            # Compute extent via generic UsdGeom.Boundable API
            s = Usd.Stage.CreateInMemory()
            nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, "/NurbsCurves")
            nurbsCurvesPrim.CreatePointsAttr(pointsData)
            nurbsCurvesPrim.CreateWidthsAttr(widths)
            actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
                nurbsCurvesPrim, Usd.TimeCode.Default())

            for a, b in zip(expectedExtent, actualExtent):
                self.assertTrue(Gf.IsClose(a, b, 1e-5))

            basisCurvesPrim = UsdGeom.BasisCurves.Define(s, "/BasisCurves")
            basisCurvesPrim.CreatePointsAttr(pointsData)
            basisCurvesPrim.CreateWidthsAttr(widths)
            actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(
                basisCurvesPrim, Usd.TimeCode.Default())

            for a, b in zip(expectedExtent, actualExtent):
                self.assertTrue(Gf.IsClose(a, b, 1e-5))

    def test_TypeUsage(self):
        # Perform Type-Ness Checking for ComputeExtent
        pointsAsList = [(0, 0, 0), (1, 1, 1), (2, 2, 2)]
        pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList)

        comp = UsdGeom.PointBased.ComputeExtent
        expectedExtent = comp(pointsAsVec3fArr)
        actualExtent = comp(pointsAsList)

        for a, b in zip(expectedExtent, actualExtent):
            self.assertTrue(Gf.IsClose(a, b, 1e-5))

    def test_Bug116593(self):
        from pxr import Gf

        s = Usd.Stage.CreateInMemory()
        prim = s.DefinePrim('/sphere', typeName='Sphere')

        # set with list of tuples
        vec = [(1,2,2), (12,3,3)]
        self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))
        self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))
        self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3))

        # set with Gf vecs
        vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)]
        self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))
        self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))
        self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1))

    def test_Typed(self):
        from pxr import Tf

        xform = Tf.Type.FindByName("UsdGeomXform")
        imageable = Tf.Type.FindByName("UsdGeomImageable")
        geomModelAPI = Tf.Type.FindByName("UsdGeomModelAPI")

        self.assertTrue(Usd.SchemaRegistry.IsTyped(xform))
        self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable))
        self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI))

    def test_Concrete(self):
        from pxr import Tf

        xform = Tf.Type.FindByName("UsdGeomXform")
        imageable = Tf.Type.FindByName("UsdGeomImageable")
        geomModelAPI = Tf.Type.FindByName("UsdGeomModelAPI")

        self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform))
        self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable))
        self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI))

    def test_Apply(self):
        s = Usd.Stage.CreateInMemory('AppliedSchemas.usd')
        root = s.DefinePrim('/hello')
        self.assertEqual([], root.GetAppliedSchemas())

        # Check duplicates
        UsdGeom.MotionAPI.Apply(root)
        self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())
        UsdGeom.MotionAPI.Apply(root)
        self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())

        # Ensure duplicates aren't picked up
        UsdGeom.ModelAPI.Apply(root)
        self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas())

        # Verify that we get exceptions but don't crash when applying to the
        # null prim.
        with self.assertRaises(Tf.ErrorException):
            self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim()))
        with self.assertRaises(Tf.ErrorException):
            self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim()))

    def test_IsATypeless(self):
        from pxr import Usd, Tf
        s = Usd.Stage.CreateInMemory()
        spherePrim = s.DefinePrim('/sphere', typeName='Sphere')
        typelessPrim = s.DefinePrim('/regular')

        types = [Tf.Type.FindByName('UsdGeomSphere'),
                 Tf.Type.FindByName('UsdGeomGprim'),
                 Tf.Type.FindByName('UsdGeomBoundable'),
                 Tf.Type.FindByName('UsdGeomXformable'),
                 Tf.Type.FindByName('UsdGeomImageable'),
                 Tf.Type.FindByName('UsdTyped')]

        # Our sphere prim should return true on IsA queries for Sphere
        # and everything it inherits from. Our plain prim should return false
        # for all of them.
        for t in types:
            self.assertTrue(spherePrim.IsA(t))
            self.assertFalse(typelessPrim.IsA(t))

    def test_HasAPI(self):
        from pxr import Usd, Tf
        s = Usd.Stage.CreateInMemory()
        prim = s.DefinePrim('/prim')
        types = [Tf.Type.FindByName('UsdGeomMotionAPI'),
                 Tf.Type.FindByName('UsdGeomModelAPI')]

        # Check that no APIs have yet been applied
        for t in types:
            self.assertFalse(prim.HasAPI(t))

        # Apply our schemas to this prim
        UsdGeom.ModelAPI.Apply(prim)
        UsdGeom.MotionAPI.Apply(prim)

        # Check that all our applied schemas show up
        for t in types:
            self.assertTrue(prim.HasAPI(t))

        # Check that we get an exception for unknown and non-API types
        with self.assertRaises(Tf.ErrorException):
            prim.HasAPI(Tf.Type.Unknown)
        with self.assertRaises(Tf.ErrorException):
            prim.HasAPI(Tf.Type.FindByName('UsdGeomXform'))
        with self.assertRaises(Tf.ErrorException):
            prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable'))
        with self.assertRaises(Tf.ErrorException):
            # Test with a non-applied API schema.
            prim.HasAPI(Tf.Type.FindByName('UsdModelAPI'))


if __name__ == "__main__":
    unittest.main()
[((726, 4, 726, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import sys, unittest\n'), ((32, 12, 32, 39), 'pxr.Sdf.Layer.CreateAnonymous', 'Sdf.Layer.CreateAnonymous', ({}, {}), '()', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((33, 16, 33, 44), 'pxr.Usd.Stage.Open', 'Usd.Stage.Open', ({(33, 31, 33, 43): 'l.identifier'}, {}), '(l.identifier)', False, 'from pxr import Usd, Tf\n'), ((38, 15, 38, 30), 'pxr.UsdGeom.Mesh', 'UsdGeom.Mesh', ({(38, 28, 38, 29): 'p'}, {}), '(p)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((95, 12, 95, 39), 'pxr.Sdf.Layer.CreateAnonymous', 'Sdf.Layer.CreateAnonymous', ({}, {}), '()', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((96, 16, 96, 44), 'pxr.Usd.Stage.Open', 'Usd.Stage.Open', ({(96, 31, 96, 43): 'l.identifier'}, {}), '(l.identifier)', False, 'from pxr import Usd, Tf\n'), ((105, 17, 105, 66), 'pxr.UsdGeom.BasisCurves.Define', 'UsdGeom.BasisCurves.Define', ({(105, 44, 105, 49): 'stage', (105, 51, 105, 65): '"""/BasisCurves"""'}, {}), "(stage, '/BasisCurves')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((115, 17, 115, 56), 'pxr.UsdGeom.Camera.Define', 'UsdGeom.Camera.Define', ({(115, 39, 115, 44): 'stage', (115, 46, 115, 55): '"""/Camera"""'}, {}), "(stage, '/Camera')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((125, 17, 125, 58), 'pxr.UsdGeom.Capsule.Define', 'UsdGeom.Capsule.Define', ({(125, 40, 125, 45): 'stage', (125, 47, 125, 57): '"""/Capsule"""'}, {}), "(stage, '/Capsule')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((135, 17, 135, 52), 'pxr.UsdGeom.Cone.Define', 'UsdGeom.Cone.Define', ({(135, 37, 135, 42): 'stage', (135, 44, 135, 51): '"""/Cone"""'}, {}), "(stage, '/Cone')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((145, 17, 145, 52), 'pxr.UsdGeom.Cube.Define', 'UsdGeom.Cube.Define', ({(145, 37, 145, 42): 'stage', (145, 44, 145, 51): '"""/Cube"""'}, {}), "(stage, '/Cube')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((155, 17, 155, 60), 'pxr.UsdGeom.Cylinder.Define', 'UsdGeom.Cylinder.Define', ({(155, 41, 155, 46): 'stage', (155, 48, 155, 59): '"""/Cylinder"""'}, {}), "(stage, '/Cylinder')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((165, 17, 165, 52), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', ({(165, 37, 165, 42): 'stage', (165, 44, 165, 51): '"""/Mesh"""'}, {}), "(stage, '/Mesh')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((175, 17, 175, 66), 'pxr.UsdGeom.NurbsCurves.Define', 'UsdGeom.NurbsCurves.Define', ({(175, 44, 175, 49): 'stage', (175, 51, 175, 65): '"""/NurbsCurves"""'}, {}), "(stage, '/NurbsCurves')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((185, 17, 185, 64), 'pxr.UsdGeom.NurbsPatch.Define', 'UsdGeom.NurbsPatch.Define', ({(185, 43, 185, 48): 'stage', (185, 50, 185, 63): '"""/NurbsPatch"""'}, {}), "(stage, '/NurbsPatch')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((195, 17, 195, 56), 'pxr.UsdGeom.Points.Define', 'UsdGeom.Points.Define', ({(195, 39, 195, 44): 'stage', (195, 46, 195, 55): '"""/Points"""'}, {}), "(stage, '/Points')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((205, 17, 205, 54), 'pxr.UsdGeom.Scope.Define', 'UsdGeom.Scope.Define', ({(205, 38, 205, 43): 'stage', (205, 45, 205, 53): '"""/Scope"""'}, {}), "(stage, '/Scope')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((215, 17, 215, 56), 'pxr.UsdGeom.Sphere.Define', 'UsdGeom.Sphere.Define', ({(215, 39, 215, 44): 
'stage', (215, 46, 215, 55): '"""/Sphere"""'}, {}), "(stage, '/Sphere')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((225, 17, 225, 54), 'pxr.UsdGeom.Xform.Define', 'UsdGeom.Xform.Define', ({(225, 38, 225, 43): 'stage', (225, 45, 225, 53): '"""/Xform"""'}, {}), "(stage, '/Xform')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((237, 16, 237, 42), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((241, 19, 241, 33), 'pxr.Gf.Matrix4d', 'Gf.Matrix4d', ({(241, 31, 241, 32): '1'}, {}), '(1)', False, 'from pxr import Gf\n'), ((242, 17, 242, 34), 'pxr.Gf.Vec3f', 'Gf.Vec3f', ({(242, 26, 242, 27): '0', (242, 29, 242, 30): '0', (242, 32, 242, 33): '0'}, {}), '(0, 0, 0)', False, 'from pxr import Gf\n'), ((244, 16, 244, 53), 'pxr.UsdGeom.Xform.Define', 'UsdGeom.Xform.Define', ({(244, 37, 244, 42): 'stage', (244, 44, 244, 52): '"""/Xform"""'}, {}), "(stage, '/Xform')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((266, 15, 266, 50), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', ({(266, 35, 266, 40): 'stage', (266, 42, 266, 49): '"""/Mesh"""'}, {}), "(stage, '/Mesh')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((269, 17, 269, 61), 'pxr.UsdGeom.BasisCurves.Define', 'UsdGeom.BasisCurves.Define', ({(269, 44, 269, 49): 'stage', (269, 51, 269, 60): '"""/Curves"""'}, {}), "(stage, '/Curves')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((289, 16, 289, 55), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', ({(289, 36, 289, 41): 'stage', (289, 43, 289, 54): '"""/overMesh"""'}, {}), "(stage, '/overMesh')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((293, 17, 293, 56), 'pxr.UsdGeom.Sphere.Define', 'UsdGeom.Sphere.Define', ({(293, 39, 293, 44): 'stage', (293, 46, 293, 55): '"""/Sphere"""'}, {}), "(stage, '/Sphere')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((297, 22, 297, 48), 'pxr.Usd.AttributeQuery', 'Usd.AttributeQuery', ({(297, 41, 297, 47): 'radius'}, {}), '(radius)', False, 'from pxr import Usd, Tf\n'), ((301, 12, 301, 38), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((305, 16, 305, 59), 'pxr.UsdGeom.Scope.Define', 'UsdGeom.Scope.Define', ({(305, 37, 305, 38): 's', (305, 40, 305, 58): '"""/parent/subscope"""'}, {}), "(s, '/parent/subscope')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((312, 15, 312, 57), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', ({(312, 35, 312, 36): 's', (312, 38, 312, 56): '"""/parent/subscope"""'}, {}), "(s, '/parent/subscope')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((316, 15, 316, 53), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', ({(316, 35, 316, 36): 's', (316, 38, 316, 52): '"""/parent/mesh"""'}, {}), "(s, '/parent/mesh')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((320, 12, 320, 38), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((336, 21, 338, 68), 'pxr.Sdf.AttributeSpec', 'Sdf.AttributeSpec', (), '', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((362, 16, 362, 42), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((364, 17, 364, 56), 'pxr.UsdGeom.Camera.Define', 'UsdGeom.Camera.Define', ({(364, 39, 364, 44): 'stage', (364, 46, 364, 55): '"""/Camera"""'}, {}), "(stage, '/Camera')", False, 'from pxr import Sdf, Usd, UsdGeom, 
Vt, Gf, Tf\n'), ((393, 13, 393, 56), 'pxr.Vt.Vec4fArray', 'Vt.Vec4fArray', ({(393, 27, 393, 55): '[(1, 2, 3, 4), (8, 7, 6, 5)]'}, {}), '([(1, 2, 3, 4), (8, 7, 6, 5)])', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((396, 13, 396, 28), 'pxr.Vt.Vec4fArray', 'Vt.Vec4fArray', ({}, {}), '()', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((409, 16, 409, 42), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((413, 17, 413, 56), 'pxr.UsdGeom.Points.Define', 'UsdGeom.Points.Define', ({(413, 39, 413, 44): 'stage', (413, 46, 413, 55): '"""/Points"""'}, {}), "(stage, '/Points')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((427, 12, 427, 38), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((481, 27, 481, 72), 'pxr.UsdGeom.PointBased.ComputeExtent', 'UsdGeom.PointBased.ComputeExtent', ({(481, 60, 481, 71): 'emptyPoints'}, {}), '(emptyPoints)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((602, 27, 602, 54), 'pxr.Vt.Vec3fArray', 'Vt.Vec3fArray', ({(602, 41, 602, 53): 'pointsAsList'}, {}), '(pointsAsList)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((614, 12, 614, 38), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((631, 16, 631, 50), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(631, 35, 631, 49): '"""UsdGeomXform"""'}, {}), "('UsdGeomXform')", False, 'from pxr import Usd, Tf\n'), ((632, 20, 632, 58), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(632, 39, 632, 57): '"""UsdGeomImageable"""'}, {}), "('UsdGeomImageable')", False, 'from pxr import Usd, Tf\n'), ((633, 23, 633, 60), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(633, 42, 633, 59): '"""UsdGeomModelAPI"""'}, {}), "('UsdGeomModelAPI')", False, 'from pxr import Usd, Tf\n'), ((641, 16, 641, 50), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(641, 35, 641, 49): '"""UsdGeomXform"""'}, {}), "('UsdGeomXform')", False, 'from pxr import Usd, Tf\n'), ((642, 20, 642, 58), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(642, 39, 642, 57): '"""UsdGeomImageable"""'}, {}), "('UsdGeomImageable')", False, 'from pxr import Usd, Tf\n'), ((643, 23, 643, 60), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(643, 42, 643, 59): '"""UsdGeomModelAPI"""'}, {}), "('UsdGeomModelAPI')", False, 'from pxr import Usd, Tf\n'), ((650, 12, 650, 58), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({(650, 37, 650, 57): '"""AppliedSchemas.usd"""'}, {}), "('AppliedSchemas.usd')", False, 'from pxr import Usd, Tf\n'), ((655, 8, 655, 37), 'pxr.UsdGeom.MotionAPI.Apply', 'UsdGeom.MotionAPI.Apply', ({(655, 32, 655, 36): 'root'}, {}), '(root)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((657, 8, 657, 37), 'pxr.UsdGeom.MotionAPI.Apply', 'UsdGeom.MotionAPI.Apply', ({(657, 32, 657, 36): 'root'}, {}), '(root)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((661, 8, 661, 36), 'pxr.UsdGeom.ModelAPI.Apply', 'UsdGeom.ModelAPI.Apply', ({(661, 31, 661, 35): 'root'}, {}), '(root)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((673, 12, 673, 38), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((693, 12, 693, 38), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((704, 8, 704, 36), 'pxr.UsdGeom.ModelAPI.Apply', 
'UsdGeom.ModelAPI.Apply', ({(704, 31, 704, 35): 'prim'}, {}), '(prim)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((705, 8, 705, 37), 'pxr.UsdGeom.MotionAPI.Apply', 'UsdGeom.MotionAPI.Apply', ({(705, 32, 705, 36): 'prim'}, {}), '(prim)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((386, 62, 386, 82), 'pxr.Gf.Vec2f', 'Gf.Vec2f', ({(386, 71, 386, 72): '(1)', (386, 74, 386, 81): '(1000000)'}, {}), '(1, 1000000)', False, 'from pxr import Gf\n'), ((387, 42, 387, 57), 'pxr.Gf.Vec2f', 'Gf.Vec2f', ({(387, 51, 387, 52): '(5)', (387, 54, 387, 56): '(10)'}, {}), '(5, 10)', False, 'from pxr import Gf\n'), ((391, 63, 391, 78), 'pxr.Vt.Vec4fArray', 'Vt.Vec4fArray', ({}, {}), '()', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((429, 21, 429, 54), 'pxr.UsdGeom.Sphere._GetStaticTfType', 'UsdGeom.Sphere._GetStaticTfType', ({}, {}), '()', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((432, 24, 432, 46), 'pxr.UsdGeom.Sphere', 'UsdGeom.Sphere', ({(432, 39, 432, 45): 'sphere'}, {}), '(sphere)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((434, 25, 434, 54), 'pxr.UsdGeom.Sphere', 'UsdGeom.Sphere', ({(434, 40, 434, 53): 'usdGeomSphere'}, {}), '(usdGeomSphere)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((496, 27, 496, 71), 'pxr.UsdGeom.PointBased.ComputeExtent', 'UsdGeom.PointBased.ComputeExtent', ({(496, 60, 496, 70): 'pointsData'}, {}), '(pointsData)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((507, 27, 507, 79), 'pxr.UsdGeom.Points.ComputeExtent', 'UsdGeom.Points.ComputeExtent', ({(507, 56, 507, 66): 'pointsData', (507, 68, 507, 78): 'widthsData'}, {}), '(pointsData, widthsData)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((513, 16, 513, 42), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((514, 25, 514, 60), 'pxr.UsdGeom.Points.Define', 'UsdGeom.Points.Define', ({(514, 47, 514, 48): 's', (514, 50, 514, 59): '"""/Points"""'}, {}), "(s, '/Points')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((534, 16, 534, 42), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((535, 23, 535, 54), 'pxr.UsdGeom.Mesh.Define', 'UsdGeom.Mesh.Define', ({(535, 43, 535, 44): 's', (535, 46, 535, 53): '"""/Mesh"""'}, {}), "(s, '/Mesh')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((572, 27, 572, 75), 'pxr.UsdGeom.Curves.ComputeExtent', 'UsdGeom.Curves.ComputeExtent', ({(572, 56, 572, 66): 'pointsData', (572, 68, 572, 74): 'widths'}, {}), '(pointsData, widths)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((578, 16, 578, 42), 'pxr.Usd.Stage.CreateInMemory', 'Usd.Stage.CreateInMemory', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((579, 30, 579, 75), 'pxr.UsdGeom.NurbsCurves.Define', 'UsdGeom.NurbsCurves.Define', ({(579, 57, 579, 58): 's', (579, 60, 579, 74): '"""/NurbsCurves"""'}, {}), "(s, '/NurbsCurves')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((589, 30, 589, 75), 'pxr.UsdGeom.BasisCurves.Define', 'UsdGeom.BasisCurves.Define', ({(589, 57, 589, 58): 's', (589, 60, 589, 74): '"""/BasisCurves"""'}, {}), "(s, '/BasisCurves')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((620, 69, 620, 84), 'pxr.Gf.Vec3f', 'Gf.Vec3f', ({(620, 78, 620, 79): '(1)', (620, 80, 620, 81): '(2)', (620, 82, 620, 83): '(2)'}, {}), '(1, 2, 2)', False, 'from pxr import Gf\n'), ((621, 69, 621, 85), 'pxr.Gf.Vec3f', 
'Gf.Vec3f', ({(621, 78, 621, 80): '(12)', (621, 81, 621, 82): '(3)', (621, 83, 621, 84): '(3)'}, {}), '(12, 3, 3)', False, 'from pxr import Gf\n'), ((624, 15, 624, 30), 'pxr.Gf.Vec3f', 'Gf.Vec3f', ({(624, 24, 624, 25): '(1)', (624, 26, 624, 27): '(2)', (624, 28, 624, 29): '(2)'}, {}), '(1, 2, 2)', False, 'from pxr import Gf\n'), ((624, 32, 624, 47), 'pxr.Gf.Vec3f', 'Gf.Vec3f', ({(624, 41, 624, 42): '(1)', (624, 43, 624, 44): '(1)', (624, 45, 624, 46): '(1)'}, {}), '(1, 1, 1)', False, 'from pxr import Gf\n'), ((626, 69, 626, 84), 'pxr.Gf.Vec3f', 'Gf.Vec3f', ({(626, 78, 626, 79): '(1)', (626, 80, 626, 81): '(2)', (626, 82, 626, 83): '(2)'}, {}), '(1, 2, 2)', False, 'from pxr import Gf\n'), ((627, 69, 627, 84), 'pxr.Gf.Vec3f', 'Gf.Vec3f', ({(627, 78, 627, 79): '(1)', (627, 80, 627, 81): '(1)', (627, 82, 627, 83): '(1)'}, {}), '(1, 1, 1)', False, 'from pxr import Gf\n'), ((635, 24, 635, 57), 'pxr.Usd.SchemaRegistry.IsTyped', 'Usd.SchemaRegistry.IsTyped', ({(635, 51, 635, 56): 'xform'}, {}), '(xform)', False, 'from pxr import Usd, Tf\n'), ((636, 24, 636, 61), 'pxr.Usd.SchemaRegistry.IsTyped', 'Usd.SchemaRegistry.IsTyped', ({(636, 51, 636, 60): 'imageable'}, {}), '(imageable)', False, 'from pxr import Usd, Tf\n'), ((637, 25, 637, 65), 'pxr.Usd.SchemaRegistry.IsTyped', 'Usd.SchemaRegistry.IsTyped', ({(637, 52, 637, 64): 'geomModelAPI'}, {}), '(geomModelAPI)', False, 'from pxr import Usd, Tf\n'), ((677, 17, 677, 52), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(677, 36, 677, 51): '"""UsdGeomSphere"""'}, {}), "('UsdGeomSphere')", False, 'from pxr import Usd, Tf\n'), ((678, 17, 678, 51), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(678, 36, 678, 50): '"""UsdGeomGprim"""'}, {}), "('UsdGeomGprim')", False, 'from pxr import Usd, Tf\n'), ((679, 17, 679, 55), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(679, 36, 679, 54): '"""UsdGeomBoundable"""'}, {}), "('UsdGeomBoundable')", False, 'from pxr import Usd, Tf\n'), ((680, 17, 680, 55), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(680, 36, 680, 54): '"""UsdGeomXformable"""'}, {}), "('UsdGeomXformable')", False, 'from pxr import Usd, Tf\n'), ((681, 17, 681, 55), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(681, 36, 681, 54): '"""UsdGeomImageable"""'}, {}), "('UsdGeomImageable')", False, 'from pxr import Usd, Tf\n'), ((682, 17, 682, 47), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(682, 36, 682, 46): '"""UsdTyped"""'}, {}), "('UsdTyped')", False, 'from pxr import Usd, Tf\n'), ((696, 17, 696, 55), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(696, 36, 696, 54): '"""UsdGeomMotionAPI"""'}, {}), "('UsdGeomMotionAPI')", False, 'from pxr import Usd, Tf\n'), ((697, 17, 697, 54), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(697, 36, 697, 53): '"""UsdGeomModelAPI"""'}, {}), "('UsdGeomModelAPI')", False, 'from pxr import Usd, Tf\n'), ((321, 21, 321, 56), 'pxr.UsdGeom.Sphere.Define', 'UsdGeom.Sphere.Define', ({(321, 43, 321, 44): 's', (321, 46, 321, 55): '"""/sphere"""'}, {}), "(s, '/sphere')", False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((389, 35, 389, 50), 'pxr.Gf.Vec2f', 'Gf.Vec2f', ({(389, 44, 389, 45): '(5)', (389, 47, 389, 49): '(10)'}, {}), '(5, 10)', False, 'from pxr import Gf\n'), ((519, 28, 519, 50), 'pxr.Usd.TimeCode.Default', 'Usd.TimeCode.Default', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((539, 26, 539, 48), 'pxr.Usd.TimeCode.Default', 'Usd.TimeCode.Default', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((584, 33, 584, 55), 'pxr.Usd.TimeCode.Default', 'Usd.TimeCode.Default', 
({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((594, 33, 594, 55), 'pxr.Usd.TimeCode.Default', 'Usd.TimeCode.Default', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((609, 28, 609, 50), 'pxr.Gf.IsClose', 'Gf.IsClose', ({(609, 39, 609, 40): 'a', (609, 42, 609, 43): 'b', (609, 45, 609, 49): '(1e-05)'}, {}), '(a, b, 1e-05)', False, 'from pxr import Gf\n'), ((716, 24, 716, 58), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(716, 43, 716, 57): '"""UsdGeomXform"""'}, {}), "('UsdGeomXform')", False, 'from pxr import Usd, Tf\n'), ((719, 24, 719, 62), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(719, 43, 719, 61): '"""UsdGeomImageable"""'}, {}), "('UsdGeomImageable')", False, 'from pxr import Usd, Tf\n'), ((723, 24, 723, 57), 'pxr.Tf.Type.FindByName', 'Tf.Type.FindByName', ({(723, 43, 723, 56): '"""UsdModelAPI"""'}, {}), "('UsdModelAPI')", False, 'from pxr import Usd, Tf\n'), ((43, 12, 43, 32), 'pxr.Usd.SchemaRegistry', 'Usd.SchemaRegistry', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((499, 32, 499, 54), 'pxr.Gf.IsClose', 'Gf.IsClose', ({(499, 43, 499, 44): 'a', (499, 46, 499, 47): 'b', (499, 49, 499, 53): '(1e-05)'}, {}), '(a, b, 1e-05)', False, 'from pxr import Gf\n'), ((542, 32, 542, 54), 'pxr.Gf.IsClose', 'Gf.IsClose', ({(542, 43, 542, 44): 'a', (542, 46, 542, 47): 'b', (542, 49, 542, 53): '(1e-05)'}, {}), '(a, b, 1e-05)', False, 'from pxr import Gf\n'), ((575, 32, 575, 54), 'pxr.Gf.IsClose', 'Gf.IsClose', ({(575, 43, 575, 44): 'a', (575, 46, 575, 47): 'b', (575, 49, 575, 53): '(1e-05)'}, {}), '(a, b, 1e-05)', False, 'from pxr import Gf\n'), ((587, 32, 587, 54), 'pxr.Gf.IsClose', 'Gf.IsClose', ({(587, 43, 587, 44): 'a', (587, 46, 587, 47): 'b', (587, 49, 587, 53): '(1e-05)'}, {}), '(a, b, 1e-05)', False, 'from pxr import Gf\n'), ((597, 32, 597, 54), 'pxr.Gf.IsClose', 'Gf.IsClose', ({(597, 43, 597, 44): 'a', (597, 46, 597, 47): 'b', (597, 49, 597, 53): '(1e-05)'}, {}), '(a, b, 1e-05)', False, 'from pxr import Gf\n'), ((619, 24, 619, 46), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', ({(619, 41, 619, 45): 'prim'}, {}), '(prim)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((625, 24, 625, 46), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', ({(625, 41, 625, 45): 'prim'}, {}), '(prim)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((645, 24, 645, 44), 'pxr.Usd.SchemaRegistry', 'Usd.SchemaRegistry', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((646, 25, 646, 45), 'pxr.Usd.SchemaRegistry', 'Usd.SchemaRegistry', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((647, 25, 647, 45), 'pxr.Usd.SchemaRegistry', 'Usd.SchemaRegistry', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((667, 53, 667, 63), 'pxr.Usd.Prim', 'Usd.Prim', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((669, 52, 669, 62), 'pxr.Usd.Prim', 'Usd.Prim', ({}, {}), '()', False, 'from pxr import Usd, Tf\n'), ((510, 36, 510, 58), 'pxr.Gf.IsClose', 'Gf.IsClose', ({(510, 47, 510, 48): 'a', (510, 50, 510, 51): 'b', (510, 53, 510, 57): '(1e-05)'}, {}), '(a, b, 1e-05)', False, 'from pxr import Gf\n'), ((523, 36, 523, 58), 'pxr.Gf.IsClose', 'Gf.IsClose', ({(523, 47, 523, 48): 'a', (523, 50, 523, 51): 'b', (523, 53, 523, 57): '(1e-05)'}, {}), '(a, b, 1e-05)', False, 'from pxr import Gf\n'), ((620, 25, 620, 47), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', ({(620, 42, 620, 46): 'prim'}, {}), '(prim)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((621, 25, 621, 47), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', ({(621, 42, 621, 46): 'prim'}, {}), '(prim)', 
False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((626, 25, 626, 47), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', ({(626, 42, 626, 46): 'prim'}, {}), '(prim)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n'), ((627, 25, 627, 47), 'pxr.UsdGeom.ModelAPI', 'UsdGeom.ModelAPI', ({(627, 42, 627, 46): 'prim'}, {}), '(prim)', False, 'from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf\n')]
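As a distilled sketch of the extent API the tests above exercise (not part of the original test file; the stage, prim path, and point values are invented for illustration): compute a width-padded bounding box for a point-based prim and author it as the prim's `extent` attribute, the same calls the Points tests make.

from pxr import Usd, UsdGeom, Vt

stage = Usd.Stage.CreateInMemory()
points = UsdGeom.Points.Define(stage, '/DemoPoints')
points.CreatePointsAttr(Vt.Vec3fArray([(0, 0, 0), (1, 2, 3)]))
points.CreateWidthsAttr([1.0, 0.5])

# ComputeExtent returns the [min, max] corners of the axis-aligned box,
# padded by half of each point's width.
extent = UsdGeom.Points.ComputeExtent(points.GetPointsAttr().Get(),
                                      points.GetWidthsAttr().Get())
points.CreateExtentAttr(extent)
print(extent)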
avadavat/round_robin_generator
round_robin_generator/matchup_times.py
242d522386f6af26db029232fcffb51004ff4c59
import pandas as pd
from datetime import timedelta


def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger):
    time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns)

    if game_stagger == 0:
        for round_num in range(time_df.shape[0]):
            round_key = 'Round ' + str(round_num + 1)
            match_time = tournament_start_time + timedelta(minutes=(game_duration * round_num))
            time_df.loc[round_key, :] = match_time.strftime('%I:%M%p')
        return time_df
    else:
        # Given the algorithm, at worst every player can play every (game duration + stagger time),
        # because your opponent begins play one stagger count after you at the latest.
        for round_num in range(time_df.shape[0]):
            round_key = 'Round ' + str(round_num + 1)
            default_spread = [tournament_start_time + timedelta(minutes=game_num * game_stagger)
                              for game_num in range(time_df.shape[1])]
            match_times = [
                (def_time + timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p')
                for def_time in default_spread]
            time_df.loc[round_key, :] = match_times
        return time_df
[((6, 14, 6, 78), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((10, 49, 10, 95), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((20, 54, 20, 96), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n'), ((23, 28, 23, 91), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import timedelta\n')]
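A minimal usage sketch for `generate_times` above. The matchup DataFrame shape — rounds as the index, one column per simultaneous game — is an assumption inferred from the `.loc['Round N', :]` indexing in the function, not taken from the repository.

import pandas as pd
from datetime import datetime

from round_robin_generator.matchup_times import generate_times

# Hypothetical 3-round, 2-court schedule; only the frame's shape and index matter here.
matchups = pd.DataFrame(index=['Round 1', 'Round 2', 'Round 3'],
                        columns=['Game 1', 'Game 2'])

start = datetime(2021, 6, 1, 9, 0)  # tournament starts at 9:00 AM
times = generate_times(matchups, start, game_duration=30, game_stagger=10)
print(times)
# With a stagger, round k starts (30 + 10) * k minutes after `start`,
# and games within a round begin 10 minutes apart.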
seisatsu/DennisMUD-ESP32
src/commands/locate_item.py
b63d4b914c5e8d0f9714042997c64919b20be842
#######################
# Dennis MUD          #
# locate_item.py      #
# Copyright 2018-2020 #
# Michael D. Reiley   #
#######################
# **********
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# **********

NAME = "locate item"
CATEGORIES = ["items"]
ALIASES = ["find item"]
USAGE = "locate item <item_id>"
DESCRIPTION = """Find out what room the item <item_id> is in, or who is holding it.

You can only locate an item that you own. Wizards can locate any item.

Ex. `locate item 4`"""


def COMMAND(console, args):
    # Perform initial checks.
    if not COMMON.check(NAME, console, args, argc=1):
        return False

    # Perform argument type checks and casts.
    itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0)
    if itemid is None:
        return False

    # Check if the item exists.
    thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=False)
    if not thisitem:
        return False

    # Keep track of whether we found anything in case the item is duplified and we can't return right away.
    found_something = False

    # Check if we are holding the item.
    if itemid in console.user["inventory"]:
        console.msg("{0}: {1} ({2}) is in your inventory.".format(NAME, thisitem["name"], thisitem["id"]))
        # If the item is duplified we need to keep looking for other copies.
        if not thisitem["duplified"]:
            return True
        found_something = True

    # Check if someone else is holding the item.
    for targetuser in console.database.users.all():
        if targetuser["name"] == console.user["name"]:
            continue
        if itemid in targetuser["inventory"]:
            console.msg("{0}: {1} ({2}) is in the inventory of: {3}.".format(NAME, thisitem["name"],
                                                                             thisitem["id"], targetuser["name"]))
            # If the item is duplified we need to keep looking for other copies.
            if not thisitem["duplified"]:
                return True
            found_something = True

    # Check if the item is in a room.
    for targetroom in console.database.rooms.all():
        if itemid in targetroom["items"]:
            console.msg("{0}: {1} ({2}) is in room: {3} ({4})".format(NAME, thisitem["name"], thisitem["id"],
                                                                      targetroom["name"], targetroom["id"]))
            # If the item is duplified we need to keep looking for other copies.
            if not thisitem["duplified"]:
                return True
            found_something = True

    # Couldn't find the item.
    if not found_something:
        console.log.error("Item exists but has no location: {item}", item=itemid)
        console.msg("{0}: ERROR: Item exists but has no location. Use `requisition` to fix this.".format(NAME))
        return False

    # Finished.
    return True
[]
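Worth calling out in the command above: it returns on the first hit unless the item is duplified, in which case it keeps scanning so every copy gets reported, and only errors out if nothing was found anywhere. That control flow, stripped to a standalone sketch — the container structure here is hypothetical, not the game's actual data model:

def locate_copies(itemid, containers, duplified):
    found_something = False
    for c in containers:
        if itemid in c["items"]:
            print("found in", c["name"])
            if not duplified:
                return True         # ordinary items exist in one place: stop early
            found_something = True  # duplified items: keep scanning for more copies
    return found_something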
bcgov-c/wally
modelling/scsb/models/monthly-comparisons.py
264bc5d40f9b5cf293159f1bc0424cfd9ff8aa06
import json
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
from catboost import CatBoostRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as MSE, r2_score
import math

# with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f:
#     data = json.load(f)

all_zones_df = pd.read_csv("../data/scsb_all_zones.csv")
zone_25_df = pd.read_csv("../data/scsb_zone_25.csv")
zone_26_df = pd.read_csv("../data/scsb_zone_26.csv")
zone_27_df = pd.read_csv("../data/scsb_zone_27.csv")

month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist']
month_labels = [x[0:3] for x in month_dependant_variables]

data = zone_26_df

xgb_results = []
rfr_results = []
dtr_results = []

# calculate monthly estimations for 3 models
for dependant_month in month_dependant_variables:
    features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]]
    X = features_df.drop([dependant_month], axis=1)
    y = features_df.get(dependant_month)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)

    xgb = XGBRegressor(random_state=42)
    xgb.fit(X_train, y_train)
    xgb_results.append(xgb.predict(X))

    rfr = RandomForestRegressor(random_state=42)
    rfr.fit(X_train, y_train)
    rfr_results.append(rfr.predict(X))

    dtr = DecisionTreeRegressor(random_state=42)
    dtr.fit(X_train, y_train)
    dtr_results.append(dtr.predict(X))

# compare the outputs of scsb against the 3 models
for row_target_index in range(20):
    xgb_row = []
    rfr_row = []
    dtr_row = []
    for month in range(12):
        xgb_row.append(xgb_results[month][row_target_index])
        rfr_row.append(rfr_results[month][row_target_index])
        dtr_row.append(dtr_results[month][row_target_index])

    plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5)
    plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5)
    plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5)
    plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5)
    plt.legend(loc='best')
    plt.xticks(month_dependant_variables, month_labels)
    plt.xlabel('Month')
    plt.ylabel('Monthly Distribution')
    name = data['name'].iloc[row_target_index]
    plt.title(name)
    plt.savefig('../plots/{}.png'.format(name))
    plt.show()
[((16, 15, 16, 56), 'pandas.read_csv', 'pd.read_csv', ({(16, 27, 16, 55): '"""../data/scsb_all_zones.csv"""'}, {}), "('../data/scsb_all_zones.csv')", True, 'import pandas as pd\n'), ((17, 13, 17, 52), 'pandas.read_csv', 'pd.read_csv', ({(17, 25, 17, 51): '"""../data/scsb_zone_25.csv"""'}, {}), "('../data/scsb_zone_25.csv')", True, 'import pandas as pd\n'), ((18, 13, 18, 52), 'pandas.read_csv', 'pd.read_csv', ({(18, 25, 18, 51): '"""../data/scsb_zone_26.csv"""'}, {}), "('../data/scsb_zone_26.csv')", True, 'import pandas as pd\n'), ((19, 13, 19, 52), 'pandas.read_csv', 'pd.read_csv', ({(19, 25, 19, 51): '"""../data/scsb_zone_27.csv"""'}, {}), "('../data/scsb_zone_27.csv')", True, 'import pandas as pd\n'), ((35, 39, 35, 94), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((37, 10, 37, 39), 'xgboost.XGBRegressor', 'XGBRegressor', (), '', False, 'from xgboost import XGBRegressor\n'), ((41, 10, 41, 48), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', (), '', False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((45, 10, 45, 48), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', (), '', False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((60, 4, 60, 112), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((61, 4, 61, 67), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((62, 4, 62, 74), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((63, 4, 63, 75), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((64, 4, 64, 26), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((65, 4, 65, 55), 'matplotlib.pyplot.xticks', 'plt.xticks', ({(65, 15, 65, 40): 'month_dependant_variables', (65, 42, 65, 54): 'month_labels'}, {}), '(month_dependant_variables, month_labels)', True, 'import matplotlib.pyplot as plt\n'), ((66, 4, 66, 23), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(66, 15, 66, 22): '"""Month"""'}, {}), "('Month')", True, 'import matplotlib.pyplot as plt\n'), ((67, 4, 67, 38), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(67, 15, 67, 37): '"""Monthly Distribution"""'}, {}), "('Monthly Distribution')", True, 'import matplotlib.pyplot as plt\n'), ((69, 4, 69, 19), 'matplotlib.pyplot.title', 'plt.title', ({(69, 14, 69, 18): 'name'}, {}), '(name)', True, 'import matplotlib.pyplot as plt\n'), ((71, 4, 71, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n')]
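One loose end in the script above: `mean_squared_error` (aliased `MSE`), `r2_score`, and `math` are imported but never called, so the model comparison stays purely visual. A sketch of the numeric check those imports suggest, reusing only names the script already defines — after the training loop, `xgb`, `rfr`, and `dtr` hold the models fitted for the final month, and `X_test`/`y_test` are that month's held-out split:

for label, mdl in [('xgboost', xgb), ('randomforest', rfr), ('decisiontree', dtr)]:
    pred = mdl.predict(X_test)  # held-out rows from the last loop iteration
    rmse = math.sqrt(MSE(y_test, pred))
    print('{}: RMSE = {:.4f}, R^2 = {:.4f}'.format(label, rmse, r2_score(y_test, pred)))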
xzhnshng/databricks-zero-to-mlops
src/week2-mlflow/AutoML/XGBoost-fake-news-automl.py
f1691c6f6137ad8b938e64cea4700c7011efb800
# Databricks notebook source
# MAGIC %md
# MAGIC # XGBoost training
# MAGIC This is an auto-generated notebook. To reproduce these results, attach this notebook to the **10-3-ML-Cluster** cluster and rerun it.
# MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using the Experiments UI, this link isn't very useful.)
# MAGIC - Clone this notebook into your project folder by selecting **File > Clone** in the notebook toolbar.
# MAGIC
# MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_

# COMMAND ----------

import mlflow
import databricks.automl_runtime

# Use MLflow to track experiments
mlflow.set_experiment("/Users/[email protected]/databricks_automl/label_news_articles_csv-2022_03_12-15_38")

target_col = "label"

# COMMAND ----------

# MAGIC %md
# MAGIC ## Load Data

# COMMAND ----------

from mlflow.tracking import MlflowClient
import os
import uuid
import shutil
import pandas as pd

# Create temp directory to download input data from MLflow
input_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], "tmp", str(uuid.uuid4())[:8])
os.makedirs(input_temp_dir)

# Download the artifact and read it into a pandas DataFrame
input_client = MlflowClient()
input_data_path = input_client.download_artifacts("c2dfe80b419d4a8dbc88a90e3274369a", "data", input_temp_dir)

df_loaded = pd.read_parquet(os.path.join(input_data_path, "training_data"))
# Delete the temp data
shutil.rmtree(input_temp_dir)

# Preview data
df_loaded.head(5)

# COMMAND ----------

df_loaded.head(1).to_dict()

# COMMAND ----------

# MAGIC %md
# MAGIC ### Select supported columns
# MAGIC Select only the columns that are supported. This allows us to train a model that can predict on a dataset that has extra columns that are not used in training.
# MAGIC `[]` are dropped in the pipelines. See the Alerts tab of the AutoML Experiment page for details on why these columns are dropped.

# COMMAND ----------

from databricks.automl_runtime.sklearn.column_selector import ColumnSelector

supported_cols = ["text_without_stopwords", "published", "language", "main_img_url", "site_url", "hasImage", "title_without_stopwords", "text", "title", "type", "author"]
col_selector = ColumnSelector(supported_cols)

# COMMAND ----------

# MAGIC %md
# MAGIC ## Preprocessors

# COMMAND ----------

transformers = []

# COMMAND ----------

# MAGIC %md
# MAGIC ### Categorical columns

# COMMAND ----------

# MAGIC %md
# MAGIC #### Low-cardinality categoricals
# MAGIC Convert each low-cardinality categorical column into multiple binary columns through one-hot encoding.
# MAGIC For each input categorical column (string or numeric), the number of output columns is equal to the number of unique values in the input column.

# COMMAND ----------

from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder

one_hot_encoder = OneHotEncoder(handle_unknown="ignore")

transformers.append(("onehot", one_hot_encoder, ["published", "language", "site_url", "hasImage", "title", "title_without_stopwords", "text_without_stopwords"]))

# COMMAND ----------

# MAGIC %md
# MAGIC #### Medium-cardinality categoricals
# MAGIC Convert each medium-cardinality categorical column into a numerical representation.
# MAGIC Each string column is hashed to 1024 float columns.
# MAGIC Each numeric column is imputed with zeros.

# COMMAND ----------

from sklearn.feature_extraction import FeatureHasher
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline

for feature in ["text", "main_img_url"]:
    hash_transformer = Pipeline(steps=[
        ("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
        (f"{feature}_hasher", FeatureHasher(n_features=1024, input_type="string"))])
    transformers.append((f"{feature}_hasher", hash_transformer, [feature]))

# COMMAND ----------

# MAGIC %md
# MAGIC ### Text features
# MAGIC Convert each feature to a fixed-length vector using TF-IDF vectorization. The length of the output
# MAGIC vector is equal to 1024. Each column corresponds to one of the top word n-grams
# MAGIC where n is in the range [1, 2].

# COMMAND ----------

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import FunctionTransformer

for col in {'type', 'author'}:
    vectorizer = Pipeline(steps=[
        ("imputer", SimpleImputer(missing_values=None, strategy="constant", fill_value="")),
        # Reshape to 1D since SimpleImputer changes the shape of the input to 2D
        ("reshape", FunctionTransformer(np.reshape, kw_args={"newshape": -1})),
        ("tfidf", TfidfVectorizer(decode_error="ignore", ngram_range=(1, 2), max_features=1024))])
    transformers.append((f"text_{col}", vectorizer, [col]))

# COMMAND ----------

from sklearn.compose import ColumnTransformer

preprocessor = ColumnTransformer(transformers, remainder="passthrough", sparse_threshold=0)

# COMMAND ----------

# MAGIC %md
# MAGIC ### Feature standardization
# MAGIC Scale all feature columns to be centered around zero with unit variance.

# COMMAND ----------

from sklearn.preprocessing import StandardScaler

standardizer = StandardScaler()

# COMMAND ----------

# MAGIC %md
# MAGIC ## Train - Validation - Test Split
# MAGIC Split the input data into 3 sets:
# MAGIC - Train (60% of the dataset used to train the model)
# MAGIC - Validation (20% of the dataset used to tune the hyperparameters of the model)
# MAGIC - Test (20% of the dataset used to report the true performance of the model on an unseen dataset)

# COMMAND ----------

df_loaded.columns

# COMMAND ----------

from sklearn.model_selection import train_test_split

split_X = df_loaded.drop([target_col], axis=1)
split_y = df_loaded[target_col]

# Split out train data
X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y)

# Split remaining data equally for validation and test
X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem)

# COMMAND ----------

# MAGIC %md
# MAGIC ## Train classification model
# MAGIC - Log relevant metrics to MLflow to track runs
# MAGIC - All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)
# MAGIC - Change the model parameters and re-run the training cell to log a different trial to the MLflow experiment
# MAGIC - To view the full list of tunable hyperparameters, check the output of the cell below

# COMMAND ----------

from xgboost import XGBClassifier

help(XGBClassifier)

# COMMAND ----------

import mlflow
import sklearn
from sklearn import set_config
from sklearn.pipeline import Pipeline

set_config(display="diagram")

xgbc_classifier = XGBClassifier(
    colsample_bytree=0.7324555878929649,
    learning_rate=0.007636627530856404,
    max_depth=7,
    min_child_weight=6,
    n_estimators=106,
    n_jobs=100,
    subsample=0.6972187716458148,
    verbosity=0,
    random_state=799811440,
)

model = Pipeline([
    ("column_selector", col_selector),
    ("preprocessor", preprocessor),
    ("standardizer", standardizer),
    ("classifier", xgbc_classifier),
])

# Create a separate pipeline to transform the validation dataset. This is used for early stopping.
pipeline = Pipeline([
    ("column_selector", col_selector),
    ("preprocessor", preprocessor),
    ("standardizer", standardizer),
])

mlflow.sklearn.autolog(disable=True)
X_val_processed = pipeline.fit_transform(X_val, y_val)

model

# COMMAND ----------

# Enable automatic logging of input samples, metrics, parameters, and models
mlflow.sklearn.autolog(log_input_examples=True, silent=True)

with mlflow.start_run(run_name="xgboost") as mlflow_run:
    model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed, y_val)], classifier__verbose=False)

    # Training metrics are logged by MLflow autologging
    # Log metrics for the validation set
    xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix="val_")

    # Log metrics for the test set
    xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix="test_")

    # Display the logged metrics
    xgbc_val_metrics = {k.replace("val_", ""): v for k, v in xgbc_val_metrics.items()}
    xgbc_test_metrics = {k.replace("test_", ""): v for k, v in xgbc_test_metrics.items()}
    display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=["validation", "test"]))

# COMMAND ----------

# Patch requisite packages to the model environment YAML for model serving
import os
import shutil
import uuid
import yaml

None

import xgboost
from mlflow.tracking import MlflowClient

xgbc_temp_dir = os.path.join(os.environ["SPARK_LOCAL_DIRS"], str(uuid.uuid4())[:8])
os.makedirs(xgbc_temp_dir)
xgbc_client = MlflowClient()
xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, "model/conda.yaml", xgbc_temp_dir)
xgbc_model_env_str = open(xgbc_model_env_path)
xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader)

xgbc_parsed_model_env_str["dependencies"][-1]["pip"].append(f"xgboost=={xgboost.__version__}")

with open(xgbc_model_env_path, "w") as f:
    f.write(yaml.dump(xgbc_parsed_model_env_str))

xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path="model")
shutil.rmtree(xgbc_temp_dir)

# COMMAND ----------

# MAGIC %md
# MAGIC ## Feature importance
# MAGIC
# MAGIC SHAP is a game-theoretic approach to explain machine learning models, providing a summary plot
# MAGIC of the relationship between features and model output. Features are ranked in descending order of
# MAGIC importance, and impact/color describe the correlation between the feature and the target variable.
# MAGIC - Generating SHAP feature importance is a very memory intensive operation, so to ensure that AutoML can run trials without
# MAGIC running out of memory, we disable SHAP by default.<br />
# MAGIC You can set the flag defined below to `shap_enabled = True` and re-run this notebook to see the SHAP plots.
# MAGIC - To reduce the computational overhead of each trial, a single example is sampled from the validation set to explain.<br />
# MAGIC For more thorough results, increase the sample size of explanations, or provide your own examples to explain.
# MAGIC - SHAP cannot explain models using data with nulls; if your dataset has any, both the background data and
# MAGIC examples to explain will be imputed using the mode (most frequent values). This affects the computed
# MAGIC SHAP values, as the imputed samples may not match the actual data distribution.
# MAGIC
# MAGIC For more information on how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html).

# COMMAND ----------

# Set this flag to True and re-run the notebook to see the SHAP plots
shap_enabled = True

# COMMAND ----------

if shap_enabled:
    from shap import KernelExplainer, summary_plot
    # SHAP cannot explain models using data with nulls.
    # To enable SHAP to succeed, both the background data and examples to explain are imputed with the mode (most frequent values).
    mode = X_train.mode().iloc[0]

    # Sample background data for SHAP Explainer. Increase the sample size to reduce variance.
    train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode)

    # Sample a single example from the validation set to explain. Increase the sample size and rerun for more thorough results.
    example = X_val.sample(n=1).fillna(mode)

    # Use Kernel SHAP to explain feature importance on the example from the validation set.
    predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns))
    explainer = KernelExplainer(predict, train_sample, link="logit")
    shap_values = explainer.shap_values(example, l1_reg=False)
    summary_plot(shap_values, example, class_names=model.classes_)

# COMMAND ----------

# MAGIC %md
# MAGIC ## Inference
# MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share ML models, work together from experimentation to online testing and production, integrate with approval and governance workflows, and monitor ML deployments and their performance. The snippets below show how to add the model trained in this notebook to the model registry and to retrieve it later for inference.
# MAGIC
# MAGIC > **NOTE:** The `model_uri` for the model already trained in this notebook can be found in the cell below
# MAGIC
# MAGIC ### Register to Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC registered_model_version = mlflow.register_model(model_uri, model_name)
# MAGIC ```
# MAGIC
# MAGIC ### Load from Model Registry
# MAGIC ```
# MAGIC model_name = "Example"
# MAGIC model_version = registered_model_version.version
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri=f"models:/{model_name}/{model_version}")
# MAGIC model.predict(input_X)
# MAGIC ```
# MAGIC
# MAGIC ### Load model without registering
# MAGIC ```
# MAGIC model_uri = f"runs:/{ mlflow_run.info.run_id }/model"
# MAGIC
# MAGIC model = mlflow.pyfunc.load_model(model_uri)
# MAGIC model.predict(input_X)
# MAGIC ```

# COMMAND ----------

# model_uri for the generated model
print(f"runs:/{ mlflow_run.info.run_id }/model")

# COMMAND ----------

# MAGIC %md
# MAGIC ### Loading model to make prediction

# COMMAND ----------

model_uri = f"runs:/51c0348482e042ea8e4b7983ab6bff99/model"
model = mlflow.pyfunc.load_model(model_uri)
# model.predict(input_X)

# COMMAND ----------

import pandas as pd

data = {'author': {0: 'bigjim.com'},
        'published': {0: '2016-10-27T18:05:26.351+03:00'},
        'title': {0: 'aliens are coming to invade earth'},
        'text': {0: 'aliens are coming to invade earth'},
        'language': {0: 'english'},
        'site_url': {0: 'cnn.com'},
        'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'},
        'type': {0: 'bs'},
        'title_without_stopwords': {0: 'aliens are coming to invade earth'},
        'text_without_stopwords': {0: 'aliens are coming to invade earth'},
        'hasImage': {0: 1.0}}

df = pd.DataFrame(data=data)
df.head()

# COMMAND ----------

model.predict(df)

# COMMAND ----------
[((17, 0, 17, 110), 'mlflow.set_experiment', 'mlflow.set_experiment', ({(17, 22, 17, 109): '"""/Users/[email protected]/databricks_automl/label_news_articles_csv-2022_03_12-15_38"""'}, {}), "(\n '/Users/[email protected]/databricks_automl/label_news_articles_csv-2022_03_12-15_38'\n )", False, 'import mlflow\n'), ((36, 0, 36, 27), 'os.makedirs', 'os.makedirs', ({(36, 12, 36, 26): 'input_temp_dir'}, {}), '(input_temp_dir)', False, 'import os\n'), ((40, 15, 40, 29), 'mlflow.tracking.MlflowClient', 'MlflowClient', ({}, {}), '()', False, 'from mlflow.tracking import MlflowClient\n'), ((45, 0, 45, 29), 'shutil.rmtree', 'shutil.rmtree', ({(45, 14, 45, 28): 'input_temp_dir'}, {}), '(input_temp_dir)', False, 'import shutil\n'), ((65, 15, 65, 45), 'databricks.automl_runtime.sklearn.column_selector.ColumnSelector', 'ColumnSelector', ({(65, 30, 65, 44): 'supported_cols'}, {}), '(supported_cols)', False, 'from databricks.automl_runtime.sklearn.column_selector import ColumnSelector\n'), ((93, 18, 93, 56), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', (), '', False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((146, 15, 146, 91), 'sklearn.compose.ColumnTransformer', 'ColumnTransformer', (), '', False, 'from sklearn.compose import ColumnTransformer\n'), ((158, 15, 158, 31), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ({}, {}), '()', False, 'from sklearn.preprocessing import StandardScaler\n'), ((181, 45, 181, 137), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((184, 31, 184, 134), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((208, 0, 208, 29), 'sklearn.set_config', 'set_config', (), '', False, 'from sklearn import set_config\n'), ((210, 18, 220, 1), 'xgboost.XGBClassifier', 'XGBClassifier', (), '', False, 'from xgboost import XGBClassifier\n'), ((222, 8, 227, 2), 'sklearn.pipeline.Pipeline', 'Pipeline', ({(222, 17, 227, 1): "[('column_selector', col_selector), ('preprocessor', preprocessor), (\n 'standardizer', standardizer), ('classifier', xgbc_classifier)]"}, {}), "([('column_selector', col_selector), ('preprocessor', preprocessor),\n ('standardizer', standardizer), ('classifier', xgbc_classifier)])", False, 'from sklearn.pipeline import Pipeline\n'), ((230, 11, 234, 2), 'sklearn.pipeline.Pipeline', 'Pipeline', ({(230, 20, 234, 1): "[('column_selector', col_selector), ('preprocessor', preprocessor), (\n 'standardizer', standardizer)]"}, {}), "([('column_selector', col_selector), ('preprocessor', preprocessor),\n ('standardizer', standardizer)])", False, 'from sklearn.pipeline import Pipeline\n'), ((236, 0, 236, 36), 'mlflow.sklearn.autolog', 'mlflow.sklearn.autolog', (), '', False, 'import mlflow\n'), ((244, 0, 244, 60), 'mlflow.sklearn.autolog', 'mlflow.sklearn.autolog', (), '', False, 'import mlflow\n'), ((275, 0, 275, 26), 'os.makedirs', 'os.makedirs', ({(275, 12, 275, 25): 'xgbc_temp_dir'}, {}), '(xgbc_temp_dir)', False, 'import os\n'), ((276, 14, 276, 28), 'mlflow.tracking.MlflowClient', 'MlflowClient', ({}, {}), '()', False, 'from mlflow.tracking import MlflowClient\n'), ((279, 28, 279, 81), 'yaml.load', 'yaml.load', (), '', False, 'import yaml\n'), ((286, 0, 286, 28), 'shutil.rmtree', 'shutil.rmtree', ({(286, 14, 286, 27): 'xgbc_temp_dir'}, {}), '(xgbc_temp_dir)', False, 'import shutil\n'), ((379, 8, 379, 43), 'mlflow.pyfunc.load_model', 'mlflow.pyfunc.load_model', ({(379, 33, 
379, 42): 'model_uri'}, {}), '(model_uri)', False, 'import mlflow\n'), ((396, 5, 396, 28), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((43, 28, 43, 74), 'os.path.join', 'os.path.join', ({(43, 41, 43, 56): 'input_data_path', (43, 58, 43, 73): '"""training_data"""'}, {}), "(input_data_path, 'training_data')", False, 'import os\n'), ((246, 5, 246, 41), 'mlflow.start_run', 'mlflow.start_run', (), '', False, 'import mlflow\n'), ((251, 23, 251, 94), 'mlflow.sklearn.eval_and_log_metrics', 'mlflow.sklearn.eval_and_log_metrics', (), '', False, 'import mlflow\n'), ((254, 24, 254, 98), 'mlflow.sklearn.eval_and_log_metrics', 'mlflow.sklearn.eval_and_log_metrics', (), '', False, 'import mlflow\n'), ((328, 16, 328, 68), 'shap.KernelExplainer', 'KernelExplainer', (), '', False, 'from shap import KernelExplainer, summary_plot\n'), ((330, 4, 330, 66), 'shap.summary_plot', 'summary_plot', (), '', False, 'from shap import KernelExplainer, summary_plot\n'), ((259, 12, 259, 93), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((284, 10, 284, 46), 'yaml.dump', 'yaml.dump', ({(284, 20, 284, 45): 'xgbc_parsed_model_env_str'}, {}), '(xgbc_parsed_model_env_str)', False, 'import yaml\n'), ((35, 73, 35, 85), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((274, 65, 274, 77), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((327, 44, 327, 84), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((113, 20, 113, 90), 'sklearn.impute.SimpleImputer', 'SimpleImputer', (), '', False, 'from sklearn.impute import SimpleImputer\n'), ((114, 30, 114, 81), 'sklearn.feature_extraction.FeatureHasher', 'FeatureHasher', (), '', False, 'from sklearn.feature_extraction import FeatureHasher\n'), ((135, 20, 135, 90), 'sklearn.impute.SimpleImputer', 'SimpleImputer', (), '', False, 'from sklearn.impute import SimpleImputer\n'), ((137, 20, 137, 76), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (), '', False, 'from sklearn.preprocessing import FunctionTransformer\n'), ((138, 18, 138, 97), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', (), '', False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n')]
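The registry snippets in the notebook's markdown above are never executed. As a runnable counterpart — a sketch only, with "Example" standing in for a real registry name — this registers the trained run and scores the one-row frame built in the notebook through the registry URI:

import mlflow

model_name = "Example"  # hypothetical registry name
model_uri = f"runs:/{mlflow_run.info.run_id}/model"
registered_model_version = mlflow.register_model(model_uri, model_name)

registry_model = mlflow.pyfunc.load_model(
    f"models:/{model_name}/{registered_model_version.version}")
print(registry_model.predict(df))  # df is the single-row DataFrame from the notebook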
mfinzi/lucky-guess-chemist
lucky_guess/__init__.py
01898b733dc7d026f70d0cb6337309cb600502fb
import importlib
import pkgutil

__all__ = []
for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):
    module = importlib.import_module('.' + module_name, package=__name__)
    try:
        globals().update({k: getattr(module, k) for k in module.__all__})
        __all__ += module.__all__
    except AttributeError:
        continue
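# Editor's note (not part of the original file): with this __init__.py in
# place, every name listed in a submodule's __all__ is re-exported from the
# package root, so callers can write, e.g.
#
#   import lucky_guess
#   lucky_guess.some_helper()   # hypothetical name defined in a submodule
#
# Submodules that define no __all__ raise AttributeError and are skipped.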
BaptisteLafoux/aztec_tiling
shuffling_algorithm.py
413acd8751b8178942e91fbee32987f02bc5c695
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 22:04:48 2020

@author: baptistelafoux
"""

import domino
import numpy as np
import numpy.lib.arraysetops as aso


def spawn_block(x, y):
    # Fill the empty 2x2 block whose lower-left cell is (x, y) with a random
    # pair of parallel dominoes carrying opposite velocities.
    if np.random.rand() > 0.5:
        d1 = domino.domino(np.array([x, y]), np.array([x + 1, y]), np.array([0, -1]))
        d2 = domino.domino(np.array([x, y + 1]), np.array([x + 1, y + 1]), np.array([0, 1]))
    else:
        d1 = domino.domino(np.array([x, y]), np.array([x, y + 1]), np.array([-1, 0]))
        d2 = domino.domino(np.array([x + 1, y]), np.array([x + 1, y + 1]), np.array([1, 0]))

    return [d1, d2]


def aztec_grid(order, only_new_blocks=True):
    # Cell centres of the order-n Aztec diamond; with only_new_blocks=True,
    # return only the outer ring added when growing from order n-1 to n.
    grid_X, grid_Y = np.meshgrid(np.arange(2 * order) - (2 * order - 1) / 2,
                                 np.arange(2 * order) - (2 * order - 1) / 2)
    center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T
    center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))]

    X = center_pts[:, 0]
    Y = center_pts[:, 1]

    if only_new_blocks:
        idx = (np.abs(X) + np.abs(Y) <= order) & (np.abs(X) + np.abs(Y) > order - 1)
    else:
        idx = np.abs(X) + np.abs(Y) <= order

    return X[idx], Y[idx]


def add_to_grid(tiles, grid):
    # Register each tile in the grid dict under both of the cells it covers.
    for tile in tiles:
        grid[tile.pt1[0], tile.pt1[1]] = tile
        grid[tile.pt2[0], tile.pt2[1]] = tile
    return grid


def generate_good_block(grid):
    # Scan the grid in lexicographic order and fill every fully empty
    # 2x2 block with a freshly spawned domino pair.
    center_pts = np.array([*grid])
    center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))]

    X = center_pts[:, 0]
    Y = center_pts[:, 1]

    for (x, y) in zip(X, Y):
        try:
            if ~grid[x, y]:
                idx = [(x, y), (x + 1, y), (x, y + 1), (x + 1, y + 1)]
                try:
                    should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype=bool)
                    if should_create_a_block:
                        grid = add_to_grid(spawn_block(x, y), grid)
                except:
                    pass
        except:
            pass

    return grid


def enlarge_grid_deprec(grid, order):
    center_pts = [*grid]
    X_aztec, Y_aztec = aztec_grid(order)
    center_pts_aztec = [tuple([x, y]) for (x, y) in zip(X_aztec, Y_aztec)]

    diff_array = set(center_pts_aztec) - set(center_pts)

    if order > 1:
        for x, y in list(diff_array):
            grid[x, y] = False
    else:
        for (x, y) in zip(X_aztec, Y_aztec):
            grid[x, y] = False

    return grid


def enlarge_grid(grid, order):
    # Add the ring of empty cells that turns an order-(n-1) diamond into order n.
    X_aztec, Y_aztec = aztec_grid(order, True)
    for (x, y) in zip(X_aztec, Y_aztec):
        grid[x, y] = False
    return grid


def move_tiles(grid, curr_order):
    # Slide every tile one step along its velocity vector, vacating its old
    # cells and re-registering it at the new ones.
    temp_grid = {}

    for coord in grid:
        if grid[coord] != False:
            x1, y1 = grid[coord].pt1
            x2, y2 = grid[coord].pt2

            grid[coord].move()
            temp_grid = add_to_grid([grid[coord]], temp_grid)

            grid[x1, y1] = False
            grid[x2, y2] = False

    for coord in temp_grid:
        grid[coord] = temp_grid[coord]

    return grid


def destroy_bad_blocks(grid):
    # Remove pairs of facing tiles that would collide on the next move
    # (opposite velocities pointing at each other).
    center_pts = np.array([*grid])

    X = center_pts[:, 0]
    Y = center_pts[:, 1]

    for (x, y) in zip(X, Y):
        try:
            next_x, next_y = np.array([x, y]) + grid[x, y].v
            if grid[next_x, next_y] != False:
                if all(grid[next_x, next_y].v == -grid[x, y].v):
                    grid[x, y] = False
                    grid[next_x, next_y] = False
        except:
            pass

    return grid
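# --- Editor's sketch, not part of the original file ---
# How these pieces would compose into the classic Aztec-diamond domino
# shuffling loop. `domino.domino` and its `.move()` method come from the
# repo's own `domino` module, so their exact behaviour is an assumption here,
# which is why the sketch is left commented out.
#
#   grid = {}
#   grid = enlarge_grid(grid, 1)           # seed the order-1 diamond
#   grid = generate_good_block(grid)       # fill empty 2x2 cells with domino pairs
#   for order in range(2, 5):
#       grid = enlarge_grid(grid, order)   # grow the diamond by one ring
#       grid = destroy_bad_blocks(grid)    # delete facing pairs about to collide
#       grid = move_tiles(grid, order)     # slide every tile along its velocity
#       grid = generate_good_block(grid)   # refill the holes at random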
h3ct0r/gas_mapping_example
scripts/matrix_operations.py
57bd8333b4832281fbb89019df440374e2b50b9b
import numpy as np


def get_position_of_minimum(matrix):
    return np.unravel_index(np.nanargmin(matrix), matrix.shape)


def get_position_of_maximum(matrix):
    return np.unravel_index(np.nanargmax(matrix), matrix.shape)


def get_distance_matrix(cell_grid_x, cell_grid_y, x, y):
    return np.sqrt((x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2)


def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y):
    return (x - cell_grid_x) ** 2 + (y - cell_grid_y) ** 2
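# Editor's addition, not part of the original module: a minimal usage sketch.
# The helpers expect meshgrid-style coordinate arrays, as used when scoring a
# gas-distribution map cell by cell.
if __name__ == "__main__":
    xs, ys = np.meshgrid(np.arange(5), np.arange(5))

    # Euclidean distance from every cell centre to the point (x=2, y=3).
    d = get_distance_matrix(xs, ys, 2.0, 3.0)

    # Prints (3, 2): row index y=3, column index x=2.
    print(get_position_of_minimum(d))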
biljiang/pyprojects
ShanghaiPower/build_up.py
10095c6b8f2f32831e8a36e122d1799f135dc5df
from distutils.core import setup
from Cython.Build import cythonize

setup(ext_modules=cythonize(["license_chk.py"]))
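# Editor's note, not part of the original script: a setup file like this is
# typically run as
#
#   python build_up.py build_ext --inplace
#
# which has Cython translate license_chk.py to C and compile it into an
# extension module (.so/.pyd), so the licensing logic ships as compiled code
# rather than plain Python source.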
yamt/neutron
quantum/plugins/nicira/extensions/nvp_qos.py
f94126739a48993efaf1d1439dcd3dadb0c69742
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Aaron Rosen, Nicira Networks, Inc.

from abc import abstractmethod

from quantum.api import extensions
from quantum.api.v2 import attributes as attr
from quantum.api.v2 import base
from quantum.common import exceptions as qexception
from quantum import manager


# For policy.json/Auth
qos_queue_create = "create_qos_queue"
qos_queue_delete = "delete_qos_queue"
qos_queue_get = "get_qos_queue"
qos_queue_list = "get_qos_queues"


class DefaultQueueCreateNotAdmin(qexception.InUse):
    message = _("Need to be admin in order to create queue called default")


class DefaultQueueAlreadyExists(qexception.InUse):
    message = _("Default queue already exists.")


class QueueInvalidDscp(qexception.InvalidInput):
    message = _("Invalid value for dscp %(data)s must be integer.")


class QueueMinGreaterMax(qexception.InvalidInput):
    message = _("Invalid bandwidth rate, min greater than max.")


class QueueInvalidBandwidth(qexception.InvalidInput):
    message = _("Invalid bandwidth rate, %(data)s must be a non negative"
                " integer.")


class MissingDSCPForTrusted(qexception.InvalidInput):
    message = _("No DSCP field needed when QoS workload marked trusted")


class QueueNotFound(qexception.NotFound):
    message = _("Queue %(id)s does not exist")


class QueueInUseByPort(qexception.InUse):
    message = _("Unable to delete queue attached to port.")


class QueuePortBindingNotFound(qexception.NotFound):
    message = _("Port is not associated with lqueue")


def convert_to_unsigned_int_or_none(val):
    if val is None:
        return
    try:
        val = int(val)
        if val < 0:
            raise ValueError
    except (ValueError, TypeError):
        msg = _("'%s' must be a non negative integer.") % val
        raise qexception.InvalidInput(error_message=msg)
    return val


# Attribute Map
RESOURCE_ATTRIBUTE_MAP = {
    'qos_queues': {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True},
        'default': {'allow_post': True, 'allow_put': False,
                    'convert_to': attr.convert_to_boolean,
                    'is_visible': True, 'default': False},
        'name': {'allow_post': True, 'allow_put': False,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'min': {'allow_post': True, 'allow_put': False,
                'is_visible': True, 'default': '0',
                'convert_to': convert_to_unsigned_int_or_none},
        'max': {'allow_post': True, 'allow_put': False,
                'is_visible': True, 'default': None,
                'convert_to': convert_to_unsigned_int_or_none},
        'qos_marking': {'allow_post': True, 'allow_put': False,
                        'validate': {'type:values': ['untrusted', 'trusted']},
                        'default': 'untrusted', 'is_visible': True},
        'dscp': {'allow_post': True, 'allow_put': False,
                 'is_visible': True, 'default': '0',
                 'convert_to': convert_to_unsigned_int_or_none},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': None},
                      'is_visible': True},
    },
}

QUEUE = 'queue_id'
RXTX_FACTOR = 'rxtx_factor'
EXTENDED_ATTRIBUTES_2_0 = {
    'ports': {
        RXTX_FACTOR: {'allow_post': True, 'allow_put': False,
                      'is_visible': False, 'default': 1,
                      'convert_to': convert_to_unsigned_int_or_none},
        QUEUE: {'allow_post': False,
                'allow_put': False,
                'is_visible': True,
                'default': False}},
    'networks': {QUEUE: {'allow_post': True,
                         'allow_put': True,
                         'is_visible': True,
                         'default': False}}
}


class Nvp_qos(object):
    """Port Queue extension."""

    @classmethod
    def get_name(cls):
        return "nvp-qos"

    @classmethod
    def get_alias(cls):
        return "nvp-qos"

    @classmethod
    def get_description(cls):
        return "NVP QoS extension."

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/nvp-qos/api/v2.0"

    @classmethod
    def get_updated(cls):
        return "2012-10-05T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []
        plugin = manager.QuantumManager.get_plugin()
        resource_name = 'qos_queue'
        collection_name = resource_name.replace('_', '-') + "s"
        params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
        controller = base.create_resource(collection_name,
                                           resource_name,
                                           plugin, params, allow_bulk=False)
        ex = extensions.ResourceExtension(collection_name,
                                          controller)
        exts.append(ex)
        return exts

    def get_extended_resources(self, version):
        if version == "2.0":
            return dict(EXTENDED_ATTRIBUTES_2_0.items() +
                        RESOURCE_ATTRIBUTE_MAP.items())
        else:
            return {}


class QueuePluginBase(object):

    @abstractmethod
    def create_qos_queue(self, context, queue):
        pass

    @abstractmethod
    def delete_qos_queue(self, context, id):
        pass

    @abstractmethod
    def get_qos_queue(self, context, id, fields=None):
        pass

    @abstractmethod
    def get_qos_queues(self, context, filters=None, fields=None):
        pass
TrendingTechnology/easyneuron
easyneuron/math/__init__.py
b99822c7206a144a0ab61b3b6b5cddeaca1a3c6a
"""easyneuron.math contains all of the maths tools that you'd ever need for your AI projects, when used alongside Numpy. To suggest more to be added, please add an issue on the GitHub repo. """ from easyneuron.math.distance import euclidean_distance
shane-breeze/AlphaTwirl
tests/unit/concurrently/test_TaskPackageDropbox_put.py
59dbd5348af31d02e133d43fd5bfaad6b99a155e
# Tai Sakuma <[email protected]>
import pytest

try:
    import unittest.mock as mock
except ImportError:
    import mock

from alphatwirl.concurrently import TaskPackageDropbox

##__________________________________________________________________||
@pytest.fixture()
def workingarea():
    return mock.MagicMock()

@pytest.fixture()
def dispatcher():
    return mock.MagicMock()

@pytest.fixture()
def obj(workingarea, dispatcher):
    ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher,
                             sleep=0.01)
    ret.open()
    yield ret
    ret.close()

##__________________________________________________________________||
def test_repr(obj):
    repr(obj)

def test_open_terminate_close(workingarea, dispatcher):
    obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher,
                             sleep=0.01)

    assert 0 == workingarea.open.call_count
    assert 0 == workingarea.close.call_count
    assert 0 == dispatcher.terminate.call_count

    obj.open()
    assert 1 == workingarea.open.call_count
    assert 0 == workingarea.close.call_count
    assert 0 == dispatcher.terminate.call_count

    obj.terminate()
    assert 1 == workingarea.open.call_count
    assert 0 == workingarea.close.call_count
    assert 1 == dispatcher.terminate.call_count

    obj.close()
    assert 1 == workingarea.open.call_count
    assert 1 == workingarea.close.call_count
    assert 1 == dispatcher.terminate.call_count

def test_put(obj, workingarea, dispatcher):
    workingarea.put_package.side_effect = [0, 1]  # pkgidx
    dispatcher.run.side_effect = [1001, 1002]  # runid

    package0 = mock.MagicMock(name='package0')
    package1 = mock.MagicMock(name='package1')

    assert 0 == obj.put(package0)
    assert 1 == obj.put(package1)

    assert [mock.call(package0),
            mock.call(package1)] == workingarea.put_package.call_args_list
    assert [mock.call(workingarea, 0),
            mock.call(workingarea, 1)] == dispatcher.run.call_args_list

def test_put_multiple(obj, workingarea, dispatcher):
    workingarea.put_package.side_effect = [0, 1]  # pkgidx
    dispatcher.run_multiple.return_value = [1001, 1002]  # runid

    package0 = mock.MagicMock(name='package0')
    package1 = mock.MagicMock(name='package1')

    assert [0, 1] == obj.put_multiple([package0, package1])

    assert [mock.call(package0),
            mock.call(package1)] == workingarea.put_package.call_args_list
    assert [mock.call(workingarea,
                      [0, 1])] == dispatcher.run_multiple.call_args_list

##__________________________________________________________________||
gokarslan/networking-odl2
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py
6a6967832b2c02dfcff6a9f0ab6e36472b849ce8
# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testscenarios

from networking_odl.common import constants as odl_const
from networking_odl.dhcp import odl_dhcp_driver
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base

from oslo_config import cfg

load_tests = testscenarios.load_tests_apply_scenarios

cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')


class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase):

    def setUp(self):
        super(OdlDhcpDriverTestCase, self).setUp()
        cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl')
        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
        self.mech.initialize()

    def test_dhcp_flag_test(self):
        self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service)

    def test_dhcp_driver_load(self):
        self.assertTrue(isinstance(self.mech.dhcp_driver,
                                   odl_dhcp_driver.OdlDhcpDriver))

    def test_dhcp_port_create_on_subnet_event(self):
        data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
                                                   True)
        subnet_context = data['subnet_context']
        mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
            subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
        self.mech.journal.sync_pending_entries()
        port = self.get_port_id(data['plugin'], data['context'],
                                data['network_id'], data['subnet_id'])
        self.assertIsNotNone(port)

    def test_dhcp_delete_on_port_update_event(self):
        data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
                                                   True)
        subnet_context = data['subnet_context']
        plugin = data['plugin']

        self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        port_id = self.get_port_id(data['plugin'], data['context'],
                                   data['network_id'], data['subnet_id'])
        self.assertIsNotNone(port_id)
        port = plugin.get_port(data['context'], port_id)
        port['fixed_ips'] = []
        ports = {'port': port}
        plugin.update_port(data['context'], port_id, ports)
        mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
            subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port)
        self.mech.journal.sync_pending_entries()
        port_id = self.get_port_id(data['plugin'], data['context'],
                                   data['network_id'], data['subnet_id'])
        self.assertIsNone(port_id)
Dragonite/djangohat
users/migrations/0002_auto_20191113_1352.py
68890703b1fc647785cf120ada281d6f3fcc4121
# Generated by Django 2.2.2 on 2019-11-13 13:52

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='users',
            name='site_key',
            field=models.CharField(blank=True, default='b7265a9e874f4068b0b48d45ef97595a', max_length=32, unique=True),
        ),
    ]
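# Editor's note, not part of the generated migration: it is applied with the
# usual management command,
#
#   python manage.py migrate users
#
# The default looks like a uuid4().hex value that was evaluated once when
# makemigrations ran (an assumption from its shape), so every row falling
# back to the default would share this one string; with unique=True that can
# raise an IntegrityError, and a callable default avoids the problem.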
cjh0613/baserow
premium/backend/src/baserow_premium/api/admin/dashboard/views.py
62871f5bf53c9d25446976031aacb706c0abe584
from datetime import timedelta

from django.contrib.auth import get_user_model

from drf_spectacular.utils import extend_schema
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from rest_framework.views import APIView

from baserow.api.decorators import accept_timezone
from baserow.core.models import Group, Application
from baserow_premium.admin.dashboard.handler import AdminDashboardHandler

from .serializers import AdminDashboardSerializer


User = get_user_model()


class AdminDashboardView(APIView):
    permission_classes = (IsAdminUser,)

    @extend_schema(
        tags=["Admin"],
        operation_id="admin_dashboard",
        description="Returns the new and active users for the last 24 hours, 7 days and"
        " 30 days. The `previous_` values are the values of the period before, so for "
        "example `previous_new_users_last_24_hours` are the new users that signed up "
        "from 48 to 24 hours ago. It can be used to calculate an increase or decrease "
        "in the amount of signups. A list of the new and active users for every day "
        "for the last 30 days is also included.\n\nThis is a **premium** feature.",
        responses={
            200: AdminDashboardSerializer,
            401: None,
        },
    )
    @accept_timezone()
    def get(self, request, now):
        """
        Returns the new and active users for the last 24 hours, 7 days and 30 days.
        The `previous_` values are the values of the period before, so for example
        `previous_new_users_last_24_hours` are the new users that signed up from 48
        to 24 hours ago. It can be used to calculate an increase or decrease in the
        amount of signups. A list of the new and active users for every day for the
        last 30 days is also included.
        """

        handler = AdminDashboardHandler()
        total_users = User.objects.filter(is_active=True).count()
        total_groups = Group.objects.all().count()
        total_applications = Application.objects.all().count()
        new_users = handler.get_new_user_counts(
            {
                "new_users_last_24_hours": timedelta(hours=24),
                "new_users_last_7_days": timedelta(days=7),
                "new_users_last_30_days": timedelta(days=30),
            },
            include_previous=True,
        )
        active_users = handler.get_active_user_count(
            {
                "active_users_last_24_hours": timedelta(hours=24),
                "active_users_last_7_days": timedelta(days=7),
                "active_users_last_30_days": timedelta(days=30),
            },
            include_previous=True,
        )
        new_users_per_day = handler.get_new_user_count_per_day(
            timedelta(days=30), now=now
        )
        active_users_per_day = handler.get_active_user_count_per_day(
            timedelta(days=30), now=now
        )

        serializer = AdminDashboardSerializer(
            {
                "total_users": total_users,
                "total_groups": total_groups,
                "total_applications": total_applications,
                "new_users_per_day": new_users_per_day,
                "active_users_per_day": active_users_per_day,
                **new_users,
                **active_users,
            }
        )
        return Response(serializer.data)
dan3612812/socketChatRoom
src/clientOld.py
b0d548477687de2d9fd521826db9ea75e528de5c
# -*- coding: UTF-8 -*-
import sys
import socket
import time
import threading
import select

HOST = '192.168.11.98'
PORT = int(sys.argv[1])
queue = []

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOST, PORT))
queue.append(s)
print("add client to queue")


def socketRecv():
    # Background thread: print whatever the server sends.
    while True:
        data = s.recv(1024).decode("utf-8")
        print(data)
        time.sleep(0.1)


def inputJob():
    while True:
        data = input()
        s.send(bytes(data, "utf-8"))
        time.sleep(0.1)


socketThread = threading.Thread(target=socketRecv)
socketThread.start()
# inputThread = Thread(target=inputJob)
# inputThread.start()

try:
    while True:
        data = input()
        s.send(bytes(data, "utf-8"))
        time.sleep(0.1)
except (KeyboardInterrupt, EOFError):
    # The original `except KeyboardInterrupt or EOFError:` only caught
    # KeyboardInterrupt; a tuple is needed to catch both exceptions.
    print("in except")
    # s.close()  # close the connection
    socketThread.do_run = False
    # socketThread.join()
    # inputThread.join()
    print("close thread")
    sys.exit(0)
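# Editor's sketch, not part of the original file: a minimal echo server this
# client can be tested against locally (point HOST above at 127.0.0.1 and
# pass the same port on the command line). This is assumed companion code,
# not the project's actual server, so it is left commented out.
#
#   import socket
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind(("127.0.0.1", 5000))
#   srv.listen(1)
#   conn, addr = srv.accept()
#   while True:
#       data = conn.recv(1024)
#       if not data:
#           break
#       conn.sendall(data)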
lukaszlaszuk/insightconnect-plugins
plugins/anomali_threatstream/komand_anomali_threatstream/actions/import_observable/schema.py
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json


class Component:
    DESCRIPTION = "Import observable(s) into Anomali ThreatStream with approval"


class Input:
    FILE = "file"
    OBSERVABLE_SETTINGS = "observable_settings"


class Output:
    RESULTS = "results"


class ImportObservableInput(komand.Input):
    schema = json.loads("""
{
  "type": "object",
  "title": "Variables",
  "properties": {
    "file": {
      "$ref": "#/definitions/file",
      "title": "File",
      "description": "File of data to be imported into Anomali ThreatStream",
      "order": 1
    },
    "observable_settings": {
      "$ref": "#/definitions/observable_settings",
      "title": "Observable Settings",
      "description": "Settings needed for importing an observable that needs approval",
      "order": 2
    }
  },
  "required": [
    "file"
  ],
  "definitions": {
    "file": {
      "id": "file",
      "type": "object",
      "title": "File",
      "description": "File Object",
      "properties": {
        "content": {
          "type": "string",
          "title": "Content",
          "description": "File contents",
          "format": "bytes"
        },
        "filename": {
          "type": "string",
          "title": "Filename",
          "description": "Name of file"
        }
      }
    },
    "observable_settings": {
      "type": "object",
      "title": "observable_settings",
      "properties": {
        "classification": {
          "type": "string",
          "title": "Classification",
          "description": "Classification of the observable",
          "default": "private",
          "enum": [
            "public",
            "private"
          ],
          "order": 4
        },
        "confidence": {
          "type": "integer",
          "title": "Confidence",
          "description": "Confidence value assigned to the observable. Confidence score can range from 0-100, in increasing order of confidence",
          "order": 1
        },
        "domain_mapping": {
          "type": "string",
          "title": "Domain Mapping",
          "description": "Indicator type to assign if a specific type is not associated with an observable",
          "order": 8
        },
        "email_mapping": {
          "type": "string",
          "title": "Email Mapping",
          "description": "Indicator type to assign if a specific type is not associated with an observable",
          "order": 10
        },
        "expiration_ts": {
          "type": "string",
          "title": "Expiration Time Stamp",
          "displayType": "date",
          "description": "Time stamp of when intelligence will expire on ThreatStream",
          "format": "date-time",
          "order": 5
        },
        "ip_mapping": {
          "type": "string",
          "title": "IP Mapping",
          "description": "Indicator type to assign if a specific type is not associated with an observable",
          "order": 7
        },
        "md5_mapping": {
          "type": "string",
          "title": "MD5 Mapping",
          "description": "Indicator type to assign if a specific type is not associated with an observable",
          "order": 11
        },
        "notes": {
          "type": "array",
          "title": "Notes",
          "description": "Additional details for the observable. This information is displayed in the Tags column of the ThreatStream UI e.g ['note1', 'note2', 'note3']",
          "items": {
            "type": "string"
          },
          "order": 6
        },
        "severity": {
          "type": "string",
          "title": "Severity",
          "description": "Severity you want to assign to the observable when it is imported",
          "default": "",
          "enum": [
            "low",
            "medium",
            "high",
            "very-high",
            ""
          ],
          "order": 3
        },
        "source_confidence_weight": {
          "type": "integer",
          "title": "Source Confidence Weight",
          "description": "Specifies the ratio between the amount of the source confidence of each observable and the ThreatStream confidence",
          "order": 2
        },
        "threat_type": {
          "type": "string",
          "title": "Threat Type",
          "description": "Type of threat associated with the imported observables",
          "order": 13
        },
        "trustedcircles": {
          "type": "array",
          "title": "Trusted Circles",
          "description": "ID of the trusted circle to which this threat data should be imported. If you want to import the threat data to multiple trusted circles, enter the list of comma-separated IDs e.g [1,2,3]",
          "items": {
            "type": "integer"
          },
          "order": 12
        },
        "url_mapping": {
          "type": "string",
          "title": "URL Mapping",
          "description": "Indicator type to assign if a specific type is not associated with an observable",
          "order": 9
        }
      },
      "required": [
        "classification"
      ]
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)


class ImportObservableOutput(komand.Output):
    schema = json.loads("""
{
  "type": "object",
  "title": "Variables",
  "properties": {
    "results": {
      "$ref": "#/definitions/import_observable_response",
      "title": "Results",
      "description": "Results from importing observable(s)",
      "order": 1
    }
  },
  "definitions": {
    "import_observable_response": {
      "type": "object",
      "title": "import_observable_response",
      "properties": {
        "import_session_id": {
          "type": "string",
          "title": "Import Session ID",
          "description": "ID for import session",
          "order": 3
        },
        "job_id": {
          "type": "string",
          "title": "Job ID",
          "description": "Job ID",
          "order": 1
        },
        "success": {
          "type": "boolean",
          "title": "Success",
          "description": "If import was successful",
          "order": 2
        }
      }
    }
  }
}
    """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
citrix-openstack-build/trove
trove/tests/unittests/quota/test_quota.py
52506396dd7bd095d1623d40cf2e67f2b478dc1d
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import testtools
from mockito import mock, when, unstub, any, verify, never, times
from mock import Mock
from trove.quota.quota import DbQuotaDriver
from trove.quota.models import Resource
from trove.quota.models import Quota
from trove.quota.models import QuotaUsage
from trove.quota.models import Reservation
from trove.db.models import DatabaseModelBase
from trove.extensions.mgmt.quota.service import QuotaController
from trove.common import exception
from trove.common import cfg
from trove.quota.quota import run_with_quotas
from trove.quota.quota import QUOTAS

"""
Unit tests for the classes and functions in DbQuotaDriver.py.
"""

CONF = cfg.CONF
resources = {
    Resource.INSTANCES: Resource(Resource.INSTANCES,
                                 'max_instances_per_user'),
    Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'),
}

FAKE_TENANT1 = "123456"
FAKE_TENANT2 = "654321"


class Run_with_quotasTest(testtools.TestCase):

    def setUp(self):
        super(Run_with_quotasTest, self).setUp()
        self.quota_reserve_orig = QUOTAS.reserve
        self.quota_rollback_orig = QUOTAS.rollback
        self.quota_commit_orig = QUOTAS.commit
        QUOTAS.reserve = Mock()
        QUOTAS.rollback = Mock()
        QUOTAS.commit = Mock()

    def tearDown(self):
        super(Run_with_quotasTest, self).tearDown()
        QUOTAS.reserve = self.quota_reserve_orig
        QUOTAS.rollback = self.quota_rollback_orig
        QUOTAS.commit = self.quota_commit_orig

    def test_run_with_quotas(self):
        f = Mock()
        run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)
        self.assertTrue(QUOTAS.reserve.called)
        self.assertTrue(QUOTAS.commit.called)
        self.assertFalse(QUOTAS.rollback.called)
        self.assertTrue(f.called)

    def test_run_with_quotas_error(self):
        f = Mock(side_effect=Exception())
        self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1,
                          {'instances': 1, 'volumes': 5}, f)
        self.assertTrue(QUOTAS.reserve.called)
        self.assertTrue(QUOTAS.rollback.called)
        self.assertFalse(QUOTAS.commit.called)
        self.assertTrue(f.called)


class QuotaControllerTest(testtools.TestCase):

    def setUp(self):
        super(QuotaControllerTest, self).setUp()
        context = mock()
        context.is_admin = True
        req = mock()
        req.environ = mock()
        when(req.environ).get(any()).thenReturn(context)
        self.req = req
        self.controller = QuotaController()

    def tearDown(self):
        super(QuotaControllerTest, self).tearDown()
        unstub()

    def test_update_unknown_resource(self):
        body = {'quotas': {'unknown_resource': 5}}
        self.assertRaises(exception.QuotaResourceUnknown,
                          self.controller.update, self.req, body,
                          FAKE_TENANT1, FAKE_TENANT2)

    def test_update_resource_no_value(self):
        quota = mock(Quota)
        when(DatabaseModelBase).find_by(
            tenant_id=FAKE_TENANT2,
            resource='instances').thenReturn(quota)
        body = {'quotas': {'instances': None}}
        result = self.controller.update(self.req, body, FAKE_TENANT1,
                                        FAKE_TENANT2)
        verify(quota, never).save()
        self.assertEquals(200, result.status)

    def test_update_resource_instance(self):
        instance_quota = mock(Quota)
        when(DatabaseModelBase).find_by(
            tenant_id=FAKE_TENANT2,
            resource='instances').thenReturn(instance_quota)
        body = {'quotas': {'instances': 2}}
        result = self.controller.update(self.req, body, FAKE_TENANT1,
                                        FAKE_TENANT2)
        verify(instance_quota, times=1).save()
        self.assertTrue('instances' in result._data['quotas'])
        self.assertEquals(200, result.status)
        self.assertEquals(2, result._data['quotas']['instances'])

    @testtools.skipIf(not CONF.trove_volume_support,
                      'Volume support is not enabled')
    def test_update_resource_volume(self):
        instance_quota = mock(Quota)
        when(DatabaseModelBase).find_by(
            tenant_id=FAKE_TENANT2,
            resource='instances').thenReturn(instance_quota)
        volume_quota = mock(Quota)
        when(DatabaseModelBase).find_by(
            tenant_id=FAKE_TENANT2,
            resource='volumes').thenReturn(volume_quota)
        body = {'quotas': {'instances': None, 'volumes': 10}}
        result = self.controller.update(self.req, body, FAKE_TENANT1,
                                        FAKE_TENANT2)
        verify(instance_quota, never).save()
        self.assertFalse('instances' in result._data['quotas'])
        verify(volume_quota, times=1).save()
        self.assertEquals(200, result.status)
        self.assertEquals(10, result._data['quotas']['volumes'])


class DbQuotaDriverTest(testtools.TestCase):

    def setUp(self):
        super(DbQuotaDriverTest, self).setUp()
        self.driver = DbQuotaDriver(resources)
        self.orig_Quota_find_all = Quota.find_all
        self.orig_QuotaUsage_find_all = QuotaUsage.find_all
        self.orig_QuotaUsage_find_by = QuotaUsage.find_by
        self.orig_Reservation_create = Reservation.create
        self.orig_QuotaUsage_create = QuotaUsage.create
        self.orig_QuotaUsage_save = QuotaUsage.save
        self.orig_Reservation_save = Reservation.save
        self.mock_quota_result = Mock()
        self.mock_usage_result = Mock()
        Quota.find_all = Mock(return_value=self.mock_quota_result)
        QuotaUsage.find_all = Mock(return_value=self.mock_usage_result)

    def tearDown(self):
        super(DbQuotaDriverTest, self).tearDown()
        Quota.find_all = self.orig_Quota_find_all
        QuotaUsage.find_all = self.orig_QuotaUsage_find_all
        QuotaUsage.find_by = self.orig_QuotaUsage_find_by
        Reservation.create = self.orig_Reservation_create
        QuotaUsage.create = self.orig_QuotaUsage_create
        QuotaUsage.save = self.orig_QuotaUsage_save
        Reservation.save = self.orig_Reservation_save

    def test_get_defaults(self):
        defaults = self.driver.get_defaults(resources)
        self.assertEqual(CONF.max_instances_per_user,
                         defaults[Resource.INSTANCES])
        self.assertEqual(CONF.max_volumes_per_user,
                         defaults[Resource.VOLUMES])

    def test_get_quota_by_tenant(self):
        FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
                             resource=Resource.INSTANCES,
                             hard_limit=12)]
        self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)

        quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
                                                Resource.VOLUMES)

        self.assertEquals(FAKE_TENANT1, quota.tenant_id)
        self.assertEquals(Resource.INSTANCES, quota.resource)
        self.assertEquals(12, quota.hard_limit)

    def test_get_quota_by_tenant_default(self):
        self.mock_quota_result.all = Mock(return_value=[])

        quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
                                                Resource.VOLUMES)

        self.assertEquals(FAKE_TENANT1, quota.tenant_id)
        self.assertEquals(Resource.VOLUMES, quota.resource)
        self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit)

    def test_get_all_quotas_by_tenant(self):
        FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
                             resource=Resource.INSTANCES,
                             hard_limit=22),
                       Quota(tenant_id=FAKE_TENANT1,
                             resource=Resource.VOLUMES,
                             hard_limit=15)]
        self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)

        quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
                                                      resources.keys())

        self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
        self.assertEquals(Resource.INSTANCES,
                          quotas[Resource.INSTANCES].resource)
        self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit)
        self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
        self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
        self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit)

    def test_get_all_quotas_by_tenant_with_all_default(self):
        self.mock_quota_result.all = Mock(return_value=[])

        quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
                                                      resources.keys())

        self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
        self.assertEquals(Resource.INSTANCES,
                          quotas[Resource.INSTANCES].resource)
        self.assertEquals(CONF.max_instances_per_user,
                          quotas[Resource.INSTANCES].hard_limit)
        self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
        self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
        self.assertEquals(CONF.max_volumes_per_user,
                          quotas[Resource.VOLUMES].hard_limit)

    def test_get_all_quotas_by_tenant_with_one_default(self):
        FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
                             resource=Resource.INSTANCES,
                             hard_limit=22)]
        self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)

        quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
                                                      resources.keys())

        self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
        self.assertEquals(Resource.INSTANCES,
                          quotas[Resource.INSTANCES].resource)
        self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit)
        self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
        self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
        self.assertEquals(CONF.max_volumes_per_user,
                          quotas[Resource.VOLUMES].hard_limit)

    def test_get_quota_usage_by_tenant(self):
        FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=3,
                                  reserved=1)]
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
                                                      Resource.VOLUMES)

        self.assertEquals(FAKE_TENANT1, usage.tenant_id)
        self.assertEquals(Resource.VOLUMES, usage.resource)
        self.assertEquals(3, usage.in_use)
        self.assertEquals(1, usage.reserved)

    def test_get_quota_usage_by_tenant_default(self):
        FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
                                resource=Resource.VOLUMES,
                                in_use=0,
                                reserved=0)
        self.mock_usage_result.all = Mock(return_value=[])
        QuotaUsage.create = Mock(return_value=FAKE_QUOTA)

        usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
                                                      Resource.VOLUMES)

        self.assertEquals(FAKE_TENANT1, usage.tenant_id)
        self.assertEquals(Resource.VOLUMES, usage.resource)
        self.assertEquals(0, usage.in_use)
        self.assertEquals(0, usage.reserved)

    def test_get_all_quota_usages_by_tenant(self):
        FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=2,
                                  reserved=1),
                       QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=1,
                                  reserved=1)]
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
                                                            resources.keys())

        self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
        self.assertEquals(Resource.INSTANCES,
                          usages[Resource.INSTANCES].resource)
        self.assertEquals(2, usages[Resource.INSTANCES].in_use)
        self.assertEquals(1, usages[Resource.INSTANCES].reserved)
        self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
        self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
        self.assertEquals(1, usages[Resource.VOLUMES].in_use)
        self.assertEquals(1, usages[Resource.VOLUMES].reserved)

    def test_get_all_quota_usages_by_tenant_with_all_default(self):
        FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=0,
                                  reserved=0),
                       QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=0,
                                  reserved=0)]
        self.mock_usage_result.all = Mock(return_value=[])
        QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS)

        usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
                                                            resources.keys())

        self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
        self.assertEquals(Resource.INSTANCES,
                          usages[Resource.INSTANCES].resource)
        self.assertEquals(0, usages[Resource.INSTANCES].in_use)
        self.assertEquals(0, usages[Resource.INSTANCES].reserved)
        self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
        self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
        self.assertEquals(0, usages[Resource.VOLUMES].in_use)
        self.assertEquals(0, usages[Resource.VOLUMES].reserved)

    def test_get_all_quota_usages_by_tenant_with_one_default(self):
        FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=0,
                                  reserved=0)]
        NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
                                    resource=Resource.VOLUMES,
                                    in_use=0,
                                    reserved=0)
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
        QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA)

        usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
                                                            resources.keys())

        self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
        self.assertEquals(Resource.INSTANCES,
                          usages[Resource.INSTANCES].resource)
        self.assertEquals(0, usages[Resource.INSTANCES].in_use)
        self.assertEquals(0, usages[Resource.INSTANCES].reserved)
        self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
        self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
        self.assertEquals(0, usages[Resource.VOLUMES].in_use)
        self.assertEquals(0, usages[Resource.VOLUMES].reserved)

    def test_reserve(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=1,
                                  reserved=2),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=1,
                                  reserved=1)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
        QuotaUsage.save = Mock()
        Reservation.create = Mock()

        delta = {'instances': 2, 'volumes': 3}
        self.driver.reserve(FAKE_TENANT1, resources, delta)
        _, kw = Reservation.create.call_args_list[0]
        self.assertEquals(1, kw['usage_id'])
        self.assertEquals(2, kw['delta'])
        self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
        _, kw = Reservation.create.call_args_list[1]
        self.assertEquals(2, kw['usage_id'])
        self.assertEquals(3, kw['delta'])
        self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])

    def test_reserve_resource_unknown(self):
        delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123}
        self.assertRaises(exception.QuotaResourceUnknown,
                          self.driver.reserve,
                          FAKE_TENANT1,
                          resources,
                          delta)

    def test_reserve_over_quota(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=0,
                                  reserved=0),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=0,
                                  reserved=0)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        delta = {'instances': 1, 'volumes': CONF.max_volumes_per_user + 1}
        self.assertRaises(exception.QuotaExceeded,
                          self.driver.reserve,
                          FAKE_TENANT1,
                          resources,
                          delta)

    def test_reserve_over_quota_with_usage(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=1,
                                  reserved=0),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=0,
                                  reserved=0)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        delta = {'instances': 5, 'volumes': 3}
        self.assertRaises(exception.QuotaExceeded,
                          self.driver.reserve,
                          FAKE_TENANT1,
                          resources,
                          delta)

    def test_reserve_over_quota_with_reserved(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=1,
                                  reserved=2),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=0,
                                  reserved=0)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)

        delta = {'instances': 4, 'volumes': 2}
        self.assertRaises(exception.QuotaExceeded,
                          self.driver.reserve,
                          FAKE_TENANT1,
                          resources,
                          delta)

    def test_reserve_over_quota_but_can_apply_negative_deltas(self):
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=10,
                                  reserved=0),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=50,
                                  reserved=0)]
        self.mock_quota_result.all = Mock(return_value=[])
        self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
        QuotaUsage.save = Mock()
        Reservation.create = Mock()

        delta = {'instances': -1, 'volumes': -3}
        self.driver.reserve(FAKE_TENANT1, resources, delta)
        _, kw = Reservation.create.call_args_list[0]
        self.assertEquals(1, kw['usage_id'])
        self.assertEquals(-1, kw['delta'])
        self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])
        _, kw = Reservation.create.call_args_list[1]
        self.assertEquals(2, kw['usage_id'])
        self.assertEquals(-3, kw['delta'])
        self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])

    def test_commit(self):
        Reservation.save = Mock()
        QuotaUsage.save = Mock()
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=5,
                                  reserved=2),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=1,
                                  reserved=2)]
        FAKE_RESERVATIONS = [Reservation(usage_id=1,
                                         delta=1,
                                         status=Reservation.Statuses.RESERVED),
                             Reservation(usage_id=2,
                                         delta=2,
                                         status=Reservation.Statuses.RESERVED)]

        QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)

        self.driver.commit(FAKE_RESERVATIONS)

        self.assertEqual(6, FAKE_QUOTAS[0].in_use)
        self.assertEqual(1, FAKE_QUOTAS[0].reserved)
        self.assertEqual(Reservation.Statuses.COMMITTED,
                         FAKE_RESERVATIONS[0].status)
        self.assertEqual(3, FAKE_QUOTAS[1].in_use)
        self.assertEqual(0, FAKE_QUOTAS[1].reserved)
        self.assertEqual(Reservation.Statuses.COMMITTED,
                         FAKE_RESERVATIONS[1].status)

    def test_rollback(self):
        Reservation.save = Mock()
        QuotaUsage.save = Mock()
        FAKE_QUOTAS = [QuotaUsage(id=1,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.INSTANCES,
                                  in_use=5,
                                  reserved=2),
                       QuotaUsage(id=2,
                                  tenant_id=FAKE_TENANT1,
                                  resource=Resource.VOLUMES,
                                  in_use=1,
                                  reserved=2)]
        FAKE_RESERVATIONS = [Reservation(usage_id=1,
                                         delta=1,
                                         status=Reservation.Statuses.RESERVED),
                             Reservation(usage_id=2,
                                         delta=2,
                                         status=Reservation.Statuses.RESERVED)]

        QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)

        self.driver.rollback(FAKE_RESERVATIONS)

        self.assertEqual(5, FAKE_QUOTAS[0].in_use)
        self.assertEqual(1, FAKE_QUOTAS[0].reserved)
        self.assertEqual(Reservation.Statuses.ROLLEDBACK,
                         FAKE_RESERVATIONS[0].status)
        self.assertEqual(1, FAKE_QUOTAS[1].in_use)
        self.assertEqual(0, FAKE_QUOTAS[1].reserved)
        self.assertEqual(Reservation.Statuses.ROLLEDBACK,
                         FAKE_RESERVATIONS[1].status)
[((34, 24, 34, 78), 'trove.quota.models.Resource', 'Resource', ({(34, 33, 34, 51): 'Resource.INSTANCES', (34, 53, 34, 77): '"""max_instances_per_user"""'}, {}), "(Resource.INSTANCES, 'max_instances_per_user')", False, 'from trove.quota.models import Resource\n'), ((35, 22, 35, 72), 'trove.quota.models.Resource', 'Resource', ({(35, 31, 35, 47): 'Resource.VOLUMES', (35, 49, 35, 71): '"""max_volumes_per_user"""'}, {}), "(Resource.VOLUMES, 'max_volumes_per_user')", False, 'from trove.quota.models import Resource\n'), ((126, 5, 127, 54), 'testtools.skipIf', 'testtools.skipIf', ({(126, 22, 126, 51): '(not CONF.trove_volume_support)', (127, 22, 127, 53): '"""Volume support is not enabled"""'}, {}), "(not CONF.trove_volume_support, 'Volume support is not enabled'\n )", False, 'import testtools\n'), ((49, 25, 49, 31), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((50, 26, 50, 32), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((51, 24, 51, 30), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((61, 12, 61, 18), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((62, 8, 62, 72), 'trove.quota.quota.run_with_quotas', 'run_with_quotas', ({(62, 24, 62, 36): 'FAKE_TENANT1', (62, 38, 62, 68): "{'instances': 1, 'volumes': 5}", (62, 70, 62, 71): 'f'}, {}), "(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)", False, 'from trove.quota.quota import run_with_quotas\n'), ((85, 18, 85, 24), 'mockito.mock', 'mock', ({}, {}), '()', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((87, 14, 87, 20), 'mockito.mock', 'mock', ({}, {}), '()', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((88, 22, 88, 28), 'mockito.mock', 'mock', ({}, {}), '()', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((91, 26, 91, 43), 'trove.extensions.mgmt.quota.service.QuotaController', 'QuotaController', ({}, {}), '()', False, 'from trove.extensions.mgmt.quota.service import QuotaController\n'), ((95, 8, 95, 16), 'mockito.unstub', 'unstub', ({}, {}), '()', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((104, 16, 104, 27), 'mockito.mock', 'mock', ({(104, 21, 104, 26): 'Quota'}, {}), '(Quota)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((114, 25, 114, 36), 'mockito.mock', 'mock', ({(114, 30, 114, 35): 'Quota'}, {}), '(Quota)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((129, 25, 129, 36), 'mockito.mock', 'mock', ({(129, 30, 129, 35): 'Quota'}, {}), '(Quota)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((133, 23, 133, 34), 'mockito.mock', 'mock', ({(133, 28, 133, 33): 'Quota'}, {}), '(Quota)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((152, 22, 152, 46), 'trove.quota.quota.DbQuotaDriver', 'DbQuotaDriver', ({(152, 36, 152, 45): 'resources'}, {}), '(resources)', False, 'from trove.quota.quota import DbQuotaDriver\n'), ((160, 33, 160, 39), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((161, 33, 161, 39), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((162, 25, 162, 66), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((163, 30, 163, 71), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((188, 37, 188, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((199, 37, 199, 58), 
'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((217, 37, 217, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((232, 37, 232, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((253, 37, 253, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((274, 37, 274, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((286, 21, 289, 43), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((291, 37, 291, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((292, 28, 292, 57), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((313, 37, 313, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((339, 37, 339, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((340, 28, 340, 57), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((362, 25, 365, 47), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((366, 37, 366, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((367, 28, 367, 61), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((395, 37, 395, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((396, 37, 396, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((397, 26, 397, 32), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((398, 29, 398, 35), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((433, 37, 433, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((434, 37, 434, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((456, 37, 456, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((457, 37, 457, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((479, 37, 479, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((480, 37, 480, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((502, 37, 502, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((503, 37, 503, 67), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((505, 26, 505, 32), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((506, 29, 506, 35), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((521, 27, 521, 33), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((522, 26, 522, 32), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((542, 29, 542, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((557, 27, 557, 33), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((558, 26, 558, 32), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((578, 29, 578, 58), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((184, 23, 186, 43), 'trove.quota.models.Quota', 'Quota', (), '', False, 'from trove.quota.models import Quota\n'), ((210, 23, 212, 43), 'trove.quota.models.Quota', 'Quota', (), '', False, 'from trove.quota.models import Quota\n'), ((213, 23, 215, 43), 'trove.quota.models.Quota', 'Quota', (), '', False, 'from trove.quota.models import Quota\n'), ((249, 23, 251, 43), 'trove.quota.models.Quota', 'Quota', (), '', False, 'from trove.quota.models import Quota\n'), ((269, 23, 272, 45), 'trove.quota.models.QuotaUsage', 
'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((304, 23, 307, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((308, 23, 311, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((330, 23, 333, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((334, 23, 337, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((357, 23, 360, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((384, 23, 388, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((389, 23, 393, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((422, 23, 426, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((427, 23, 431, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((445, 23, 449, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((450, 23, 454, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((468, 23, 472, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((473, 23, 477, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((491, 23, 495, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((496, 23, 500, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((524, 23, 528, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((529, 23, 533, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((535, 29, 537, 78), 'trove.quota.models.Reservation', 'Reservation', (), '', False, 'from trove.quota.models import Reservation\n'), ((538, 29, 540, 78), 'trove.quota.models.Reservation', 'Reservation', (), '', False, 'from trove.quota.models import Reservation\n'), ((560, 23, 564, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((565, 23, 569, 45), 'trove.quota.models.QuotaUsage', 'QuotaUsage', (), '', False, 'from trove.quota.models import QuotaUsage\n'), ((571, 29, 573, 78), 'trove.quota.models.Reservation', 'Reservation', (), '', False, 'from trove.quota.models import Reservation\n'), ((574, 29, 576, 78), 'trove.quota.models.Reservation', 'Reservation', (), '', False, 'from trove.quota.models import Reservation\n'), ((110, 8, 110, 28), 'mockito.verify', 'verify', ({(110, 15, 110, 20): 'quota', (110, 22, 110, 27): 'never'}, {}), '(quota, never)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((121, 8, 121, 39), 'mockito.verify', 'verify', (), '', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((140, 8, 140, 37), 'mockito.verify', 'verify', ({(140, 15, 140, 29): 'instance_quota', (140, 31, 140, 
36): 'never'}, {}), '(instance_quota, never)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((142, 8, 142, 37), 'mockito.verify', 'verify', (), '', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((89, 30, 89, 35), 'mockito.any', 'any', ({}, {}), '()', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((89, 8, 89, 25), 'mockito.when', 'when', ({(89, 13, 89, 24): 'req.environ'}, {}), '(req.environ)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((105, 8, 105, 31), 'mockito.when', 'when', ({(105, 13, 105, 30): 'DatabaseModelBase'}, {}), '(DatabaseModelBase)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((115, 8, 115, 31), 'mockito.when', 'when', ({(115, 13, 115, 30): 'DatabaseModelBase'}, {}), '(DatabaseModelBase)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((130, 8, 130, 31), 'mockito.when', 'when', ({(130, 13, 130, 30): 'DatabaseModelBase'}, {}), '(DatabaseModelBase)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n'), ((134, 8, 134, 31), 'mockito.when', 'when', ({(134, 13, 134, 30): 'DatabaseModelBase'}, {}), '(DatabaseModelBase)', False, 'from mockito import mock, when, unstub, any, verify, never, times\n')]
viniciusandd/uri-analisador-sintatico
analisador_sintatico/blueprints/api/parsers.py
b347f4293e4c60bd3b2c838c8cef0d75db2c0bec
from flask_restful import reqparse


def retornar_parser():
    parser = reqparse.RequestParser()
    parser.add_argument('sentenca', type=str, required=True)
    return parser
[((4, 13, 4, 37), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ({}, {}), '()', False, 'from flask_restful import reqparse\n')]
gunlyungyou/AerialDetection
demo_large_image.py
a5606acd8e9a5f7b10cd76bd4b0c3b8c7630fb26
from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections
import mmcv
from mmcv import Config
from mmdet.datasets import get_dataset
import cv2
import os
import numpy as np
from tqdm import tqdm
import DOTA_devkit.polyiou as polyiou
import math
import pdb

CLASS_NAMES_KR = ('소형 선박', '대형 선박', '민간 항공기', '군용 항공기', '소형 승용차', '버스', '트럭',
                  '기차', '크레인', '다리', '정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로')
CLASS_NAMES_EN = ('small ship', 'large ship', 'civil airplane', 'military airplane',
                  'small car', 'bus', 'truck', 'train', 'crane', 'bridge', 'oiltank',
                  'dam', 'stadium', 'helipad', 'roundabout')
CLASS_MAP = {k: v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)}


def py_cpu_nms_poly_fast_np(dets, thresh):
    # NMS over oriented boxes: prune candidates with the cheap horizontal-box
    # overlap first, then refine the surviving pairs with exact polygon IoU.
    obbs = dets[:, 0:-1]
    x1 = np.min(obbs[:, 0::2], axis=1)
    y1 = np.min(obbs[:, 1::2], axis=1)
    x2 = np.max(obbs[:, 0::2], axis=1)
    y2 = np.max(obbs[:, 1::2], axis=1)
    scores = dets[:, 8]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)

    polys = []
    for i in range(len(dets)):
        tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2], dets[i][3],
                                           dets[i][4], dets[i][5], dets[i][6], dets[i][7]])
        polys.append(tm_polygon)
    order = scores.argsort()[::-1]

    keep = []
    while order.size > 0:
        ovr = []
        i = order[0]
        keep.append(i)
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1)
        h = np.maximum(0.0, yy2 - yy1)
        hbb_inter = w * h
        hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
        h_inds = np.where(hbb_ovr > 0)[0]
        tmp_order = order[h_inds + 1]
        for j in range(tmp_order.size):
            iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])
            hbb_ovr[h_inds[j]] = iou

        try:
            # ovr is always empty here, so this debug hook never fires.
            if math.isnan(ovr[0]):
                pdb.set_trace()
        except:
            pass
        inds = np.where(hbb_ovr <= thresh)[0]
        order = order[inds + 1]
    return keep


class DetectorModel():
    def __init__(self, config_file, checkpoint_file):
        # init RoITransformer
        self.config_file = config_file
        self.checkpoint_file = checkpoint_file
        self.cfg = Config.fromfile(self.config_file)
        self.data_test = self.cfg.data['test']
        self.dataset = get_dataset(self.data_test)
        self.classnames = self.dataset.CLASSES
        self.model = init_detector(config_file, checkpoint_file, device='cuda:0')

    def inference_single(self, imagname, slide_size, chip_size):
        # Slide a chip-sized window over the large image, detect on each chip,
        # and shift chip-local box coordinates back into full-image coordinates.
        img = mmcv.imread(imagname)
        height, width, channel = img.shape
        slide_h, slide_w = slide_size
        hn, wn = chip_size
        # TODO: check the corner case
        # import pdb; pdb.set_trace()
        total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]

        for i in tqdm(range(int(width / slide_w + 1))):
            for j in range(int(height / slide_h) + 1):
                subimg = np.zeros((hn, wn, channel))
                # print('i: ', i, 'j: ', j)
                chip = img[j * slide_h:j * slide_h + hn, i * slide_w:i * slide_w + wn, :3]
                subimg[:chip.shape[0], :chip.shape[1], :] = chip

                chip_detections = inference_detector(self.model, subimg)
                # print('result: ', result)
                for cls_id, name in enumerate(self.classnames):
                    chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i * slide_w
                    chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h
                    # import pdb;pdb.set_trace()
                    try:
                        total_detections[cls_id] = np.concatenate((total_detections[cls_id],
                                                                   chip_detections[cls_id]))
                    except:
                        import pdb; pdb.set_trace()
        # nms
        for i in range(len(self.classnames)):
            keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1)
            total_detections[i] = total_detections[i][keep]
        return total_detections

    def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size):
        detections = self.inference_single(srcpath, slide_size, chip_size)
        classnames = [cls if cls not in CLASS_MAP else CLASS_MAP[cls] for cls in self.classnames]
        img = draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3)
        cv2.imwrite(dstpath, img)


if __name__ == '__main__':
    #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py',
    #                               r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth')
    #roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py',
    #                               r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth')
    roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py',
                                   r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth')

    from glob import glob
    roksis = glob('data/roksi2020/val/images/*.png')
    #target = roksis[1]
    #out = target.split('/')[-1][:-4]+'_out.jpg'
    #roitransformer.inference_single_vis(target,
    #                                    os.path.join('demo', out),
    #                                    (512, 512),
    #                                    (1024, 1024))
    for target in roksis[:100]:
        out = target.split('/')[-1][:-4] + '_out.jpg'
        print(os.path.join('demo/fasterrcnn', out))
        roitransformer.inference_single_vis(target,
                                            os.path.join('demo/fasterrcnn', out),
                                            (512, 512),
                                            (1024, 1024))
    #roitransformer.inference_single_vis(r'demo/P0009.jpg',
    #                                    r'demo/P0009_out.jpg',
    #                                    (512, 512),
    #                                    (1024, 1024))
[((21, 9, 21, 38), 'numpy.min', 'np.min', (), '', True, 'import numpy as np\n'), ((22, 9, 22, 38), 'numpy.min', 'np.min', (), '', True, 'import numpy as np\n'), ((23, 9, 23, 38), 'numpy.max', 'np.max', (), '', True, 'import numpy as np\n'), ((24, 9, 24, 38), 'numpy.max', 'np.max', (), '', True, 'import numpy as np\n'), ((124, 13, 124, 52), 'glob.glob', 'glob', ({(124, 18, 124, 51): '"""data/roksi2020/val/images/*.png"""'}, {}), "('data/roksi2020/val/images/*.png')", False, 'from glob import glob\n'), ((30, 21, 33, 68), 'DOTA_devkit.polyiou.VectorDouble', 'polyiou.VectorDouble', ({(30, 42, 33, 67): '[dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4], dets[i][5],\n dets[i][6], dets[i][7]]'}, {}), '([dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[\n i][4], dets[i][5], dets[i][6], dets[i][7]])', True, 'import DOTA_devkit.polyiou as polyiou\n'), ((42, 14, 42, 46), 'numpy.maximum', 'np.maximum', ({(42, 25, 42, 30): 'x1[i]', (42, 32, 42, 45): 'x1[order[1:]]'}, {}), '(x1[i], x1[order[1:]])', True, 'import numpy as np\n'), ((43, 14, 43, 46), 'numpy.maximum', 'np.maximum', ({(43, 25, 43, 30): 'y1[i]', (43, 32, 43, 45): 'y1[order[1:]]'}, {}), '(y1[i], y1[order[1:]])', True, 'import numpy as np\n'), ((44, 14, 44, 46), 'numpy.minimum', 'np.minimum', ({(44, 25, 44, 30): 'x2[i]', (44, 32, 44, 45): 'x2[order[1:]]'}, {}), '(x2[i], x2[order[1:]])', True, 'import numpy as np\n'), ((45, 14, 45, 46), 'numpy.minimum', 'np.minimum', ({(45, 25, 45, 30): 'y2[i]', (45, 32, 45, 45): 'y2[order[1:]]'}, {}), '(y2[i], y2[order[1:]])', True, 'import numpy as np\n'), ((46, 12, 46, 38), 'numpy.maximum', 'np.maximum', ({(46, 23, 46, 26): '0.0', (46, 28, 46, 37): 'xx2 - xx1'}, {}), '(0.0, xx2 - xx1)', True, 'import numpy as np\n'), ((47, 12, 47, 38), 'numpy.maximum', 'np.maximum', ({(47, 23, 47, 26): '0.0', (47, 28, 47, 37): 'yy2 - yy1'}, {}), '(0.0, yy2 - yy1)', True, 'import numpy as np\n'), ((72, 19, 72, 52), 'mmcv.Config.fromfile', 'Config.fromfile', ({(72, 35, 72, 51): 'self.config_file'}, {}), '(self.config_file)', False, 'from mmcv import Config\n'), ((74, 23, 74, 50), 'mmdet.datasets.get_dataset', 'get_dataset', ({(74, 35, 74, 49): 'self.data_test'}, {}), '(self.data_test)', False, 'from mmdet.datasets import get_dataset\n'), ((76, 21, 76, 81), 'mmdet.apis.init_detector', 'init_detector', (), '', False, 'from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections\n'), ((79, 14, 79, 35), 'mmcv.imread', 'mmcv.imread', ({(79, 26, 79, 34): 'imagname'}, {}), '(imagname)', False, 'import mmcv\n'), ((113, 14, 113, 91), 'mmdet.apis.draw_poly_detections', 'draw_poly_detections', (), '', False, 'from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections\n'), ((114, 8, 114, 33), 'cv2.imwrite', 'cv2.imwrite', ({(114, 20, 114, 27): 'dstpath', (114, 29, 114, 32): 'img'}, {}), '(dstpath, img)', False, 'import cv2\n'), ((50, 17, 50, 38), 'numpy.where', 'np.where', ({(50, 26, 50, 37): '(hbb_ovr > 0)'}, {}), '(hbb_ovr > 0)', True, 'import numpy as np\n'), ((53, 18, 53, 65), 'DOTA_devkit.polyiou.iou_poly', 'polyiou.iou_poly', ({(53, 35, 53, 43): 'polys[i]', (53, 45, 53, 64): 'polys[tmp_order[j]]'}, {}), '(polys[i], polys[tmp_order[j]])', True, 'import DOTA_devkit.polyiou as polyiou\n'), ((57, 15, 57, 33), 'math.isnan', 'math.isnan', ({(57, 26, 57, 32): 'ovr[0]'}, {}), '(ovr[0])', False, 'import math\n'), ((61, 15, 61, 42), 'numpy.where', 'np.where', ({(61, 24, 61, 41): '(hbb_ovr <= thresh)'}, {}), '(hbb_ovr <= thresh)', True, 'import numpy as np\n'), ((85, 
28, 85, 44), 'numpy.zeros', 'np.zeros', ({(85, 37, 85, 43): '(0, 9)'}, {}), '((0, 9))', True, 'import numpy as np\n'), ((135, 14, 135, 50), 'os.path.join', 'os.path.join', ({(135, 27, 135, 44): '"""demo/fasterrcnn"""', (135, 46, 135, 49): 'out'}, {}), "('demo/fasterrcnn', out)", False, 'import os\n'), ((138, 44, 138, 80), 'os.path.join', 'os.path.join', ({(138, 57, 138, 74): '"""demo/fasterrcnn"""', (138, 76, 138, 79): 'out'}, {}), "('demo/fasterrcnn', out)", False, 'import os\n'), ((58, 16, 58, 31), 'pdb.set_trace', 'pdb.set_trace', ({}, {}), '()', False, 'import pdb\n'), ((89, 25, 89, 52), 'numpy.zeros', 'np.zeros', ({(89, 34, 89, 51): '(hn, wn, channel)'}, {}), '((hn, wn, channel))', True, 'import numpy as np\n'), ((94, 34, 94, 72), 'mmdet.apis.inference_detector', 'inference_detector', ({(94, 53, 94, 63): 'self.model', (94, 65, 94, 71): 'subimg'}, {}), '(self.model, subimg)', False, 'from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections\n'), ((102, 51, 102, 118), 'numpy.concatenate', 'np.concatenate', ({(102, 66, 102, 117): '(total_detections[cls_id], chip_detections[cls_id])'}, {}), '((total_detections[cls_id], chip_detections[cls_id]))', True, 'import numpy as np\n'), ((104, 36, 104, 51), 'pdb.set_trace', 'pdb.set_trace', ({}, {}), '()', False, 'import pdb\n')]