body (string, 26 to 98.2k chars) | body_hash (int64) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (1 class: python) | body_without_docstring (string, 20 to 98.2k chars) |
---|---|---|---|---|---|---|---|
def exitClient(self):
'Teardown the client.'
self.sendRtspRequest(self.TEARDOWN)
sys.exit(0)
print(os.remove(((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT))) | 4,262,132,436,711,552,500 | Teardown the client. | Task2/Client_dev.py | exitClient | Aiemu/CourseCN-Proj-RTP | python | def exitClient(self):
self.sendRtspRequest(self.TEARDOWN)
sys.exit(0)
print(os.remove(((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT))) |
def pauseMovie(self):
'Pause movie.'
if (self.state == self.PLAYING):
self.sendRtspRequest(self.PAUSE) | 3,415,863,770,049,874,000 | Pause movie. | Task2/Client_dev.py | pauseMovie | Aiemu/CourseCN-Proj-RTP | python | def pauseMovie(self):
if (self.state == self.PLAYING):
self.sendRtspRequest(self.PAUSE) |
def playMovie(self):
'Play movie.'
if (self.state == self.READY):
threading.Thread(target=self.listenRtp).start()
self.playEvent = threading.Event()
self.playEvent.clear()
self.sendRtspRequest(self.PLAY) | 4,207,836,993,191,038,000 | Play movie. | Task2/Client_dev.py | playMovie | Aiemu/CourseCN-Proj-RTP | python | def playMovie(self):
if (self.state == self.READY):
threading.Thread(target=self.listenRtp).start()
self.playEvent = threading.Event()
self.playEvent.clear()
self.sendRtspRequest(self.PLAY) |
def listenRtp(self):
'Listen for RTP packets.'
while 1:
try:
cachename = ((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT)
file = open(cachename, 'wb+')
while 1:
data = self.rtpSocket.recv(RECV_SIZE)
if data:
rtpPacket = RtpPacket()
rtpPacket.decode(data)
currFrameNbr = rtpPacket.seqNum()
file.write(rtpPacket.getPayload())
print(('Current Seq Num: ' + str(currFrameNbr)))
if ((currFrameNbr > self.frameNbr) and rtpPacket.getIfEnd()):
self.frameNbr = currFrameNbr
self.updateMovie(cachename)
file.close()
break
except:
if self.playEvent.isSet():
break
print('Frame receiving failed!')
if (self.teardownAcked == 1):
self.rtpSocket.shutdown(socket.SHUT_RDWR)
self.rtpSocket.close()
break | 7,159,088,159,690,293,000 | Listen for RTP packets. | Task2/Client_dev.py | listenRtp | Aiemu/CourseCN-Proj-RTP | python | def listenRtp(self):
while 1:
try:
cachename = ((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT)
file = open(cachename, 'wb+')
while 1:
data = self.rtpSocket.recv(RECV_SIZE)
if data:
rtpPacket = RtpPacket()
rtpPacket.decode(data)
currFrameNbr = rtpPacket.seqNum()
file.write(rtpPacket.getPayload())
print(('Current Seq Num: ' + str(currFrameNbr)))
if ((currFrameNbr > self.frameNbr) and rtpPacket.getIfEnd()):
self.frameNbr = currFrameNbr
self.updateMovie(cachename)
file.close()
break
except:
if self.playEvent.isSet():
break
print('Frame receiving failed!')
if (self.teardownAcked == 1):
self.rtpSocket.shutdown(socket.SHUT_RDWR)
self.rtpSocket.close()
break |
def writeFrame(self):
'Write the received frame to a temp image file. Return the image file.'
cachename = ((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT)
file = open(cachename, 'wb')
for item in self.cutFrameList:
file.write(item)
file.close()
return cachename | 7,812,888,565,929,767,000 | Write the received frame to a temp image file. Return the image file. | Task2/Client_dev.py | writeFrame | Aiemu/CourseCN-Proj-RTP | python | def writeFrame(self):
cachename = ((CACHE_FILE_NAME + str(self.sessionId)) + CACHE_FILE_EXT)
file = open(cachename, 'wb')
for item in self.cutFrameList:
file.write(item)
file.close()
return cachename |
def updateMovie(self, imageFile):
'Update the image file as video frame in the GUI.'
pixmap = QtGui.QPixmap(imageFile)
self.page_main.label_display.setPixmap(pixmap)
self.page_main.label_display.setScaledContents(True) | -2,300,834,710,126,000,000 | Update the image file as video frame in the GUI. | Task2/Client_dev.py | updateMovie | Aiemu/CourseCN-Proj-RTP | python | def updateMovie(self, imageFile):
pixmap = QtGui.QPixmap(imageFile)
self.page_main.label_display.setPixmap(pixmap)
self.page_main.label_display.setScaledContents(True) |
def connectToServer(self):
'Connect to the Server. Start a new RTSP/TCP session.'
self.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.rtspSocket.connect((self.serverAddr, self.serverPort))
except:
messagebox.showwarning('Connection Failed', ("Connection to '%s' failed." % self.serverAddr)) | 4,777,276,545,653,517,000 | Connect to the Server. Start a new RTSP/TCP session. | Task2/Client_dev.py | connectToServer | Aiemu/CourseCN-Proj-RTP | python | def connectToServer(self):
self.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.rtspSocket.connect((self.serverAddr, self.serverPort))
except:
messagebox.showwarning('Connection Failed', ("Connection to '%s' failed." % self.serverAddr)) |
def sendRtspRequest(self, requestCode):
'Send RTSP request to the server.'
if ((requestCode == self.SETUP) and (self.state == self.INIT)):
threading.Thread(target=self.recvRtspReply).start()
self.rtspSeq += 1
request = ((((('SETUP ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nTransport: RTP/UDP; client_port= ') + str(self.rtpPort))
self.requestSent = self.SETUP
elif ((requestCode == self.PLAY) and (self.state == self.READY)):
self.rtspSeq += 1
request = ((((('PLAY ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
self.requestSent = self.PLAY
elif ((requestCode == self.PAUSE) and (self.state == self.PLAYING)):
self.rtspSeq += 1
request = ((((('PAUSE ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
self.requestSent = self.PAUSE
elif ((requestCode == self.TEARDOWN) and (not (self.state == self.INIT))):
self.rtspSeq += 1
request = ((((('TEARDOWN ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
self.requestSent = self.TEARDOWN
elif ((requestCode == self.FASTER) and ((self.state == self.PLAYING) or (self.state == self.READY))):
self.rtspSeq += 1
request = ((((('FASTER ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
elif ((requestCode == self.SLOWER) and ((self.state == self.PLAYING) or (self.state == self.READY))):
self.rtspSeq += 1
request = ((((('SLOWER ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
else:
return
self.rtspSocket.send(request.encode())
print(('\nData sent:\n' + request)) | 320,747,659,495,692,300 | Send RTSP request to the server. | Task2/Client_dev.py | sendRtspRequest | Aiemu/CourseCN-Proj-RTP | python | def sendRtspRequest(self, requestCode):
if ((requestCode == self.SETUP) and (self.state == self.INIT)):
threading.Thread(target=self.recvRtspReply).start()
self.rtspSeq += 1
request = ((((('SETUP ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nTransport: RTP/UDP; client_port= ') + str(self.rtpPort))
self.requestSent = self.SETUP
elif ((requestCode == self.PLAY) and (self.state == self.READY)):
self.rtspSeq += 1
request = ((((('PLAY ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
self.requestSent = self.PLAY
elif ((requestCode == self.PAUSE) and (self.state == self.PLAYING)):
self.rtspSeq += 1
request = ((((('PAUSE ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
self.requestSent = self.PAUSE
elif ((requestCode == self.TEARDOWN) and (not (self.state == self.INIT))):
self.rtspSeq += 1
request = ((((('TEARDOWN ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
self.requestSent = self.TEARDOWN
elif ((requestCode == self.FASTER) and ((self.state == self.PLAYING) or (self.state == self.READY))):
self.rtspSeq += 1
request = ((((('FASTER ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
elif ((requestCode == self.SLOWER) and ((self.state == self.PLAYING) or (self.state == self.READY))):
self.rtspSeq += 1
request = ((((('SLOWER ' + self.fileName) + ' RTSP/1.0\nCSeq: ') + str(self.rtspSeq)) + '\nSession: ') + str(self.sessionId))
else:
return
self.rtspSocket.send(request.encode())
print(('\nData sent:\n' + request)) |
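The concatenation above is easier to read as the message it produces. A minimal sketch, using hypothetical values for the file name, sequence number and RTP port, shows the SETUP request text this method sends:

```python
# Sketch only (not part of the client): rebuild the SETUP request string exactly as
# sendRtspRequest concatenates it; file name, CSeq and port are hypothetical values.
file_name = 'movie.Mjpeg'   # hypothetical
rtsp_seq = 1                # hypothetical
rtp_port = 25000            # hypothetical

request = ('SETUP ' + file_name + ' RTSP/1.0\nCSeq: ' + str(rtsp_seq)
           + '\nTransport: RTP/UDP; client_port= ' + str(rtp_port))
print(request)
# SETUP movie.Mjpeg RTSP/1.0
# CSeq: 1
# Transport: RTP/UDP; client_port= 25000
```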
def recvRtspReply(self):
'Receive RTSP reply from the server.'
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply.decode('utf-8'))
if (self.requestSent == self.TEARDOWN):
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.rtspSocket.close()
break | 5,389,725,507,352,486,000 | Receive RTSP reply from the server. | Task2/Client_dev.py | recvRtspReply | Aiemu/CourseCN-Proj-RTP | python | def recvRtspReply(self):
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply.decode('utf-8'))
if (self.requestSent == self.TEARDOWN):
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.rtspSocket.close()
break |
def parseRtspReply(self, data):
'Parse the RTSP reply from the server.'
lines = str(data).split('\n')
seqNum = int(lines[1].split(' ')[1])
if (seqNum == self.rtspSeq):
session = int(lines[2].split(' ')[1])
if (self.sessionId == 0):
self.sessionId = session
if (self.sessionId == session):
if (int(lines[0].split(' ')[1]) == 200):
if (self.requestSent == self.SETUP):
self.state = self.READY
self.openRtpPort()
elif (self.requestSent == self.PLAY):
self.state = self.PLAYING
elif (self.requestSent == self.PAUSE):
self.state = self.READY
self.playEvent.set()
elif (self.requestSent == self.TEARDOWN):
self.state = self.INIT
self.teardownAcked = 1 | 1,693,217,859,073,406,700 | Parse the RTSP reply from the server. | Task2/Client_dev.py | parseRtspReply | Aiemu/CourseCN-Proj-RTP | python | def parseRtspReply(self, data):
lines = str(data).split('\n')
seqNum = int(lines[1].split(' ')[1])
if (seqNum == self.rtspSeq):
session = int(lines[2].split(' ')[1])
if (self.sessionId == 0):
self.sessionId = session
if (self.sessionId == session):
if (int(lines[0].split(' ')[1]) == 200):
if (self.requestSent == self.SETUP):
self.state = self.READY
self.openRtpPort()
elif (self.requestSent == self.PLAY):
self.state = self.PLAYING
elif (self.requestSent == self.PAUSE):
self.state = self.READY
self.playEvent.set()
elif (self.requestSent == self.TEARDOWN):
self.state = self.INIT
self.teardownAcked = 1 |
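The indexing in parseRtspReply implicitly assumes a three-line reply (status, CSeq, Session). A minimal sketch with a hypothetical reply shows the layout the splitting code expects:

```python
# Sketch only: the reply layout implied by the indexing in parseRtspReply.
reply = 'RTSP/1.0 200 OK\nCSeq: 1\nSession: 123456'   # hypothetical values

lines = reply.split('\n')
status_code = int(lines[0].split(' ')[1])   # 200    -> request accepted
seq_num = int(lines[1].split(' ')[1])       # 1      -> must match self.rtspSeq
session = int(lines[2].split(' ')[1])       # 123456 -> stored as self.sessionId
```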
def openRtpPort(self):
'Open RTP socket binded to a specified port.'
self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rtpSocket.settimeout(0.5)
try:
self.rtpSocket.bind(('', self.rtpPort))
except:
messagebox.showwarning('Unable to Bind', ('Unable to bind PORT=%d' % self.rtpPort)) | -8,856,207,915,541,562,000 | Open RTP socket binded to a specified port. | Task2/Client_dev.py | openRtpPort | Aiemu/CourseCN-Proj-RTP | python | def openRtpPort(self):
self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.rtpSocket.settimeout(0.5)
try:
self.rtpSocket.bind(('', self.rtpPort))
except:
messagebox.showwarning('Unable to Bind', ('Unable to bind PORT=%d' % self.rtpPort)) |
def handler(self):
'Handler on explicitly closing the GUI window.'
self.pauseMovie()
if messagebox.askokcancel('Quit?', 'Are you sure you want to quit?'):
self.exitClient()
else:
self.playMovie() | 704,200,587,336,194,600 | Handler on explicitly closing the GUI window. | Task2/Client_dev.py | handler | Aiemu/CourseCN-Proj-RTP | python | def handler(self):
self.pauseMovie()
if messagebox.askokcancel('Quit?', 'Are you sure you want to quit?'):
self.exitClient()
else:
self.playMovie() |
def test_requirements_from_source_info(tmpdir):
'Test the code path used by the exporter'
common.makeproject(tmpdir, 'test-project', deps=[('mod1', '')], imports=['mod1'])
project_dir = os.path.join(tmpdir, 'test-project')
libs_dir = os.path.join(project_dir, 'libs')
common.makemodule(libs_dir, 'mod1', project=False)
mod1 = os.path.join(libs_dir, 'mod1')
mod1_req_txt = "# I'm a comment\npytest>=1.5\n\n\niplib>=0.0.1\n\n "
common.add_file(mod1, 'requirements.txt', mod1_req_txt, msg='initial commit')
project = Project(project_dir, venv_path=os.path.join(project_dir, '.env'))
Project.set(project)
project.load_module('mod1', allow_v1=True)
requirements = SourceInfo(mod1, 'inmanta_plugins.mod1').requires
assert (sorted(requirements) == sorted(['pytest>=1.5', 'iplib>=0.0.1']))
project.virtualenv.install_from_list(requirements) | -679,296,438,620,197,900 | Test the code path used by the exporter | tests/moduletool/test_python_dependencies.py | test_requirements_from_source_info | inmanta/inmanta-core | python | def test_requirements_from_source_info(tmpdir):
common.makeproject(tmpdir, 'test-project', deps=[('mod1', '')], imports=['mod1'])
project_dir = os.path.join(tmpdir, 'test-project')
libs_dir = os.path.join(project_dir, 'libs')
common.makemodule(libs_dir, 'mod1', project=False)
mod1 = os.path.join(libs_dir, 'mod1')
mod1_req_txt = "# I'm a comment\npytest>=1.5\n\n\niplib>=0.0.1\n\n "
common.add_file(mod1, 'requirements.txt', mod1_req_txt, msg='initial commit')
project = Project(project_dir, venv_path=os.path.join(project_dir, '.env'))
Project.set(project)
project.load_module('mod1', allow_v1=True)
requirements = SourceInfo(mod1, 'inmanta_plugins.mod1').requires
assert (sorted(requirements) == sorted(['pytest>=1.5', 'iplib>=0.0.1']))
project.virtualenv.install_from_list(requirements) |
@app.task
def execute_command(command):
'airflow worker executes a shell command.'
log = LoggingMixin().log
log.info('Executing command in Celery: %s', command)
env = os.environ.copy()
try:
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT, close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
raise AirflowException('Celery command failed') | 5,545,134,767,227,514,000 | airflow worker executes a shell command. | airflow/executors/celery_executor.py | execute_command | fengzhongzhu1621/XAirflow | python | @app.task
def execute_command(command):
log = LoggingMixin().log
log.info('Executing command in Celery: %s', command)
env = os.environ.copy()
try:
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT, close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
raise AirflowException('Celery command failed') |
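Because execute_command is a Celery task, other processes enqueue it rather than call it directly. A rough sketch, assuming a configured Celery broker and using a hypothetical Airflow CLI string:

```python
# Sketch only: how a scheduler-side caller could enqueue the task defined above.
# Requires a configured Celery broker; the command string is a hypothetical example.
from airflow.executors.celery_executor import execute_command

execute_command.apply_async(args=['airflow run example_dag example_task 2021-01-01'],
                            queue='default')
```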
def get_object(self, queryset=None):
"\n If the status of the entry is not PUBLISHED,\n a preview is requested, so we check if the user\n has the 'zinnia.can_view_all' permission or if\n it's an author of the entry.\n "
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.is_visible:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or (self.request.user.pk in [author.pk for author in obj.authors.all()])):
return obj
raise Http404(_('No entry found matching the query')) | -6,498,698,108,583,524,000 | If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry. | zinnia/views/mixins/entry_preview.py | get_object | Admoroux/django-blog-zinnia | python | def get_object(self, queryset=None):
"\n If the status of the entry is not PUBLISHED,\n a preview is requested, so we check if the user\n has the 'zinnia.can_view_all' permission or if\n it's an author of the entry.\n "
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.is_visible:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or (self.request.user.pk in [author.pk for author in obj.authors.all()])):
return obj
raise Http404(_('No entry found matching the query')) |
def find_reachable_vertices(g: Graph, sources: set) -> set:
'\n Returns the set of vertices of a graph which are reachable\n from a set of source vertices.\n Args:\n g: Graph, an instance of `Graph`\n sources: set, a set of integers representing the source vertices\n Returns:\n The set of vertices that are reachable from the source vertices\n '
map_vcolor = defaultdict(int)
pmap_vcolor = make_assoc_property_map(map_vcolor)
depth_first_search_graph(g, sources, pmap_vcolor=pmap_vcolor)
return set(map_vcolor.keys()) | 5,854,856,957,845,311,000 | Returns the set of vertices of a graph which are reachable
from a set of source vertices.
Args:
g: Graph, an instance of `Graph`
sources: set, a set of integers representing the source vertices
Returns:
The set of vertices that are reachable from the source vertices | pybgl/prune_incidence_automaton.py | find_reachable_vertices | nokia/PyBGL | python | def find_reachable_vertices(g: Graph, sources: set) -> set:
'\n Returns the set of vertices of a graph which are reachable\n from a set of source vertices.\n Args:\n g: Graph, an instance of `Graph`\n sources: set, a set of integers representing the source vertices\n Returns:\n The set of vertices that are reachable from the source vertices\n '
map_vcolor = defaultdict(int)
pmap_vcolor = make_assoc_property_map(map_vcolor)
depth_first_search_graph(g, sources, pmap_vcolor=pmap_vcolor)
return set(map_vcolor.keys()) |
def prune_incidence_automaton(g: IncidenceAutomaton):
'\n Prunes the vertices of an IncidenceAutomaton that cannot be reached\n from the intial state, or that cannot reach a final state.\n Args:\n g: IncidenceAutomaton, an instance of IncidenceAutomaton\n '
to_keep = find_reachable_vertices(g, {initial(g)})
reverse_graph(g)
to_keep &= find_reachable_vertices(g, finals(g))
reverse_graph(g)
to_remove = (set(vertices(g)) - to_keep)
for q in to_remove:
remove_vertex(q, g) | -5,881,924,977,987,115,000 | Prunes the vertices of an IncidenceAutomaton that cannot be reached
from the intial state, or that cannot reach a final state.
Args:
g: IncidenceAutomaton, an instance of IncidenceAutomaton | pybgl/prune_incidence_automaton.py | prune_incidence_automaton | nokia/PyBGL | python | def prune_incidence_automaton(g: IncidenceAutomaton):
'\n Prunes the vertices of an IncidenceAutomaton that cannot be reached\n from the intial state, or that cannot reach a final state.\n Args:\n g: IncidenceAutomaton, an instance of IncidenceAutomaton\n '
to_keep = find_reachable_vertices(g, {initial(g)})
reverse_graph(g)
to_keep &= find_reachable_vertices(g, finals(g))
reverse_graph(g)
to_remove = (set(vertices(g)) - to_keep)
for q in to_remove:
remove_vertex(q, g) |
def example_fn_build_report(report, pvarray):
'Example function that builds a report when used in the\n :py:class:`~pvfactors.engine.PVEngine` with full mode simulations.\n Here it will be a dictionary with lists of calculated values.\n\n Parameters\n ----------\n report : dict\n Initially ``None``, this will be passed and updated by the function\n pvarray : PV array object\n PV array with updated calculation values\n\n Returns\n -------\n report : dict\n Report updated with newly calculated values\n '
if (report is None):
list_keys = ['qinc_front', 'qinc_back', 'iso_front', 'iso_back']
report = OrderedDict({key: [] for key in list_keys})
if (pvarray is not None):
pvrow = pvarray.pvrows[1]
report['qinc_front'].append(pvrow.front.get_param_weighted('qinc'))
report['qinc_back'].append(pvrow.back.get_param_weighted('qinc'))
report['iso_front'].append(pvrow.front.get_param_weighted('isotropic'))
report['iso_back'].append(pvrow.back.get_param_weighted('isotropic'))
else:
report['qinc_front'].append(np.nan)
report['qinc_back'].append(np.nan)
report['iso_front'].append(np.nan)
report['iso_back'].append(np.nan)
return report | 8,642,434,767,632,493,000 | Example function that builds a report when used in the
:py:class:`~pvfactors.engine.PVEngine` with full mode simulations.
Here it will be a dictionary with lists of calculated values.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values | pvfactors/report.py | example_fn_build_report | tcapelle/pvfactors | python | def example_fn_build_report(report, pvarray):
'Example function that builds a report when used in the\n :py:class:`~pvfactors.engine.PVEngine` with full mode simulations.\n Here it will be a dictionary with lists of calculated values.\n\n Parameters\n ----------\n report : dict\n Initially ``None``, this will be passed and updated by the function\n pvarray : PV array object\n PV array with updated calculation values\n\n Returns\n -------\n report : dict\n Report updated with newly calculated values\n '
if (report is None):
list_keys = ['qinc_front', 'qinc_back', 'iso_front', 'iso_back']
report = OrderedDict({key: [] for key in list_keys})
if (pvarray is not None):
pvrow = pvarray.pvrows[1]
report['qinc_front'].append(pvrow.front.get_param_weighted('qinc'))
report['qinc_back'].append(pvrow.back.get_param_weighted('qinc'))
report['iso_front'].append(pvrow.front.get_param_weighted('isotropic'))
report['iso_back'].append(pvrow.back.get_param_weighted('isotropic'))
else:
report['qinc_front'].append(np.nan)
report['qinc_back'].append(np.nan)
report['iso_front'].append(np.nan)
report['iso_back'].append(np.nan)
return report |
@staticmethod
def build(report, pvarray):
"Method that will build the simulation report. Here we're using the\n previously defined\n :py:function:`~pvfactors.report.example_fn_build_report`.\n\n Parameters\n ----------\n report : dict\n Initially ``None``, this will be passed and updated by the function\n pvarray : PV array object\n PV array with updated calculation values\n\n Returns\n -------\n report : dict\n Report updated with newly calculated values\n "
return example_fn_build_report(report, pvarray) | 4,803,367,268,110,759,000 | Method that will build the simulation report. Here we're using the
previously defined
:py:function:`~pvfactors.report.example_fn_build_report`.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values | pvfactors/report.py | build | tcapelle/pvfactors | python | @staticmethod
def build(report, pvarray):
"Method that will build the simulation report. Here we're using the\n previously defined\n :py:function:`~pvfactors.report.example_fn_build_report`.\n\n Parameters\n ----------\n report : dict\n Initially ``None``, this will be passed and updated by the function\n pvarray : PV array object\n PV array with updated calculation values\n\n Returns\n -------\n report : dict\n Report updated with newly calculated values\n "
return example_fn_build_report(report, pvarray) |
@staticmethod
def merge(reports):
'Method used to merge multiple reports together. Here it simply\n concatenates the lists of values saved in the different reports.\n\n Parameters\n ----------\n reports : list of dict\n List of reports that need to be concatenated together\n\n Returns\n -------\n report : dict\n Final report with all concatenated values\n '
report = reports[0]
if (len(reports) > 1):
keys_report = list(reports[0].keys())
for other_report in reports[1:]:
for key in keys_report:
report[key] += other_report[key]
return report | 5,527,112,909,777,533,000 | Method used to merge multiple reports together. Here it simply
concatenates the lists of values saved in the different reports.
Parameters
----------
reports : list of dict
List of reports that need to be concatenated together
Returns
-------
report : dict
Final report with all concatenated values | pvfactors/report.py | merge | tcapelle/pvfactors | python | @staticmethod
def merge(reports):
'Method used to merge multiple reports together. Here it simply\n concatenates the lists of values saved in the different reports.\n\n Parameters\n ----------\n reports : list of dict\n List of reports that need to be concatenated together\n\n Returns\n -------\n report : dict\n Final report with all concatenated values\n '
report = reports[0]
if (len(reports) > 1):
keys_report = list(reports[0].keys())
for other_report in reports[1:]:
for key in keys_report:
report[key] += other_report[key]
return report |
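Concretely, merge just concatenates the per-key lists of the individual reports. A minimal sketch with two hypothetical partial reports:

```python
# Sketch only: the effect of merge() on two partial reports (hypothetical values).
r1 = {'qinc_front': [100.0], 'qinc_back': [20.0]}
r2 = {'qinc_front': [101.0], 'qinc_back': [21.0]}

merged = {key: r1[key] + r2[key] for key in r1}
# {'qinc_front': [100.0, 101.0], 'qinc_back': [20.0, 21.0]}
```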
def uniform_mix_C(mixing_ratio, num_classes):
'\n returns a linear interpolation of a uniform matrix and an identity matrix\n '
return ((mixing_ratio * np.full((num_classes, num_classes), (1 / num_classes))) + ((1 - mixing_ratio) * np.eye(num_classes))) | 523,720,610,122,376,450 | returns a linear interpolation of a uniform matrix and an identity matrix | dataloader.py | uniform_mix_C | WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector | python | def uniform_mix_C(mixing_ratio, num_classes):
'\n \n '
return ((mixing_ratio * np.full((num_classes, num_classes), (1 / num_classes))) + ((1 - mixing_ratio) * np.eye(num_classes))) |
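For concreteness, the interpolation in uniform_mix_C can be written out for a small case; this sketch assumes 3 classes and a mixing ratio of 0.5:

```python
import numpy as np

# Sketch only: uniform_mix_C(0.5, 3) written out (values rounded to 3 decimals).
C = 0.5 * np.full((3, 3), 1 / 3) + 0.5 * np.eye(3)
# array([[0.667, 0.167, 0.167],
#        [0.167, 0.667, 0.167],
#        [0.167, 0.167, 0.667]])
# Every row sums to 1, so C is a valid label-noise transition matrix.
```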
def flip_labels_C(corruption_prob, num_classes, seed=1):
'\n returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob\n concentrated in only one other entry for each row\n '
np.random.seed(seed)
C = (np.eye(num_classes) * (1 - corruption_prob))
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[(row_indices != i)])] = corruption_prob
return C | 1,236,321,925,871,866,400 | returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row | dataloader.py | flip_labels_C | WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector | python | def flip_labels_C(corruption_prob, num_classes, seed=1):
'\n returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob\n concentrated in only one other entry for each row\n '
np.random.seed(seed)
C = (np.eye(num_classes) * (1 - corruption_prob))
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[(row_indices != i)])] = corruption_prob
return C |
def flip_labels_C_two(corruption_prob, num_classes, seed=1):
'\n returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob\n concentrated in only one other entry for each row\n '
np.random.seed(seed)
C = (np.eye(num_classes) * (1 - corruption_prob))
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[(row_indices != i)], 2, replace=False)] = (corruption_prob / 2)
return C | 3,365,218,847,267,169,300 | returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row | dataloader.py | flip_labels_C_two | WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector | python | def flip_labels_C_two(corruption_prob, num_classes, seed=1):
'\n returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob\n concentrated in only one other entry for each row\n '
np.random.seed(seed)
C = (np.eye(num_classes) * (1 - corruption_prob))
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[(row_indices != i)], 2, replace=False)] = (corruption_prob / 2)
return C |
def loadbasis(cmd: str, dtype: torch.dtype=_dtype, device: torch.device=_device, requires_grad: bool=False) -> List[CGTOBasis]:
'\n Load basis from a file and return the list of CGTOBasis.\n\n Arguments\n ---------\n cmd: str\n This can be a file path where the basis is stored or a\n string in format ``"atomz:basis"``, e.g. ``"1:6-311++G**"``.\n dtype: torch.dtype\n Tensor data type for ``alphas`` and ``coeffs`` of the GTO basis\n device: torch.device\n Tensor device for ``alphas`` and ``coeffs``\n requires_grad: bool\n If ``True``, the ``alphas`` and ``coeffs`` tensors become differentiable\n\n Returns\n -------\n list of CGTOBasis\n List of GTO basis loaded from the given file\n '
res = []
if (not os.path.exists(cmd)):
file = _get_basis_file(cmd)
else:
file = cmd
with open(file, 'r') as f:
lines = f.read().split('\n')
while True:
line = lines.pop(0)
if (line == ''):
continue
if line.startswith('!'):
continue
break
while (len(lines) > 0):
line = lines.pop(0)
if line.startswith('**'):
break
desc = line.split()
nlines = int(desc[1])
if (nlines == 0):
raise RuntimeError(('Zero line on basis %s' % file))
alphas = []
coeffsT = []
for i in range(nlines):
alphacoeff = [_read_float(f) for f in lines.pop(0).split()]
alphas.append(alphacoeff[0])
coeffsT.append(alphacoeff[1:])
coeffs = list(zip(*coeffsT))
ncoeffs = len(coeffs)
angmoms = _expand_angmoms(desc[0], ncoeffs)
alpha = torch.tensor(alphas, dtype=dtype, device=device, requires_grad=requires_grad)
for i in range(ncoeffs):
coeff = torch.tensor(coeffs[i], dtype=dtype, device=device, requires_grad=requires_grad)
basis = CGTOBasis(angmom=angmoms[i], alphas=alpha, coeffs=coeff)
basis.wfnormalize_()
res.append(basis)
return res | 2,860,282,354,001,892,400 | Load basis from a file and return the list of CGTOBasis.
Arguments
---------
cmd: str
This can be a file path where the basis is stored or a
string in format ``"atomz:basis"``, e.g. ``"1:6-311++G**"``.
dtype: torch.dtype
Tensor data type for ``alphas`` and ``coeffs`` of the GTO basis
device: torch.device
Tensor device for ``alphas`` and ``coeffs``
requires_grad: bool
If ``True``, the ``alphas`` and ``coeffs`` tensors become differentiable
Returns
-------
list of CGTOBasis
List of GTO basis loaded from the given file | dqc/api/loadbasis.py | loadbasis | Jaikinator/dqc | python | def loadbasis(cmd: str, dtype: torch.dtype=_dtype, device: torch.device=_device, requires_grad: bool=False) -> List[CGTOBasis]:
'\n Load basis from a file and return the list of CGTOBasis.\n\n Arguments\n ---------\n cmd: str\n This can be a file path where the basis is stored or a\n string in format ``"atomz:basis"``, e.g. ``"1:6-311++G**"``.\n dtype: torch.dtype\n Tensor data type for ``alphas`` and ``coeffs`` of the GTO basis\n device: torch.device\n Tensor device for ``alphas`` and ``coeffs``\n requires_grad: bool\n If ``True``, the ``alphas`` and ``coeffs`` tensors become differentiable\n\n Returns\n -------\n list of CGTOBasis\n List of GTO basis loaded from the given file\n '
res = []
if (not os.path.exists(cmd)):
file = _get_basis_file(cmd)
else:
file = cmd
with open(file, 'r') as f:
lines = f.read().split('\n')
while True:
line = lines.pop(0)
if (line == ''):
continue
if line.startswith('!'):
continue
break
while (len(lines) > 0):
line = lines.pop(0)
if line.startswith('**'):
break
desc = line.split()
nlines = int(desc[1])
if (nlines == 0):
raise RuntimeError(('Zero line on basis %s' % file))
alphas = []
coeffsT = []
for i in range(nlines):
alphacoeff = [_read_float(f) for f in lines.pop(0).split()]
alphas.append(alphacoeff[0])
coeffsT.append(alphacoeff[1:])
coeffs = list(zip(*coeffsT))
ncoeffs = len(coeffs)
angmoms = _expand_angmoms(desc[0], ncoeffs)
alpha = torch.tensor(alphas, dtype=dtype, device=device, requires_grad=requires_grad)
for i in range(ncoeffs):
coeff = torch.tensor(coeffs[i], dtype=dtype, device=device, requires_grad=requires_grad)
basis = CGTOBasis(angmom=angmoms[i], alphas=alpha, coeffs=coeff)
basis.wfnormalize_()
res.append(basis)
return res |
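A minimal usage sketch for loadbasis, assuming the import path mirrors the file path shown above (dqc/api/loadbasis.py); the '1:6-311++G**' string follows the atomz:basis format described in the docstring:

```python
# Sketch only; assumes loadbasis is importable from the module path shown above.
from dqc.api.loadbasis import loadbasis

bases = loadbasis('1:6-311++G**')   # hydrogen (Z=1) with the 6-311++G** basis
for b in bases:
    # each entry is a CGTOBasis with angular momentum, exponents and coefficients
    print(b.angmom, b.alphas.shape, b.coeffs.shape)
```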
@contextlib.contextmanager
def stored(self, key):
'\n\t\tThis is a convenience tool to make plugin storage easier.\n\t\t'
value = self[key]
try:
(yield value)
finally:
self[key] = value | 2,452,644,208,444,902,400 | This is a convenience tool to make plugin storage easier. | plugins/otp/otp.py | stored | hosom/jarvis | python | @contextlib.contextmanager
def stored(self, key):
'\n\t\t\n\t\t'
value = self[key]
try:
(yield value)
finally:
self[key] = value |
def build_qrcode(self, user, url):
'Internal method used to build the QRCode image for token provisioning.'
prefix = self.DATA_DIR
qrcode.make(url).save('{0}{1}-qrcode.png'.format(prefix, user), format='png') | -683,330,556,530,843,500 | Internal method used to build the QRCode image for token provisioning. | plugins/otp/otp.py | build_qrcode | hosom/jarvis | python | def build_qrcode(self, user, url):
prefix = self.DATA_DIR
qrcode.make(url).save('{0}{1}-qrcode.png'.format(prefix, user), format='png') |
def get_identity(self, message):
'Wrapper to make sure the correct identity object is used.'
try:
return message.frm.aclattr
except AttributeError:
return message.frm.person | 3,442,578,752,912,167,000 | Wrapper to make sure the correct identity object is used. | plugins/otp/otp.py | get_identity | hosom/jarvis | python | def get_identity(self, message):
try:
return message.frm.aclattr
except AttributeError:
return message.frm.person |
@botcmd(admin_only=True)
def otp_delete_all(self, message, args):
'\n\t\tWARNING: This command removes ALL OTP entries.\n\t\t'
self['commands'] = set()
self['secrets'] = dict()
return 'Removed **all** OTP tokens and command filters.' | 3,795,642,453,982,059,500 | WARNING: This command removes ALL OTP entries. | plugins/otp/otp.py | otp_delete_all | hosom/jarvis | python | @botcmd(admin_only=True)
def otp_delete_all(self, message, args):
'\n\t\t\n\t\t'
self['commands'] = set()
self['secrets'] = dict()
return 'Removed **all** OTP tokens and command filters.' |
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_add_command')
def otp_add_command(self, message, cmd=None):
'\n\t\tAdd a command to OTP command filtering.\n\t\t'
with self.lock:
with self.stored('commands') as commands:
commands.add(cmd)
return dict(command=cmd) | -6,308,578,379,057,276,000 | Add a command to OTP command filtering. | plugins/otp/otp.py | otp_add_command | hosom/jarvis | python | @arg_botcmd('cmd', type=str, admin_only=True, template='otp_add_command')
def otp_add_command(self, message, cmd=None):
'\n\t\t\n\t\t'
with self.lock:
with self.stored('commands') as commands:
commands.add(cmd)
return dict(command=cmd) |
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_remove_command')
def otp_remove_command(self, message, cmd=None):
'\n\t\tRemove a command from OTP command filtering.\n\t\t'
with self.lock:
with self.stored('commands') as commands:
if (cmd not in commands):
return dict(err=True, command=cmd)
commands.remove(cmd)
return dict(err=False, command=cmd) | 6,625,291,410,303,574,000 | Remove a command from OTP command filtering. | plugins/otp/otp.py | otp_remove_command | hosom/jarvis | python | @arg_botcmd('cmd', type=str, admin_only=True, template='otp_remove_command')
def otp_remove_command(self, message, cmd=None):
'\n\t\t\n\t\t'
with self.lock:
with self.stored('commands') as commands:
if (cmd not in commands):
return dict(err=True, command=cmd)
commands.remove(cmd)
return dict(err=False, command=cmd) |
@botcmd(admin_only=True, template='otp_commands')
def otp_commands(self, message, args):
'\n\t\tList the commands that are filtered by OTP.\n\t\t'
return dict(commands=self['commands']) | -200,299,054,489,281,120 | List the commands that are filtered by OTP. | plugins/otp/otp.py | otp_commands | hosom/jarvis | python | @botcmd(admin_only=True, template='otp_commands')
def otp_commands(self, message, args):
'\n\t\t\n\t\t'
return dict(commands=self['commands']) |
@arg_botcmd('user', type=str, admin_only=True, template='otp_secret_create')
def otp_secret_create(self, message, user=None):
'\n\t\tSend a new secret for a user.\n\t\t'
secret = pyotp.random_base32()
with self.lock:
with self.stored('secrets') as secrets:
secrets[user] = (secret, 0, _BASE_TIME)
totp = pyotp.TOTP(secret)
url = totp.provisioning_uri(user)
self.build_qrcode(user, url)
if self.config:
if self.config.get('provision_via_chat'):
f = open('{0}{1}-qrcode.png'.format(self.DATA_DIR, user), 'rb')
self.send_stream_request(self.build_identifier(user), f, name='OTP-secret.png')
self.send_templated(self.build_identifier(user), 'otp_secret_create_pm', dict(url=url))
return dict(chat_enrollment=True, user=user)
return dict(chat_enrollment=False, user=user) | -9,044,136,800,075,633,000 | Send a new secret for a user. | plugins/otp/otp.py | otp_secret_create | hosom/jarvis | python | @arg_botcmd('user', type=str, admin_only=True, template='otp_secret_create')
def otp_secret_create(self, message, user=None):
'\n\t\t\n\t\t'
secret = pyotp.random_base32()
with self.lock:
with self.stored('secrets') as secrets:
secrets[user] = (secret, 0, _BASE_TIME)
totp = pyotp.TOTP(secret)
url = totp.provisioning_uri(user)
self.build_qrcode(user, url)
if self.config:
if self.config.get('provision_via_chat'):
f = open('{0}{1}-qrcode.png'.format(self.DATA_DIR, user), 'rb')
self.send_stream_request(self.build_identifier(user), f, name='OTP-secret.png')
self.send_templated(self.build_identifier(user), 'otp_secret_create_pm', dict(url=url))
return dict(chat_enrollment=True, user=user)
return dict(chat_enrollment=False, user=user) |
@arg_botcmd('otp', type=int, template='otp_auth')
def otp_auth(self, message, otp=None):
'\n\t\tAuthenticate with OTP to the bot to pass OTP filtering.\n\t\t'
if message.is_group:
return dict(group_chat=True)
identity = self.get_identity(message)
if (identity not in self['secrets']):
return dict(not_enrolled=True)
(secret, attempts, _) = self['secrets'][identity]
totp = pyotp.TOTP(secret)
if totp.verify(otp):
with self.lock:
with self.stored('secrets') as secrets:
(secret, _, _) = secrets[identity]
secrets[identity] = (secret, 0, datetime.datetime.now())
return dict(success=True)
else:
with self.lock:
with self.stored('secrets') as secrets:
(secret, attempts, ts) = secrets[identity]
if (attempts > self.config.get('max_retries')):
secret = ''
secrets[identity] = (secret, (attempts + 1), ts)
return dict(success=False) | 9,054,655,397,197,392,000 | Authenticate with OTP to the bot to pass OTP filtering. | plugins/otp/otp.py | otp_auth | hosom/jarvis | python | @arg_botcmd('otp', type=int, template='otp_auth')
def otp_auth(self, message, otp=None):
'\n\t\t\n\t\t'
if message.is_group:
return dict(group_chat=True)
identity = self.get_identity(message)
if (identity not in self['secrets']):
return dict(not_enrolled=True)
(secret, attempts, _) = self['secrets'][identity]
totp = pyotp.TOTP(secret)
if totp.verify(otp):
with self.lock:
with self.stored('secrets') as secrets:
(secret, _, _) = secrets[identity]
secrets[identity] = (secret, 0, datetime.datetime.now())
return dict(success=True)
else:
with self.lock:
with self.stored('secrets') as secrets:
(secret, attempts, ts) = secrets[identity]
if (attempts > self.config.get('max_retries')):
secret = ''
secrets[identity] = (secret, (attempts + 1), ts)
return dict(success=False) |
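The plugin's OTP handling reduces to a handful of pyotp calls (the same ones used above). A minimal standalone sketch; the user name is a hypothetical value:

```python
import pyotp

# Sketch only: the pyotp calls the plugin builds on.
secret = pyotp.random_base32()                      # stored per user in self['secrets']
totp = pyotp.TOTP(secret)
url = totp.provisioning_uri('alice@example.com')    # hypothetical user; encoded as a QR code
code = totp.now()                                   # what the user's authenticator app shows
assert totp.verify(code)                            # the check performed by otp_auth
```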
@cmdfilter
def otp_filter(self, message, command, args, dry_run):
'\n\t\tFilter commands to determine if user has recently validated with OTP.\n\t\t'
with self.lock:
if (command in self['commands']):
self.log.info('{0} is protected by OTP. Processing.'.format(command))
identity = self.get_identity(message)
secrets = self['secrets']
if (identity not in secrets):
self.send_templated(message.frm, 'otp_filter', dict(not_enrolled=True))
return (None, None, None)
(_, _, lastotp) = secrets[identity]
if ((datetime.datetime.now() - lastotp) > _OTP_EXPIRE):
self.log.info('{0} has not authenticated with OTP since expire'.format(identity))
self.send_templated(message.frm, 'otp_filter', dict(auth_required=True))
return (None, None, None)
self.log.info('OTP ok, permit command.')
return (message, command, args) | -7,518,729,145,550,202,000 | Filter commands to determine if user has recently validated with OTP. | plugins/otp/otp.py | otp_filter | hosom/jarvis | python | @cmdfilter
def otp_filter(self, message, command, args, dry_run):
'\n\t\t\n\t\t'
with self.lock:
if (command in self['commands']):
self.log.info('{0} is protected by OTP. Processing.'.format(command))
identity = self.get_identity(message)
secrets = self['secrets']
if (identity not in secrets):
self.send_templated(message.frm, 'otp_filter', dict(not_enrolled=True))
return (None, None, None)
(_, _, lastotp) = secrets[identity]
if ((datetime.datetime.now() - lastotp) > _OTP_EXPIRE):
self.log.info('{0} has not authenticated with OTP since expire'.format(identity))
self.send_templated(message.frm, 'otp_filter', dict(auth_required=True))
return (None, None, None)
self.log.info('OTP ok, permit command.')
return (message, command, args) |
def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, past_key_values_length=0, training: bool=False) -> tf.Tensor:
'\n Applies embedding based on inputs tensor.\n\n Returns:\n final_embeddings (`tf.Tensor`): output embedding tensor.\n '
if ((input_ids is None) and (inputs_embeds is None)):
raise ValueError('Need to provide either `input_ids` or `input_embeds`.')
if (input_ids is not None):
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:(- 1)]
if (token_type_ids is None):
token_type_ids = tf.fill(dims=input_shape, value=0)
if (position_ids is None):
position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=(input_shape[1] + past_key_values_length)), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = ((inputs_embeds + position_embeds) + token_type_embeds)
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings | -4,769,499,439,953,555,000 | Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor. | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | def call(self, input_ids: tf.Tensor=None, position_ids: tf.Tensor=None, token_type_ids: tf.Tensor=None, inputs_embeds: tf.Tensor=None, past_key_values_length=0, training: bool=False) -> tf.Tensor:
'\n Applies embedding based on inputs tensor.\n\n Returns:\n final_embeddings (`tf.Tensor`): output embedding tensor.\n '
if ((input_ids is None) and (inputs_embeds is None)):
raise ValueError('Need to provide either `input_ids` or `input_embeds`.')
if (input_ids is not None):
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:(- 1)]
if (token_type_ids is None):
token_type_ids = tf.fill(dims=input_shape, value=0)
if (position_ids is None):
position_ids = tf.expand_dims(tf.range(start=past_key_values_length, limit=(input_shape[1] + past_key_values_length)), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = ((inputs_embeds + position_embeds) + token_type_embeds)
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings |
def _prune_heads(self, heads_to_prune):
'\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n '
raise NotImplementedError | -6,215,471,936,727,332,000 | Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel | src/transformers/models/convbert/modeling_tf_convbert.py | _prune_heads | AK391/transformers | python | def _prune_heads(self, heads_to_prune):
'\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n '
raise NotImplementedError |
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs):
'\n labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs)
generator_hidden_states = self.convbert(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids'], position_ids=inputs['position_ids'], head_mask=inputs['head_mask'], inputs_embeds=inputs['inputs_embeds'], output_attentions=inputs['output_attentions'], output_hidden_states=inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=inputs['training'])
prediction_scores = self.generator_lm_head(prediction_scores, training=inputs['training'])
loss = (None if (inputs['labels'] is None) else self.compute_loss(inputs['labels'], prediction_scores))
if (not inputs['return_dict']):
output = ((prediction_scores,) + generator_hidden_states[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions) | -8,487,996,097,200,511,000 | labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs):
'\n labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,\n config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the\n loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs)
generator_hidden_states = self.convbert(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids'], position_ids=inputs['position_ids'], head_mask=inputs['head_mask'], inputs_embeds=inputs['inputs_embeds'], output_attentions=inputs['output_attentions'], output_hidden_states=inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=inputs['training'])
prediction_scores = self.generator_lm_head(prediction_scores, training=inputs['training'])
loss = (None if (inputs['labels'] is None) else self.compute_loss(inputs['labels'], prediction_scores))
if (not inputs['return_dict']):
output = ((prediction_scores,) + generator_hidden_states[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFMaskedLMOutput(loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions) |
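A rough usage sketch for the masked-LM head above; 'YituTech/conv-bert-base' is assumed to be an available pretrained ConvBERT checkpoint and is not taken from this file:

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFConvBertForMaskedLM

# Sketch only; 'YituTech/conv-bert-base' is an assumed checkpoint name.
tokenizer = AutoTokenizer.from_pretrained('YituTech/conv-bert-base')
model = TFConvBertForMaskedLM.from_pretrained('YituTech/conv-bert-base')

inputs = tokenizer('The capital of France is [MASK].', return_tensors='tf')
logits = model(**inputs).logits

# locate the [MASK] position and read off the most likely token
mask_pos = int(tf.where(inputs['input_ids'][0] == tokenizer.mask_token_id)[0, 0])
predicted_id = int(tf.argmax(logits[0, mask_pos]))
print(tokenizer.decode([predicted_id]))
```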
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs):
'\n labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs)
outputs = self.convbert(inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids'], position_ids=inputs['position_ids'], head_mask=inputs['head_mask'], inputs_embeds=inputs['inputs_embeds'], output_attentions=inputs['output_attentions'], output_hidden_states=inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
logits = self.classifier(outputs[0], training=inputs['training'])
loss = (None if (inputs['labels'] is None) else self.compute_loss(inputs['labels'], logits))
if (not inputs['return_dict']):
output = ((logits,) + outputs[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) | -9,053,624,724,062,643,000 | labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy). | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs):
'\n labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs)
outputs = self.convbert(inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids'], position_ids=inputs['position_ids'], head_mask=inputs['head_mask'], inputs_embeds=inputs['inputs_embeds'], output_attentions=inputs['output_attentions'], output_hidden_states=inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
logits = self.classifier(outputs[0], training=inputs['training'])
loss = (None if (inputs['labels'] is None) else self.compute_loss(inputs['labels'], logits))
if (not inputs['return_dict']):
output = ((logits,) + outputs[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) |
@property
def dummy_inputs(self):
'\n Dummy inputs to build the network.\n\n Returns:\n tf.Tensor with dummy inputs\n '
return {'input_ids': tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)} | 4,374,154,624,472,328,700 | Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs | src/transformers/models/convbert/modeling_tf_convbert.py | dummy_inputs | AK391/transformers | python | @property
def dummy_inputs(self):
'\n Dummy inputs to build the network.\n\n Returns:\n tf.Tensor with dummy inputs\n '
return {'input_ids': tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)} |
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs):
'\n labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`\n where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs)
if (inputs['input_ids'] is not None):
num_choices = shape_list(inputs['input_ids'])[1]
seq_length = shape_list(inputs['input_ids'])[2]
else:
num_choices = shape_list(inputs['inputs_embeds'])[1]
seq_length = shape_list(inputs['inputs_embeds'])[2]
flat_input_ids = (tf.reshape(inputs['input_ids'], ((- 1), seq_length)) if (inputs['input_ids'] is not None) else None)
flat_attention_mask = (tf.reshape(inputs['attention_mask'], ((- 1), seq_length)) if (inputs['attention_mask'] is not None) else None)
flat_token_type_ids = (tf.reshape(inputs['token_type_ids'], ((- 1), seq_length)) if (inputs['token_type_ids'] is not None) else None)
flat_position_ids = (tf.reshape(inputs['position_ids'], ((- 1), seq_length)) if (inputs['position_ids'] is not None) else None)
flat_inputs_embeds = (tf.reshape(inputs['inputs_embeds'], ((- 1), seq_length, shape_list(inputs['inputs_embeds'])[3])) if (inputs['inputs_embeds'] is not None) else None)
outputs = self.convbert(flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, inputs['head_mask'], flat_inputs_embeds, inputs['output_attentions'], inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
logits = self.sequence_summary(outputs[0], training=inputs['training'])
logits = self.classifier(logits)
reshaped_logits = tf.reshape(logits, ((- 1), num_choices))
loss = (None if (inputs['labels'] is None) else self.compute_loss(inputs['labels'], reshaped_logits))
if (not inputs['return_dict']):
output = ((reshaped_logits,) + outputs[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) | -3,477,998,566,155,918,000 | labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, num_choices, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs):
'\n labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`\n where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs)
if (inputs['input_ids'] is not None):
num_choices = shape_list(inputs['input_ids'])[1]
seq_length = shape_list(inputs['input_ids'])[2]
else:
num_choices = shape_list(inputs['inputs_embeds'])[1]
seq_length = shape_list(inputs['inputs_embeds'])[2]
flat_input_ids = (tf.reshape(inputs['input_ids'], ((- 1), seq_length)) if (inputs['input_ids'] is not None) else None)
flat_attention_mask = (tf.reshape(inputs['attention_mask'], ((- 1), seq_length)) if (inputs['attention_mask'] is not None) else None)
flat_token_type_ids = (tf.reshape(inputs['token_type_ids'], ((- 1), seq_length)) if (inputs['token_type_ids'] is not None) else None)
flat_position_ids = (tf.reshape(inputs['position_ids'], ((- 1), seq_length)) if (inputs['position_ids'] is not None) else None)
flat_inputs_embeds = (tf.reshape(inputs['inputs_embeds'], ((- 1), seq_length, shape_list(inputs['inputs_embeds'])[3])) if (inputs['inputs_embeds'] is not None) else None)
outputs = self.convbert(flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, inputs['head_mask'], flat_inputs_embeds, inputs['output_attentions'], inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
logits = self.sequence_summary(outputs[0], training=inputs['training'])
logits = self.classifier(logits)
reshaped_logits = tf.reshape(logits, ((- 1), num_choices))
loss = (None if (inputs['labels'] is None) else self.compute_loss(inputs['labels'], reshaped_logits))
if (not inputs['return_dict']):
output = ((reshaped_logits,) + outputs[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFMultipleChoiceModelOutput(loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) |
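Illustrative aside (not part of the extracted record): the multiple-choice call above flattens the (batch_size, num_choices, seq_length) inputs into an ordinary 2-D batch before encoding, then folds the classifier output back into per-choice logits. A minimal pure-TensorFlow sketch of that reshape round trip, with purely illustrative shapes:
import tensorflow as tf

batch_size, num_choices, seq_length = 2, 4, 16
input_ids = tf.zeros([batch_size, num_choices, seq_length], dtype=tf.int32)

# Flatten (batch, choices, seq) -> (batch * choices, seq) so the encoder sees 2-D inputs.
flat_input_ids = tf.reshape(input_ids, (-1, seq_length))

# Pretend the classifier head returned one logit per flattened sequence.
flat_logits = tf.zeros([batch_size * num_choices, 1])

# Fold the logits back to (batch, num_choices); a softmax over the last axis then ranks the choices.
reshaped_logits = tf.reshape(flat_logits, (-1, num_choices))
print(reshaped_logits.shape)  # (2, 4)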
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs):
'\n labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs)
outputs = self.convbert(inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids'], position_ids=inputs['position_ids'], head_mask=inputs['head_mask'], inputs_embeds=inputs['inputs_embeds'], output_attentions=inputs['output_attentions'], output_hidden_states=inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs['training'])
logits = self.classifier(sequence_output)
loss = (None if (inputs['labels'] is None) else self.compute_loss(inputs['labels'], logits))
if (not inputs['return_dict']):
output = ((logits,) + outputs[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) | 6,359,083,734,458,689,000 | labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs):
'\n labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs)
outputs = self.convbert(inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids'], position_ids=inputs['position_ids'], head_mask=inputs['head_mask'], inputs_embeds=inputs['inputs_embeds'], output_attentions=inputs['output_attentions'], output_hidden_states=inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs['training'])
logits = self.classifier(sequence_output)
loss = (None if (inputs['labels'] is None) else self.compute_loss(inputs['labels'], logits))
if (not inputs['return_dict']):
output = ((logits,) + outputs[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFTokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) |
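A hedged end-to-end usage sketch for the token-classification head above; the checkpoint name ("YituTech/conv-bert-base") and the num_labels value are assumptions, not taken from the record:
import tensorflow as tf
from transformers import ConvBertTokenizer, TFConvBertForTokenClassification

tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertForTokenClassification.from_pretrained("YituTech/conv-bert-base", num_labels=5)

inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
outputs = model(inputs)
predictions = tf.argmax(outputs.logits, axis=-1)  # one predicted label id per token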
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, start_positions=None, end_positions=None, training=False, **kwargs):
'\n start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, start_positions=start_positions, end_positions=end_positions, training=training, kwargs_call=kwargs)
outputs = self.convbert(inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids'], position_ids=inputs['position_ids'], head_mask=inputs['head_mask'], inputs_embeds=inputs['inputs_embeds'], output_attentions=inputs['output_attentions'], output_hidden_states=inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
(start_logits, end_logits) = tf.split(logits, 2, axis=(- 1))
start_logits = tf.squeeze(start_logits, axis=(- 1))
end_logits = tf.squeeze(end_logits, axis=(- 1))
loss = None
if ((inputs['start_positions'] is not None) and (inputs['end_positions'] is not None)):
labels = {'start_position': inputs['start_positions']}
labels['end_position'] = inputs['end_positions']
loss = self.compute_loss(labels, (start_logits, end_logits))
if (not inputs['return_dict']):
output = ((start_logits, end_logits) + outputs[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) | -8,155,518,814,180,526,000 | start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss. | src/transformers/models/convbert/modeling_tf_convbert.py | call | AK391/transformers | python | @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format('batch_size, sequence_length'))
@add_code_sample_docstrings(processor_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def call(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, start_positions=None, end_positions=None, training=False, **kwargs):
'\n start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the start of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):\n Labels for position (index) of the end of the labelled span for computing the token classification loss.\n Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence\n are not taken into account for computing the loss.\n '
inputs = input_processing(func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, start_positions=start_positions, end_positions=end_positions, training=training, kwargs_call=kwargs)
outputs = self.convbert(inputs['input_ids'], attention_mask=inputs['attention_mask'], token_type_ids=inputs['token_type_ids'], position_ids=inputs['position_ids'], head_mask=inputs['head_mask'], inputs_embeds=inputs['inputs_embeds'], output_attentions=inputs['output_attentions'], output_hidden_states=inputs['output_hidden_states'], return_dict=inputs['return_dict'], training=inputs['training'])
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
(start_logits, end_logits) = tf.split(logits, 2, axis=(- 1))
start_logits = tf.squeeze(start_logits, axis=(- 1))
end_logits = tf.squeeze(end_logits, axis=(- 1))
loss = None
if ((inputs['start_positions'] is not None) and (inputs['end_positions'] is not None)):
labels = {'start_position': inputs['start_positions']}
labels['end_position'] = inputs['end_positions']
loss = self.compute_loss(labels, (start_logits, end_logits))
if (not inputs['return_dict']):
output = ((start_logits, end_logits) + outputs[1:])
return (((loss,) + output) if (loss is not None) else output)
return TFQuestionAnsweringModelOutput(loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) |
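A small pure-TensorFlow sketch of how the QA head above turns a single 2-unit projection into separate start/end scores; the shapes are illustrative assumptions:
import tensorflow as tf

# Pretend qa_outputs already projected the encoder states to 2 values per token: (batch=1, seq=16, 2).
logits = tf.random.normal([1, 16, 2])

start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)  # (1, 16): score of each token being the answer start
end_logits = tf.squeeze(end_logits, axis=-1)      # (1, 16): score of each token being the answer end
answer_start = tf.argmax(start_logits, axis=-1)
answer_end = tf.argmax(end_logits, axis=-1)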
def pLK0(self):
'\n Default LK Params.\n '
return dict(winSize=(12, 6), maxLevel=4, crit=((cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT), 100, 0.03), flags=0, minEigThreshold=0.001) | 2,156,167,278,745,075,700 | Default LK Params. | core/track.py | pLK0 | yycho0108/MoRoL | python | def pLK0(self):
'\n \n '
return dict(winSize=(12, 6), maxLevel=4, crit=((cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT), 100, 0.03), flags=0, minEigThreshold=0.001) |
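A sketch of how such a parameter dict is typically consumed; the assumption (not shown in the record) is that it is unpacked into OpenCV's sparse pyramidal Lucas-Kanade factory, whose keyword names match the dict keys:
import cv2

pLK = dict(winSize=(12, 6), maxLevel=4,
           crit=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 0.03),
           flags=0, minEigThreshold=0.001)
lk = cv2.SparsePyrLKOpticalFlow_create(**pLK)  # presumably what the tracker's lk_ attribute holds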
def __call__(self, img1, img2, pt1, pt2=None, thresh=2.0, return_msk=False):
'\n Arguments:\n img1(np.ndarray) : previous image. (color/mono) (HxWx?)\n img2(np.ndarray) : current image (color/mono) (HxWx?)\n pt1(np.ndarray) : previous points. (Mx2)\n pt2(np.ndarray) : [Optional] current points estimate (Mx2)\n thresh(float) : Flow Back-projection Error threshold\n\n Returns:\n pt2(np.ndarray) : current points. (Mx2)\n idx(np.ndarray) : valid tracked indices from pt1 & pt2.\n '
if (pt1.size <= 0):
pt2 = np.empty([0, 2], dtype=np.float32)
if return_msk:
msk = np.empty([0], dtype=np.bool)
return (pt2, msk)
idx = np.empty([0], dtype=np.int32)
return (pt2, idx)
(h, w) = np.shape(img2)[:2]
if ((np.ndim(img1) == 2) or (img1.shape[2] == 1)):
img1_gray = img1
img2_gray = img2
else:
if (self.tmp_['img1g'] is not None):
cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY, self.tmp_['img1g'])
img1_gray = self.tmp_['img1g']
else:
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
self.tmp_['img1g'] = np.empty_like(img1_gray)
if (self.tmp_['img2g'] is not None):
cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY, self.tmp_['img2g'])
img2_gray = self.tmp_['img2g']
else:
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
self.tmp_['img2g'] = np.empty_like(img2_gray)
if (pt2 is not None):
self.lk_.setFlags((self.lk_.getFlags() | cv2.OPTFLOW_USE_INITIAL_FLOW))
(pt2, st, _) = self.lk_.calc(img1_gray, img2_gray, pt1, pt2)
else:
(pt2, st, _) = self.lk_.calc(img1_gray, img2_gray, pt1, None)
st_fw = st[:, 0].astype(np.bool)
self.lk_.setFlags((self.lk_.getFlags() & (~ cv2.OPTFLOW_USE_INITIAL_FLOW)))
(pt1_r, st, _) = self.lk_.calc(img2_gray, img1_gray, pt2, None)
st_bw = st[:, 0].astype(np.bool)
err = np.linalg.norm((pt1 - pt1_r), axis=(- 1))
msk = np.logical_and.reduce([(err < thresh), (0 <= pt2[:, 0]), (0 <= pt2[:, 1]), (pt2[:, 0] < w), (pt2[:, 1] < h), st_fw, st_bw])
if return_msk:
return (pt2, msk)
else:
idx = np.where(msk)[0]
return (pt2, idx) | 8,712,494,715,450,819,000 | Arguments:
img1(np.ndarray) : previous image. (color/mono) (HxWx?)
img2(np.ndarray) : current image (color/mono) (HxWx?)
pt1(np.ndarray) : previous points. (Mx2)
pt2(np.ndarray) : [Optional] current points estimate (Mx2)
thresh(float) : Flow Back-projection Error threshold
Returns:
pt2(np.ndarray) : current points. (Mx2)
idx(np.ndarray) : valid tracked indices from pt1 & pt2. | core/track.py | __call__ | yycho0108/MoRoL | python | def __call__(self, img1, img2, pt1, pt2=None, thresh=2.0, return_msk=False):
'\n Arguments:\n img1(np.ndarray) : previous image. (color/mono) (HxWx?)\n img2(np.ndarray) : current image (color/mono) (HxWx?)\n pt1(np.ndarray) : previous points. (Mx2)\n pt2(np.ndarray) : [Optional] current points estimate (Mx2)\n thresh(float) : Flow Back-projection Error threshold\n\n Returns:\n pt2(np.ndarray) : current points. (Mx2)\n idx(np.ndarray) : valid tracked indices from pt1 & pt2.\n '
if (pt1.size <= 0):
pt2 = np.empty([0, 2], dtype=np.float32)
if return_msk:
msk = np.empty([0], dtype=np.bool)
return (pt2, msk)
idx = np.empty([0], dtype=np.int32)
return (pt2, idx)
(h, w) = np.shape(img2)[:2]
if ((np.ndim(img1) == 2) or (img1.shape[2] == 1)):
img1_gray = img1
img2_gray = img2
else:
if (self.tmp_['img1g'] is not None):
cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY, self.tmp_['img1g'])
img1_gray = self.tmp_['img1g']
else:
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
self.tmp_['img1g'] = np.empty_like(img1_gray)
if (self.tmp_['img2g'] is not None):
cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY, self.tmp_['img2g'])
img2_gray = self.tmp_['img2g']
else:
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
self.tmp_['img2g'] = np.empty_like(img2_gray)
if (pt2 is not None):
self.lk_.setFlags((self.lk_.getFlags() | cv2.OPTFLOW_USE_INITIAL_FLOW))
(pt2, st, _) = self.lk_.calc(img1_gray, img2_gray, pt1, pt2)
else:
(pt2, st, _) = self.lk_.calc(img1_gray, img2_gray, pt1, None)
st_fw = st[:, 0].astype(np.bool)
self.lk_.setFlags((self.lk_.getFlags() & (~ cv2.OPTFLOW_USE_INITIAL_FLOW)))
(pt1_r, st, _) = self.lk_.calc(img2_gray, img1_gray, pt2, None)
st_bw = st[:, 0].astype(np.bool)
err = np.linalg.norm((pt1 - pt1_r), axis=(- 1))
msk = np.logical_and.reduce([(err < thresh), (0 <= pt2[:, 0]), (0 <= pt2[:, 1]), (pt2[:, 0] < w), (pt2[:, 1] < h), st_fw, st_bw])
if return_msk:
return (pt2, msk)
else:
idx = np.where(msk)[0]
return (pt2, idx) |
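A hedged usage sketch of the forward/backward consistency check implemented above; the wrapper class name (Tracker), the image file names, and the corner-seeding step are assumptions made for illustration:
import cv2
import numpy as np

img1 = cv2.imread("frame_0.png")   # previous frame (assumed path)
img2 = cv2.imread("frame_1.png")   # current frame (assumed path)

# Seed points to track in the previous frame.
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray1, maxCorners=200, qualityLevel=0.01, minDistance=8)
pt1 = corners.reshape(-1, 2).astype(np.float32)

tracker = Tracker()                # assumed name of the class that defines __call__ above
pt2, idx = tracker(img1, img2, pt1, thresh=2.0)
print('%d / %d points survived the back-projection test' % (len(idx), len(pt1)))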
def read_fasta(filename):
'Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.\n first element in tuple is header and second the sequence.\n \n Key Arguments:\n filename -- fasta file.\n '
tmp_seq = None
seqs_list = []
with open(filename, 'r') as fasta_file:
for line in fasta_file:
line = line.replace('\n', '')
if ('>' in line):
if (tmp_seq != None):
seqs_list.append((hd, tmp_seq))
tmp_seq = ''
hd = line.replace('>', '')
else:
tmp_seq += line
seqs_list.append((hd, tmp_seq))
try:
assert (len(seqs_list) > 0)
except AssertionError:
print('The selected file is not a Fasta file.')
else:
return seqs_list | -592,529,539,078,873,700 | Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.
The first element in each tuple is the header and the second is the sequence.
Key Arguments:
filename -- fasta file. | pridcon/utils.py | read_fasta | Mirindi95/PrIDcon | python | def read_fasta(filename):
'Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.\n first element in tuple is header and second the sequence.\n \n Key Arguments:\n filename -- fasta file.\n '
tmp_seq = None
seqs_list = []
with open(filename, 'r') as fasta_file:
for line in fasta_file:
line = line.replace('\n', '')
if ('>' in line):
if (tmp_seq != None):
seqs_list.append((hd, tmp_seq))
tmp_seq = ''
hd = line.replace('>', '')
else:
tmp_seq += line
seqs_list.append((hd, tmp_seq))
try:
assert (len(seqs_list) > 0)
except AssertionError:
print('The selected file is not a Fasta file.')
else:
return seqs_list |
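A brief usage sketch for the parser above; the import path is inferred from the record's module path and the file name is an assumption:
from pridcon.utils import read_fasta

records = read_fasta('example.fasta')  # assumed input file
for header, sequence in records:
    print(header, len(sequence))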
def write_fasta(outfile, seq_dict):
'Writes fasta with dictionary where keys are headers and values sequences.\n \n Key Arguments:\n outfile.\n '
step = 70
with open(outfile, 'w') as file:
for (header, sequence) in seq_dict.items():
sequence_list = [sequence[(i - step):i] for i in range(step, (len(sequence) + 1), step)]
last = sequence[(step * (len(sequence) // step)):]
if (last != ''):
sequence_list.append(last)
sequence = '\n'.join(sequence_list)
file.write((((('>' + header) + '\n') + sequence) + '\n')) | -5,630,814,507,906,850,000 | Writes a FASTA file from a dictionary where keys are headers and values are sequences.
Key Arguments:
outfile. | pridcon/utils.py | write_fasta | Mirindi95/PrIDcon | python | def write_fasta(outfile, seq_dict):
'Writes fasta with dictionary where keys are headers and values sequences.\n \n Key Arguments:\n outfile.\n '
step = 70
with open(outfile, 'w') as file:
for (header, sequence) in seq_dict.items():
sequence_list = [sequence[(i - step):i] for i in range(step, (len(sequence) + 1), step)]
last = sequence[(step * (len(sequence) // step)):]
if (last != ''):
sequence_list.append(last)
sequence = '\n'.join(sequence_list)
file.write((((('>' + header) + '\n') + sequence) + '\n')) |
def reads_generator(fasta_file, read_length, k):
'This function simulates the reads generation from a fasta file with a coverage not less than 50.\n It will return a list of tuples. First element in tuple is read ID and second the sequence.\n \n Key Arguments:\n fasta_file -- fasta file.\n read_length -- size of reads.\n '
reads_list = []
overlap = (k - 1)
(input_header, input_seq) = read_fasta(fasta_file)[0]
n = len(input_seq)
for i in range(0, (n - overlap), (read_length - overlap)):
read_seq = input_seq[i:(i + read_length)]
reads_list.append(read_seq)
return [('{}_{}'.format(input_header, i), read) for (i, read) in enumerate(reads_list)] | 1,295,099,355,066,453,500 | This function simulates the generation of reads from a fasta file with a coverage not less than 50.
It returns a list of tuples; the first element in each tuple is the read ID and the second is the sequence.
Key Arguments:
fasta_file -- fasta file.
read_length -- size of reads. | pridcon/utils.py | reads_generator | Mirindi95/PrIDcon | python | def reads_generator(fasta_file, read_length, k):
'This function simulates the reads generation from a fasta file with a coverage not less than 50.\n It will return a list of tuples. First element in tuple is read ID and second the sequence.\n \n Key Arguments:\n fasta_file -- fasta file.\n read_length -- size of reads.\n '
reads_list = []
overlap = (k - 1)
(input_header, input_seq) = read_fasta(fasta_file)[0]
n = len(input_seq)
for i in range(0, (n - overlap), (read_length - overlap)):
read_seq = input_seq[i:(i + read_length)]
reads_list.append(read_seq)
return [('{}_{}'.format(input_header, i), read) for (i, read) in enumerate(reads_list)] |
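A usage sketch; consecutive simulated reads overlap by k - 1 bases, and the file name and parameter values are assumptions:
from pridcon.utils import reads_generator  # import path inferred from the record's module path

reads = reads_generator('example.fasta', read_length=100, k=31)
print(len(reads), 'reads; first id:', reads[0][0])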
def write_fastq(reads_list, filename):
'This function created a FASTQ file from a list of read generated by the reads_generator function.\n Key Arguments:\n reads_list -- list of reads generated with reads_generator.\n filename -- name of output file WITH EXTENSION.\n '
with open(filename, 'w') as fastq_file:
for (read_id, read) in reads_list:
fastq_file.write('@{}\n'.format(read_id))
fastq_file.write((read + '\n'))
fastq_file.write('+\n')
fastq_file.write((('I' * len(read)) + '\n')) | -2,214,286,735,490,284,500 | This function creates a FASTQ file from a list of reads generated by the reads_generator function.
Key Arguments:
reads_list -- list of reads generated with reads_generator.
filename -- name of output file WITH EXTENSION. | pridcon/utils.py | write_fastq | Mirindi95/PrIDcon | python | def write_fastq(reads_list, filename):
'This function created a FASTQ file from a list of read generated by the reads_generator function.\n Key Arguments:\n reads_list -- list of reads generated with reads_generator.\n filename -- name of output file WITH EXTENSION.\n '
with open(filename, 'w') as fastq_file:
for (read_id, read) in reads_list:
fastq_file.write('@{}\n'.format(read_id))
fastq_file.write((read + '\n'))
fastq_file.write('+\n')
fastq_file.write((('I' * len(read)) + '\n')) |
def read_fastq(filename):
'This function reads a FASTQ file storing the read and its ID in a dictionary where keys are IDs and read value.\n This function does not consider + and score lines.\n \n Key Arguments:\n filename -- name of FASTQ input file.\n '
reads_dict = dict()
with open(filename, 'r') as fastq_file:
for line in fastq_file:
if ('@' in line):
reads_dict[line[1:].replace('\n', '')] = next(fastq_file).replace('\n', '')
next(fastq_file)
next(fastq_file)
return reads_dict | 1,161,349,585,305,516,800 | This function reads a FASTQ file, storing each read and its ID in a dictionary where keys are IDs and values are reads.
It does not consider the '+' separator or quality-score lines.
Key Arguments:
filename -- name of FASTQ input file. | pridcon/utils.py | read_fastq | Mirindi95/PrIDcon | python | def read_fastq(filename):
'This function reads a FASTQ file storing the read and its ID in a dictionary where keys are IDs and read value.\n This function does not consider + and score lines.\n \n Key Arguments:\n filename -- name of FASTQ input file.\n '
reads_dict = dict()
with open(filename, 'r') as fastq_file:
for line in fastq_file:
if ('@' in line):
reads_dict[line[1:].replace('\n', '')] = next(fastq_file).replace('\n', '')
next(fastq_file)
next(fastq_file)
return reads_dict |
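A round-trip sketch combining the FASTQ helpers above; paths and parameter values are assumptions:
from pridcon.utils import reads_generator, write_fastq, read_fastq  # import path inferred from the record

reads = reads_generator('example.fasta', read_length=100, k=31)  # simulate reads
write_fastq(reads, 'example_reads.fastq')                        # dump them with dummy 'I' qualities
back = read_fastq('example_reads.fastq')                         # read them back as {id: sequence}
assert all(back[rid] == seq for rid, seq in reads)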
def create_user(self, email, password=None, **extra_fields):
'Creates and saves a new user'
if (not email):
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user | -4,414,797,265,921,968,600 | Creates and saves a new user | app/core/models.py | create_user | StoikovOleh/recipe-app-api | python | def create_user(self, email, password=None, **extra_fields):
if (not email):
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user |
def create_superuser(self, email, password):
'Creates and saves a new super user'
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user | 2,904,805,345,650,944,500 | Creates and saves a new super user | app/core/models.py | create_superuser | StoikovOleh/recipe-app-api | python | def create_superuser(self, email, password):
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user |
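A hedged sketch of how this manager is normally exercised, e.g. from a Django shell or test; the get_user_model() wiring assumes the project's AUTH_USER_MODEL points at the model that uses this manager:
from django.contrib.auth import get_user_model

User = get_user_model()
user = User.objects.create_user(email='test@example.com', password='pass1234')
admin = User.objects.create_superuser(email='admin@example.com', password='pass1234')

assert user.check_password('pass1234')        # password is hashed via set_password, not stored raw
assert admin.is_staff and admin.is_superuser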
def toDF(self, schema=None, sampleRatio=None):
"\n Converts current :class:`RDD` into a :class:`DataFrame`\n\n This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``\n\n :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns\n :param samplingRatio: the sample ratio of rows used for inferring\n :return: a DataFrame\n\n >>> rdd.toDF().collect()\n [Row(name=u'Alice', age=1)]\n "
return sparkSession.createDataFrame(self, schema, sampleRatio) | 1,476,514,188,411,419,600 | Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
:param samplingRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)] | python/pyspark/sql/session.py | toDF | DislabNJU/Spark | python | def toDF(self, schema=None, sampleRatio=None):
"\n Converts current :class:`RDD` into a :class:`DataFrame`\n\n This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``\n\n :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns\n :param samplingRatio: the sample ratio of rows used for inferring\n :return: a DataFrame\n\n >>> rdd.toDF().collect()\n [Row(name=u'Alice', age=1)]\n "
return sparkSession.createDataFrame(self, schema, sampleRatio) |
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
'Creates a new SparkSession.\n\n >>> from datetime import datetime\n >>> spark = SparkSession(sc)\n >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,\n ... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),\n ... time=datetime(2014, 8, 1, 14, 1, 5))])\n >>> df = allTypes.toDF()\n >>> df.createOrReplaceTempView("allTypes")\n >>> spark.sql(\'select i+1, d+1, not b, list[1], dict["s"], time, row.a \'\n ... \'from allTypes where b and i > 0\').collect()\n [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]\n >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()\n [(1, u\'string\', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]\n '
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if (jsparkSession is None):
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
if ((SparkSession._instantiatedSession is None) or (SparkSession._instantiatedSession._sc._jsc is None)):
SparkSession._instantiatedSession = self | -449,384,611,703,641,860 | Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])] | python/pyspark/sql/session.py | __init__ | DislabNJU/Spark | python | @ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
'Creates a new SparkSession.\n\n >>> from datetime import datetime\n >>> spark = SparkSession(sc)\n >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,\n ... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),\n ... time=datetime(2014, 8, 1, 14, 1, 5))])\n >>> df = allTypes.toDF()\n >>> df.createOrReplaceTempView("allTypes")\n >>> spark.sql(\'select i+1, d+1, not b, list[1], dict["s"], time, row.a \'\n ... \'from allTypes where b and i > 0\').collect()\n [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]\n >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()\n [(1, u\'string\', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]\n '
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if (jsparkSession is None):
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
if ((SparkSession._instantiatedSession is None) or (SparkSession._instantiatedSession._sc._jsc is None)):
SparkSession._instantiatedSession = self |
@since(2.0)
def newSession(self):
'\n Returns a new SparkSession as new session, that has separate SQLConf,\n registered temporary views and UDFs, but shared SparkContext and\n table cache.\n '
return self.__class__(self._sc, self._jsparkSession.newSession()) | 4,860,885,721,390,664,000 | Returns a new SparkSession as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache. | python/pyspark/sql/session.py | newSession | DislabNJU/Spark | python | @since(2.0)
def newSession(self):
'\n Returns a new SparkSession as new session, that has separate SQLConf,\n registered temporary views and UDFs, but shared SparkContext and\n table cache.\n '
return self.__class__(self._sc, self._jsparkSession.newSession()) |
@property
@since(2.0)
def sparkContext(self):
'Returns the underlying :class:`SparkContext`.'
return self._sc | 3,306,938,129,485,477,000 | Returns the underlying :class:`SparkContext`. | python/pyspark/sql/session.py | sparkContext | DislabNJU/Spark | python | @property
@since(2.0)
def sparkContext(self):
return self._sc |
@property
@since(2.0)
def version(self):
'The version of Spark on which this application is running.'
return self._jsparkSession.version() | 838,769,964,761,334,300 | The version of Spark on which this application is running. | python/pyspark/sql/session.py | version | DislabNJU/Spark | python | @property
@since(2.0)
def version(self):
return self._jsparkSession.version() |
@property
@since(2.0)
def conf(self):
'Runtime configuration interface for Spark.\n\n This is the interface through which the user can get and set all Spark and Hadoop\n configurations that are relevant to Spark SQL. When getting the value of a config,\n this defaults to the value set in the underlying :class:`SparkContext`, if any.\n '
if (not hasattr(self, '_conf')):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf | -7,057,829,792,864,211,000 | Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any. | python/pyspark/sql/session.py | conf | DislabNJU/Spark | python | @property
@since(2.0)
def conf(self):
'Runtime configuration interface for Spark.\n\n This is the interface through which the user can get and set all Spark and Hadoop\n configurations that are relevant to Spark SQL. When getting the value of a config,\n this defaults to the value set in the underlying :class:`SparkContext`, if any.\n '
if (not hasattr(self, '_conf')):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf |
@property
@since(2.0)
def catalog(self):
'Interface through which the user may create, drop, alter or query underlying\n databases, tables, functions etc.\n '
if (not hasattr(self, '_catalog')):
self._catalog = Catalog(self)
return self._catalog | 711,370,211,427,092,000 | Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc. | python/pyspark/sql/session.py | catalog | DislabNJU/Spark | python | @property
@since(2.0)
def catalog(self):
'Interface through which the user may create, drop, alter or query underlying\n databases, tables, functions etc.\n '
if (not hasattr(self, '_catalog')):
self._catalog = Catalog(self)
return self._catalog |
@property
@since(2.0)
def udf(self):
'Returns a :class:`UDFRegistration` for UDF registration.\n\n :return: :class:`UDFRegistration`\n '
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped) | 3,305,879,536,619,469,300 | Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration` | python/pyspark/sql/session.py | udf | DislabNJU/Spark | python | @property
@since(2.0)
def udf(self):
'Returns a :class:`UDFRegistration` for UDF registration.\n\n :return: :class:`UDFRegistration`\n '
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped) |
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
'\n Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named\n ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with\n step value ``step``.\n\n :param start: the start value\n :param end: the end value (exclusive)\n :param step: the incremental step (default: 1)\n :param numPartitions: the number of partitions of the DataFrame\n :return: :class:`DataFrame`\n\n >>> spark.range(1, 7, 2).collect()\n [Row(id=1), Row(id=3), Row(id=5)]\n\n If only one argument is specified, it will be used as the end value.\n\n >>> spark.range(3).collect()\n [Row(id=0), Row(id=1), Row(id=2)]\n '
if (numPartitions is None):
numPartitions = self._sc.defaultParallelism
if (end is None):
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped) | 3,370,571,786,270,893,600 | Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)] | python/pyspark/sql/session.py | range | DislabNJU/Spark | python | @since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
'\n Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named\n ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with\n step value ``step``.\n\n :param start: the start value\n :param end: the end value (exclusive)\n :param step: the incremental step (default: 1)\n :param numPartitions: the number of partitions of the DataFrame\n :return: :class:`DataFrame`\n\n >>> spark.range(1, 7, 2).collect()\n [Row(id=1), Row(id=3), Row(id=5)]\n\n If only one argument is specified, it will be used as the end value.\n\n >>> spark.range(3).collect()\n [Row(id=0), Row(id=1), Row(id=2)]\n '
if (numPartitions is None):
numPartitions = self._sc.defaultParallelism
if (end is None):
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped) |
def _inferSchemaFromList(self, data):
'\n Infer schema from list of Row or tuple.\n\n :param data: list of Row or tuple\n :return: :class:`pyspark.sql.types.StructType`\n '
if (not data):
raise ValueError('can not infer schema from empty dataset')
first = data[0]
if (type(first) is dict):
warnings.warn('inferring schema from dict is deprecated,please use pyspark.sql.Row instead')
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError('Some of types cannot be determined after inferring')
return schema | 800,975,137,581,594,200 | Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType` | python/pyspark/sql/session.py | _inferSchemaFromList | DislabNJU/Spark | python | def _inferSchemaFromList(self, data):
'\n Infer schema from list of Row or tuple.\n\n :param data: list of Row or tuple\n :return: :class:`pyspark.sql.types.StructType`\n '
if (not data):
raise ValueError('can not infer schema from empty dataset')
first = data[0]
if (type(first) is dict):
warnings.warn('inferring schema from dict is deprecated,please use pyspark.sql.Row instead')
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError('Some of types cannot be determined after inferring')
return schema |
def _inferSchema(self, rdd, samplingRatio=None):
'\n Infer schema from an RDD of Row or tuple.\n\n :param rdd: an RDD of Row or tuple\n :param samplingRatio: sampling ratio, or no sampling (default)\n :return: :class:`pyspark.sql.types.StructType`\n '
first = rdd.first()
if (not first):
raise ValueError('The first row in RDD is empty, can not infer schema')
if (type(first) is dict):
warnings.warn('Using RDD of dict to inferSchema is deprecated. Use pyspark.sql.Row instead')
if (samplingRatio is None):
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if (not _has_nulltype(schema)):
break
else:
raise ValueError('Some of types cannot be determined by the first 100 rows, please try again with sampling')
else:
if (samplingRatio < 0.99):
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema | -2,158,780,547,185,956,600 | Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType` | python/pyspark/sql/session.py | _inferSchema | DislabNJU/Spark | python | def _inferSchema(self, rdd, samplingRatio=None):
'\n Infer schema from an RDD of Row or tuple.\n\n :param rdd: an RDD of Row or tuple\n :param samplingRatio: sampling ratio, or no sampling (default)\n :return: :class:`pyspark.sql.types.StructType`\n '
first = rdd.first()
if (not first):
raise ValueError('The first row in RDD is empty, can not infer schema')
if (type(first) is dict):
warnings.warn('Using RDD of dict to inferSchema is deprecated. Use pyspark.sql.Row instead')
if (samplingRatio is None):
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if (not _has_nulltype(schema)):
break
else:
raise ValueError('Some of types cannot be determined by the first 100 rows, please try again with sampling')
else:
if (samplingRatio < 0.99):
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema |
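A short sketch of how this inference path is reached from user code, assuming a live `spark` session as in the doctests above: with no samplingRatio only the first rows are inspected, otherwise a sampled fraction of the RDD is merged:
from pyspark.sql import Row

rdd = spark.sparkContext.parallelize([Row(name='Alice', age=1), Row(name='Bob', age=2)])
df_first = spark.createDataFrame(rdd)                       # schema inferred from the first row(s)
df_sampled = spark.createDataFrame(rdd, samplingRatio=0.5)  # schema merged over a sampled fraction
df_first.printSchema()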
def _createFromRDD(self, rdd, schema, samplingRatio):
'\n Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.\n '
if ((schema is None) or isinstance(schema, (list, tuple))):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for (i, name) in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif (not isinstance(schema, StructType)):
raise TypeError(('schema should be StructType or list or None, but got: %s' % schema))
rdd = rdd.map(schema.toInternal)
return (rdd, schema) | -1,601,657,814,421,343,200 | Create an RDD for DataFrame from an existing RDD, returns the RDD and schema. | python/pyspark/sql/session.py | _createFromRDD | DislabNJU/Spark | python | def _createFromRDD(self, rdd, schema, samplingRatio):
'\n \n '
if ((schema is None) or isinstance(schema, (list, tuple))):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for (i, name) in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif (not isinstance(schema, StructType)):
raise TypeError(('schema should be StructType or list or None, but got: %s' % schema))
rdd = rdd.map(schema.toInternal)
return (rdd, schema) |
def _createFromLocal(self, data, schema):
'\n Create an RDD for DataFrame from a list or pandas.DataFrame, returns\n the RDD and schema.\n '
if (not isinstance(data, list)):
data = list(data)
if ((schema is None) or isinstance(schema, (list, tuple))):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for (i, name) in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif (not isinstance(schema, StructType)):
raise TypeError(('schema should be StructType or list or None, but got: %s' % schema))
data = [schema.toInternal(row) for row in data]
return (self._sc.parallelize(data), schema) | -773,945,324,360,580,500 | Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema. | python/pyspark/sql/session.py | _createFromLocal | DislabNJU/Spark | python | def _createFromLocal(self, data, schema):
'\n Create an RDD for DataFrame from a list or pandas.DataFrame, returns\n the RDD and schema.\n '
if (not isinstance(data, list)):
data = list(data)
if ((schema is None) or isinstance(schema, (list, tuple))):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for (i, name) in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif (not isinstance(schema, StructType)):
raise TypeError(('schema should be StructType or list or None, but got: %s' % schema))
data = [schema.toInternal(row) for row in data]
return (self._sc.parallelize(data), schema) |
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
'\n Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.\n\n When ``schema`` is a list of column names, the type of each column\n will be inferred from ``data``.\n\n When ``schema`` is ``None``, it will try to infer the schema (column names and types)\n from ``data``, which should be an RDD of :class:`Row`,\n or :class:`namedtuple`, or :class:`dict`.\n\n When ``schema`` is :class:`pyspark.sql.types.DataType` or\n :class:`pyspark.sql.types.StringType`, it must match the\n real data, or an exception will be thrown at runtime. If the given schema is not\n :class:`pyspark.sql.types.StructType`, it will be wrapped into a\n :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",\n each record will also be wrapped into a tuple, which can be converted to row later.\n\n If schema inference is needed, ``samplingRatio`` is used to determined the ratio of\n rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.\n\n :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,\n etc.), or :class:`list`, or :class:`pandas.DataFrame`.\n :param schema: a :class:`pyspark.sql.types.DataType` or a\n :class:`pyspark.sql.types.StringType` or a list of\n column names, default is ``None``. The data type string format equals to\n :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can\n omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use\n ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use\n ``int`` as a short name for ``IntegerType``.\n :param samplingRatio: the sample ratio of rows used for inferring\n :param verifySchema: verify data types of every row against schema.\n :return: :class:`DataFrame`\n\n .. versionchanged:: 2.0.1\n Added verifySchema.\n\n >>> l = [(\'Alice\', 1)]\n >>> spark.createDataFrame(l).collect()\n [Row(_1=u\'Alice\', _2=1)]\n >>> spark.createDataFrame(l, [\'name\', \'age\']).collect()\n [Row(name=u\'Alice\', age=1)]\n\n >>> d = [{\'name\': \'Alice\', \'age\': 1}]\n >>> spark.createDataFrame(d).collect()\n [Row(age=1, name=u\'Alice\')]\n\n >>> rdd = sc.parallelize(l)\n >>> spark.createDataFrame(rdd).collect()\n [Row(_1=u\'Alice\', _2=1)]\n >>> df = spark.createDataFrame(rdd, [\'name\', \'age\'])\n >>> df.collect()\n [Row(name=u\'Alice\', age=1)]\n\n >>> from pyspark.sql import Row\n >>> Person = Row(\'name\', \'age\')\n >>> person = rdd.map(lambda r: Person(*r))\n >>> df2 = spark.createDataFrame(person)\n >>> df2.collect()\n [Row(name=u\'Alice\', age=1)]\n\n >>> from pyspark.sql.types import *\n >>> schema = StructType([\n ... StructField("name", StringType(), True),\n ... StructField("age", IntegerType(), True)])\n >>> df3 = spark.createDataFrame(rdd, schema)\n >>> df3.collect()\n [Row(name=u\'Alice\', age=1)]\n\n >>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP\n [Row(name=u\'Alice\', age=1)]\n >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP\n [Row(0=1, 1=2)]\n\n >>> spark.createDataFrame(rdd, "a: string, b: int").collect()\n [Row(a=u\'Alice\', b=1)]\n >>> rdd = rdd.map(lambda row: row[1])\n >>> spark.createDataFrame(rdd, "int").collect()\n [Row(value=1)]\n >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n Py4JJavaError: ...\n '
if isinstance(data, DataFrame):
raise TypeError('data is already a DataFrame')
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if (has_pandas and isinstance(data, pandas.DataFrame)):
if (schema is None):
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = (_verify_type if verifySchema else (lambda _, t: True))
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add('value', schema)
def prepare(obj):
verify_func(obj, dataType)
return (obj,)
else:
if isinstance(schema, list):
schema = [(x.encode('utf-8') if (not isinstance(x, str)) else x) for x in schema]
prepare = (lambda obj: obj)
if isinstance(data, RDD):
(rdd, schema) = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
(rdd, schema) = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df | -4,672,668,649,996,320,000 | Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
column names, default is ``None``. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ... | python/pyspark/sql/session.py | createDataFrame | DislabNJU/Spark | python | @since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
'\n Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.\n\n When ``schema`` is a list of column names, the type of each column\n will be inferred from ``data``.\n\n When ``schema`` is ``None``, it will try to infer the schema (column names and types)\n from ``data``, which should be an RDD of :class:`Row`,\n or :class:`namedtuple`, or :class:`dict`.\n\n When ``schema`` is :class:`pyspark.sql.types.DataType` or\n :class:`pyspark.sql.types.StringType`, it must match the\n real data, or an exception will be thrown at runtime. If the given schema is not\n :class:`pyspark.sql.types.StructType`, it will be wrapped into a\n :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",\n each record will also be wrapped into a tuple, which can be converted to row later.\n\n If schema inference is needed, ``samplingRatio`` is used to determined the ratio of\n rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.\n\n :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,\n etc.), or :class:`list`, or :class:`pandas.DataFrame`.\n :param schema: a :class:`pyspark.sql.types.DataType` or a\n :class:`pyspark.sql.types.StringType` or a list of\n column names, default is ``None``. The data type string format equals to\n :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can\n omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use\n ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use\n ``int`` as a short name for ``IntegerType``.\n :param samplingRatio: the sample ratio of rows used for inferring\n :param verifySchema: verify data types of every row against schema.\n :return: :class:`DataFrame`\n\n .. versionchanged:: 2.0.1\n Added verifySchema.\n\n >>> l = [(\'Alice\', 1)]\n >>> spark.createDataFrame(l).collect()\n [Row(_1=u\'Alice\', _2=1)]\n >>> spark.createDataFrame(l, [\'name\', \'age\']).collect()\n [Row(name=u\'Alice\', age=1)]\n\n >>> d = [{\'name\': \'Alice\', \'age\': 1}]\n >>> spark.createDataFrame(d).collect()\n [Row(age=1, name=u\'Alice\')]\n\n >>> rdd = sc.parallelize(l)\n >>> spark.createDataFrame(rdd).collect()\n [Row(_1=u\'Alice\', _2=1)]\n >>> df = spark.createDataFrame(rdd, [\'name\', \'age\'])\n >>> df.collect()\n [Row(name=u\'Alice\', age=1)]\n\n >>> from pyspark.sql import Row\n >>> Person = Row(\'name\', \'age\')\n >>> person = rdd.map(lambda r: Person(*r))\n >>> df2 = spark.createDataFrame(person)\n >>> df2.collect()\n [Row(name=u\'Alice\', age=1)]\n\n >>> from pyspark.sql.types import *\n >>> schema = StructType([\n ... StructField("name", StringType(), True),\n ... StructField("age", IntegerType(), True)])\n >>> df3 = spark.createDataFrame(rdd, schema)\n >>> df3.collect()\n [Row(name=u\'Alice\', age=1)]\n\n >>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP\n [Row(name=u\'Alice\', age=1)]\n >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP\n [Row(0=1, 1=2)]\n\n >>> spark.createDataFrame(rdd, "a: string, b: int").collect()\n [Row(a=u\'Alice\', b=1)]\n >>> rdd = rdd.map(lambda row: row[1])\n >>> spark.createDataFrame(rdd, "int").collect()\n [Row(value=1)]\n >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL\n Traceback (most recent call last):\n ...\n Py4JJavaError: ...\n '
if isinstance(data, DataFrame):
raise TypeError('data is already a DataFrame')
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if (has_pandas and isinstance(data, pandas.DataFrame)):
if (schema is None):
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = (_verify_type if verifySchema else (lambda _, t: True))
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add('value', schema)
def prepare(obj):
verify_func(obj, dataType)
return (obj,)
else:
if isinstance(schema, list):
schema = [(x.encode('utf-8') if (not isinstance(x, str)) else x) for x in schema]
prepare = (lambda obj: obj)
if isinstance(data, RDD):
(rdd, schema) = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
(rdd, schema) = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df |
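A usage sketch may help read the createDataFrame record above; this is illustrative only and assumes an already-built SparkSession bound to the name spark (not part of the record), with invented column names and rows.

from pyspark.sql.types import StructType, StructField, StringType, IntegerType

schema = StructType([
    StructField("name", StringType(), True),
    StructField("age", IntegerType(), True),
])
rows = [("Alice", 1), ("Bob", 2)]                                # local data, _createFromLocal branch
df = spark.createDataFrame(rows, schema)                         # verifySchema=True checks each row
df_ddl = spark.createDataFrame(rows, "name: string, age: int")   # DDL-style schema string is parsed
df_names = spark.createDataFrame(rows, ["name", "age"])          # bare column names trigger type inference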
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
'Returns a :class:`DataFrame` representing the result of the given query.\n\n :return: :class:`DataFrame`\n\n >>> df.createOrReplaceTempView("table1")\n >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")\n >>> df2.collect()\n [Row(f1=1, f2=u\'row1\'), Row(f1=2, f2=u\'row2\'), Row(f1=3, f2=u\'row3\')]\n '
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped) | 6,139,502,417,837,409,000 | Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')] | python/pyspark/sql/session.py | sql | DislabNJU/Spark | python | @ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
'Returns a :class:`DataFrame` representing the result of the given query.\n\n :return: :class:`DataFrame`\n\n >>> df.createOrReplaceTempView("table1")\n >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")\n >>> df2.collect()\n [Row(f1=1, f2=u\'row1\'), Row(f1=2, f2=u\'row2\'), Row(f1=3, f2=u\'row3\')]\n '
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped) |
@since(2.0)
def table(self, tableName):
'Returns the specified table as a :class:`DataFrame`.\n\n :return: :class:`DataFrame`\n\n >>> df.createOrReplaceTempView("table1")\n >>> df2 = spark.table("table1")\n >>> sorted(df.collect()) == sorted(df2.collect())\n True\n '
return DataFrame(self._jsparkSession.table(tableName), self._wrapped) | -1,685,094,057,636,237,000 | Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True | python/pyspark/sql/session.py | table | DislabNJU/Spark | python | @since(2.0)
def table(self, tableName):
'Returns the specified table as a :class:`DataFrame`.\n\n :return: :class:`DataFrame`\n\n >>> df.createOrReplaceTempView("table1")\n >>> df2 = spark.table("table1")\n >>> sorted(df.collect()) == sorted(df2.collect())\n True\n '
return DataFrame(self._jsparkSession.table(tableName), self._wrapped) |
@property
@since(2.0)
def read(self):
'\n Returns a :class:`DataFrameReader` that can be used to read data\n in as a :class:`DataFrame`.\n\n :return: :class:`DataFrameReader`\n '
return DataFrameReader(self._wrapped) | 8,627,199,859,067,963,000 | Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader` | python/pyspark/sql/session.py | read | DislabNJU/Spark | python | @property
@since(2.0)
def read(self):
'\n Returns a :class:`DataFrameReader` that can be used to read data\n in as a :class:`DataFrame`.\n\n :return: :class:`DataFrameReader`\n '
return DataFrameReader(self._wrapped) |
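A brief, hedged sketch of using the DataFrameReader returned by the read property above; spark refers to an existing SparkSession and the file paths are placeholders.

df_csv = (spark.read
               .format("csv")
               .option("header", "true")
               .load("/tmp/people.csv"))        # placeholder path
df_json = spark.read.json("/tmp/people.json")   # shorthand reader methods also exist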
@property
@since(2.0)
def readStream(self):
'\n Returns a :class:`DataStreamReader` that can be used to read data streams\n as a streaming :class:`DataFrame`.\n\n .. note:: Experimental.\n\n :return: :class:`DataStreamReader`\n '
return DataStreamReader(self._wrapped) | 7,571,571,327,525,079,000 | Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader` | python/pyspark/sql/session.py | readStream | DislabNJU/Spark | python | @property
@since(2.0)
def readStream(self):
'\n Returns a :class:`DataStreamReader` that can be used to read data streams\n as a streaming :class:`DataFrame`.\n\n .. note:: Experimental.\n\n :return: :class:`DataStreamReader`\n '
return DataStreamReader(self._wrapped) |
@property
@since(2.0)
def streams(self):
'Returns a :class:`StreamingQueryManager` that allows managing all the\n :class:`StreamingQuery` StreamingQueries active on `this` context.\n\n .. note:: Experimental.\n\n :return: :class:`StreamingQueryManager`\n '
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams()) | -486,802,360,053,188,400 | Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` StreamingQueries active on `this` context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager` | python/pyspark/sql/session.py | streams | DislabNJU/Spark | python | @property
@since(2.0)
def streams(self):
'Returns a :class:`StreamingQueryManager` that allows managing all the\n :class:`StreamingQuery` StreamingQueries active on `this` context.\n\n .. note:: Experimental.\n\n :return: :class:`StreamingQueryManager`\n '
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams()) |
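readStream and streams above are normally used together: the reader builds a streaming DataFrame, writeStream starts a query, and the StreamingQueryManager tracks it. A hedged sketch; spark, the socket source, and localhost:9999 are assumptions chosen only for illustration.

lines = (spark.readStream
              .format("socket")
              .option("host", "localhost")
              .option("port", 9999)
              .load())
query = lines.writeStream.format("console").start()
print([q.id for q in spark.streams.active])   # queries tracked by the manager
query.stop()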
@since(2.0)
def stop(self):
'Stop the underlying :class:`SparkContext`.\n '
self._sc.stop()
SparkSession._instantiatedSession = None | -528,874,284,068,885,300 | Stop the underlying :class:`SparkContext`. | python/pyspark/sql/session.py | stop | DislabNJU/Spark | python | @since(2.0)
def stop(self):
'\n '
self._sc.stop()
SparkSession._instantiatedSession = None |
@since(2.0)
def __enter__(self):
"\n Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.\n "
return self | 6,458,408,526,494,362,000 | Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax. | python/pyspark/sql/session.py | __enter__ | DislabNJU/Spark | python | @since(2.0)
def __enter__(self):
"\n \n "
return self |
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"\n Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.\n\n Specifically stop the SparkSession on exit of the with block.\n "
self.stop() | -7,856,880,423,868,570,000 | Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block. | python/pyspark/sql/session.py | __exit__ | DislabNJU/Spark | python | @since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"\n Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.\n\n Specifically stop the SparkSession on exit of the with block.\n "
self.stop() |
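__enter__ and __exit__ above are what enable the with-statement form; a minimal sketch, assuming pyspark is importable and the app name is arbitrary.

from pyspark.sql import SparkSession

with SparkSession.builder.appName("ctx-demo").getOrCreate() as session:
    session.sql("SELECT 1 AS one").show()
# __exit__ has called session.stop() at this point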
@since(2.0)
def config(self, key=None, value=None, conf=None):
'Sets a config option. Options set using this method are automatically propagated to\n both :class:`SparkConf` and :class:`SparkSession`\'s own configuration.\n\n For an existing SparkConf, use `conf` parameter.\n\n >>> from pyspark.conf import SparkConf\n >>> SparkSession.builder.config(conf=SparkConf())\n <pyspark.sql.session...\n\n For a (key, value) pair, you can omit parameter names.\n\n >>> SparkSession.builder.config("spark.some.config.option", "some-value")\n <pyspark.sql.session...\n\n :param key: a key name string for configuration property\n :param value: a value for configuration property\n :param conf: an instance of :class:`SparkConf`\n '
with self._lock:
if (conf is None):
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self | 6,164,548,905,806,063,000 | Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf` | python/pyspark/sql/session.py | config | DislabNJU/Spark | python | @since(2.0)
def config(self, key=None, value=None, conf=None):
'Sets a config option. Options set using this method are automatically propagated to\n both :class:`SparkConf` and :class:`SparkSession`\'s own configuration.\n\n For an existing SparkConf, use `conf` parameter.\n\n >>> from pyspark.conf import SparkConf\n >>> SparkSession.builder.config(conf=SparkConf())\n <pyspark.sql.session...\n\n For a (key, value) pair, you can omit parameter names.\n\n >>> SparkSession.builder.config("spark.some.config.option", "some-value")\n <pyspark.sql.session...\n\n :param key: a key name string for configuration property\n :param value: a value for configuration property\n :param conf: an instance of :class:`SparkConf`\n '
with self._lock:
if (conf is None):
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self |
@since(2.0)
def master(self, master):
'Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"\n to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone\n cluster.\n\n :param master: a url for spark master\n '
return self.config('spark.master', master) | 7,944,548,636,390,787,000 | Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master | python/pyspark/sql/session.py | master | DislabNJU/Spark | python | @since(2.0)
def master(self, master):
'Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"\n to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone\n cluster.\n\n :param master: a url for spark master\n '
return self.config('spark.master', master) |
@since(2.0)
def appName(self, name):
'Sets a name for the application, which will be shown in the Spark web UI.\n\n If no application name is set, a randomly generated name will be used.\n\n :param name: an application name\n '
return self.config('spark.app.name', name) | -3,828,958,710,499,067,400 | Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name | python/pyspark/sql/session.py | appName | DislabNJU/Spark | python | @since(2.0)
def appName(self, name):
'Sets a name for the application, which will be shown in the Spark web UI.\n\n If no application name is set, a randomly generated name will be used.\n\n :param name: an application name\n '
return self.config('spark.app.name', name) |
@since(2.0)
def enableHiveSupport(self):
'Enables Hive support, including connectivity to a persistent Hive metastore, support\n for Hive serdes, and Hive user-defined functions.\n '
return self.config('spark.sql.catalogImplementation', 'hive') | -6,293,888,213,969,502,000 | Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions. | python/pyspark/sql/session.py | enableHiveSupport | DislabNJU/Spark | python | @since(2.0)
def enableHiveSupport(self):
'Enables Hive support, including connectivity to a persistent Hive metastore, support\n for Hive serdes, and Hive user-defined functions.\n '
return self.config('spark.sql.catalogImplementation', 'hive') |
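Because each builder method above returns self, configuration is usually written as one chain; a hedged sketch in which the master URL and the option key/value are placeholders.

from pyspark.sql import SparkSession

spark = (SparkSession.builder
                     .master("local[4]")
                     .appName("builder-demo")
                     .config("spark.some.config.option", "some-value")
                     .enableHiveSupport()
                     .getOrCreate())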
@since(2.0)
def getOrCreate(self):
'Gets an existing :class:`SparkSession` or, if there is no existing one, creates a\n new one based on the options set in this builder.\n\n This method first checks whether there is a valid global default SparkSession, and if\n yes, return that one. If no valid global default SparkSession exists, the method\n creates a new SparkSession and assigns the newly created SparkSession as the global\n default.\n\n >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()\n >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"\n True\n\n In case an existing SparkSession is returned, the config options specified\n in this builder will be applied to the existing SparkSession.\n\n >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()\n >>> s1.conf.get("k1") == s2.conf.get("k1")\n True\n >>> s1.conf.get("k2") == s2.conf.get("k2")\n True\n '
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if ((session is None) or (session._sc._jsc is None)):
sparkConf = SparkConf()
for (key, value) in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
for (key, value) in self._options.items():
sc._conf.set(key, value)
session = SparkSession(sc)
for (key, value) in self._options.items():
session.conf.set(key, value)
for (key, value) in self._options.items():
session.sparkContext._conf.set(key, value)
return session | 4,076,772,902,933,181,400 | Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True | python/pyspark/sql/session.py | getOrCreate | DislabNJU/Spark | python | @since(2.0)
def getOrCreate(self):
'Gets an existing :class:`SparkSession` or, if there is no existing one, creates a\n new one based on the options set in this builder.\n\n This method first checks whether there is a valid global default SparkSession, and if\n yes, return that one. If no valid global default SparkSession exists, the method\n creates a new SparkSession and assigns the newly created SparkSession as the global\n default.\n\n >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()\n >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"\n True\n\n In case an existing SparkSession is returned, the config options specified\n in this builder will be applied to the existing SparkSession.\n\n >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()\n >>> s1.conf.get("k1") == s2.conf.get("k1")\n True\n >>> s1.conf.get("k2") == s2.conf.get("k2")\n True\n '
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if ((session is None) or (session._sc._jsc is None)):
sparkConf = SparkConf()
for (key, value) in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
for (key, value) in self._options.items():
sc._conf.set(key, value)
session = SparkSession(sc)
for (key, value) in self._options.items():
session.conf.set(key, value)
for (key, value) in self._options.items():
session.sparkContext._conf.set(key, value)
return session |
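A small sketch of the reuse semantics implemented by getOrCreate above: while a session is instantiated and its underlying context is alive, later builder calls return that same object and only apply the new options to it. The keys k1/k2 are arbitrary.

from pyspark.sql import SparkSession

s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
assert s1 is s2                       # the instantiated session is reused
assert s1.conf.get("k2") == "v2"      # the new option was applied to it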
def add_author_to_blog(apps, schema_editor):
'Author is the claimant'
Blog = apps.get_model('lowfat', 'Blog')
for blog in Blog.objects.all():
blog.author = blog.fund.claimant
blog.save() | -5,402,210,210,942,466,000 | Author is the claimant | lowfat/migrations/0090_auto_20170307_1518.py | add_author_to_blog | elena-kolomeets/lowfat | python | def add_author_to_blog(apps, schema_editor):
Blog = apps.get_model('lowfat', 'Blog')
for blog in Blog.objects.all():
blog.author = blog.fund.claimant
blog.save() |
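A data-migration function like add_author_to_blog above is normally registered through RunPython in the migration's operations list; a hedged sketch in which the dependency name is a placeholder and the reverse step is a no-op.

from django.db import migrations

class Migration(migrations.Migration):

    dependencies = [
        ('lowfat', '0089_previous_migration'),   # placeholder dependency
    ]

    operations = [
        migrations.RunPython(add_author_to_blog, migrations.RunPython.noop),
    ]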
def partition(predicate: Callable[([Any], bool)], iterator: Sequence[Any]) -> Tuple[(List[Any], List[Any])]:
'A stable, out-of-place partition.'
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
return (results[1], results[0]) | 8,080,160,133,644,472,000 | A stable, out-of-place partition. | bin/fixup_oslogin_v1_keywords.py | partition | fahmi-aa/tmdb | python | def partition(predicate: Callable[([Any], bool)], iterator: Sequence[Any]) -> Tuple[(List[Any], List[Any])]:
results = ([], [])
for i in iterator:
results[int(predicate(i))].append(i)
return (results[1], results[0]) |
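A quick usage example for the partition helper above:

evens, odds = partition(lambda n: n % 2 == 0, [3, 1, 4, 1, 5, 9, 2, 6])
# evens == [4, 2, 6] and odds == [3, 1, 1, 5, 9]; input order is preserved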
def fix_files(in_dir: pathlib.Path, out_dir: pathlib.Path, *, transformer=osloginCallTransformer()):
'Duplicate the input dir to the output dir, fixing file method calls.\n\n Preconditions:\n * in_dir is a real directory\n * out_dir is a real, empty directory\n '
pyfile_gen = (pathlib.Path(os.path.join(root, f)) for (root, _, files) in os.walk(in_dir) for f in files if (os.path.splitext(f)[1] == '.py'))
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
tree = cst.parse_module(src)
updated = tree.visit(transformer)
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
with open(updated_path, 'w') as f:
f.write(updated.code) | 3,131,379,621,797,957,600 | Duplicate the input dir to the output dir, fixing file method calls.
Preconditions:
* in_dir is a real directory
* out_dir is a real, empty directory | bin/fixup_oslogin_v1_keywords.py | fix_files | fahmi-aa/tmdb | python | def fix_files(in_dir: pathlib.Path, out_dir: pathlib.Path, *, transformer=osloginCallTransformer()):
'Duplicate the input dir to the output dir, fixing file method calls.\n\n Preconditions:\n * in_dir is a real directory\n * out_dir is a real, empty directory\n '
pyfile_gen = (pathlib.Path(os.path.join(root, f)) for (root, _, files) in os.walk(in_dir) for f in files if (os.path.splitext(f)[1] == '.py'))
for fpath in pyfile_gen:
with open(fpath, 'r') as f:
src = f.read()
tree = cst.parse_module(src)
updated = tree.visit(transformer)
updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
updated_path.parent.mkdir(parents=True, exist_ok=True)
with open(updated_path, 'w') as f:
f.write(updated.code) |
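A hedged sketch of calling fix_files above directly; the directory names are placeholders (the real fixup scripts usually take them from argparse), and the default osloginCallTransformer defined earlier in that script is used.

import pathlib

in_dir = pathlib.Path("google/cloud/oslogin_v1")   # placeholder: package to rewrite
out_dir = pathlib.Path("oslogin_v1_fixed")         # should exist and be empty per the docstring
out_dir.mkdir(parents=True, exist_ok=True)
fix_files(in_dir, out_dir)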
def recalc_path(self, model_inst):
    'Compute the upload path; each path-format setting may also be a function.'
try:
uSettings = self.upload_settings
if ('filePathFormat' in self._upload_settings):
uSettings['filePathFormat'] = calc_path(self._upload_settings['filePathFormat'], model_inst)
if ('imagePathFormat' in self._upload_settings):
uSettings['imagePathFormat'] = calc_path(self._upload_settings['imagePathFormat'], model_inst)
if ('scrawlPathFormat' in self._upload_settings):
uSettings['scrawlPathFormat'] = calc_path(self._upload_settings['scrawlPathFormat'], model_inst)
if ('videoPathFormat' in self._upload_settings):
uSettings['videoPathFormat'] = (calc_path(self._upload_settings['videoPathFormat'], model_inst),)
if ('snapscreenPathFormat' in self._upload_settings):
uSettings['snapscreenPathFormat'] = calc_path(self._upload_settings['snapscreenPathFormat'], model_inst)
if ('catcherPathFormat' in self._upload_settings):
uSettings['catcherPathFormat'] = calc_path(self._upload_settings['catcherPathFormat'], model_inst)
if ('imageManagerListPath' in self._upload_settings):
uSettings['imageManagerListPath'] = calc_path(self._upload_settings['imageManagerListPath'], model_inst)
if ('fileManagerListPath' in self._upload_settings):
uSettings['fileManagerListPath'] = calc_path(self._upload_settings['fileManagerListPath'], model_inst)
if (uSettings['imagePathFormat'] != ''):
default_path = uSettings['imagePathFormat']
uSettings['scrawlPathFormat'] = uSettings.get('scrawlPathFormat', default_path)
uSettings['videoPathFormat'] = uSettings.get('videoPathFormat', default_path)
uSettings['snapscreenPathFormat'] = uSettings.get('snapscreenPathFormat', default_path)
uSettings['catcherPathFormat'] = uSettings.get('catcherPathFormat', default_path)
uSettings['imageManagerListPath'] = uSettings.get('imageManagerListPath', default_path)
if (uSettings['filePathFormat'] != ''):
uSettings['fileManagerListPath'] = uSettings.get('fileManagerListPath', uSettings['filePathFormat'])
except:
        pass | 215,576,986,300,150,700 | Compute the upload path; each path-format setting may also be a function. | DjangoUeditor/widgets.py | recalc_path | Jeyrce/ishare | python | def recalc_path(self, model_inst):
try:
uSettings = self.upload_settings
if ('filePathFormat' in self._upload_settings):
uSettings['filePathFormat'] = calc_path(self._upload_settings['filePathFormat'], model_inst)
if ('imagePathFormat' in self._upload_settings):
uSettings['imagePathFormat'] = calc_path(self._upload_settings['imagePathFormat'], model_inst)
if ('scrawlPathFormat' in self._upload_settings):
uSettings['scrawlPathFormat'] = calc_path(self._upload_settings['scrawlPathFormat'], model_inst)
if ('videoPathFormat' in self._upload_settings):
uSettings['videoPathFormat'] = (calc_path(self._upload_settings['videoPathFormat'], model_inst),)
if ('snapscreenPathFormat' in self._upload_settings):
uSettings['snapscreenPathFormat'] = calc_path(self._upload_settings['snapscreenPathFormat'], model_inst)
if ('catcherPathFormat' in self._upload_settings):
uSettings['catcherPathFormat'] = calc_path(self._upload_settings['catcherPathFormat'], model_inst)
if ('imageManagerListPath' in self._upload_settings):
uSettings['imageManagerListPath'] = calc_path(self._upload_settings['imageManagerListPath'], model_inst)
if ('fileManagerListPath' in self._upload_settings):
uSettings['fileManagerListPath'] = calc_path(self._upload_settings['fileManagerListPath'], model_inst)
        if (uSettings['imagePathFormat'] != ''):
default_path = uSettings['imagePathFormat']
uSettings['scrawlPathFormat'] = uSettings.get('scrawlPathFormat', default_path)
uSettings['videoPathFormat'] = uSettings.get('videoPathFormat', default_path)
uSettings['snapscreenPathFormat'] = uSettings.get('snapscreenPathFormat', default_path)
uSettings['catcherPathFormat'] = uSettings.get('catcherPathFormat', default_path)
uSettings['imageManagerListPath'] = uSettings.get('imageManagerListPath', default_path)
if (uSettings['filePathFormat'] != ):
uSettings['fileManagerListPath'] = uSettings.get('fileManagerListPath', uSettings['filePathFormat'])
except:
pass |
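The point of recalc_path above is that each *PathFormat entry in the widget's upload settings may be either a literal string or a callable evaluated against the model instance via calc_path; a hedged sketch of such a mapping follows (how it reaches _upload_settings is not shown in the record, and the pk-based path in the lambda is hypothetical).

upload_settings = {
    'imagePathFormat': lambda model_inst: 'uploads/blog/%s/images/' % model_inst.pk,
    'filePathFormat': 'uploads/files/',   # a fixed string is also accepted
}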
def get_file_terms(file):
    'Returns a list of text blocks.'
file_terms = file.split('\n\n')
file_terms = [term_text.split('\n') for term_text in file_terms if term_text.startswith('[Term]')]
    return file_terms | 1,029,190,457,889,887,900 | Returns a list of text blocks. | scripts/proteinInteractionEBI/parse_ebi_test.py | get_file_terms | pradh/data | python | def get_file_terms(file):
file_terms = file.split('\n\n')
file_terms = [term_text.split('\n') for term_text in file_terms if term_text.startswith('[Term]')]
return file_terms |
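A tiny worked example for get_file_terms above, using a made-up OBO-style snippet:

obo_text = (
    "format-version: 1.2\n"
    "\n"
    "[Term]\n"
    "id: MI:0001\n"
    "name: interaction detection method\n"
    "\n"
    "[Term]\n"
    "id: MI:0045\n"
    "name: experimental interaction detection"
)
terms = get_file_terms(obo_text)
# terms == [['[Term]', 'id: MI:0001', 'name: interaction detection method'],
#           ['[Term]', 'id: MI:0045', 'name: experimental interaction detection']]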
def test_get_id_maps(self):
"Test function get_id_maps. Note that id_to_node here doesn't have parent_child\n relation, so only map keys are tested."
(id_to_class_name, id_to_node) = parse_ebi.get_id_maps(CONST_FILE_TERMS)
self.assertEqual(id_to_class_name, CONST_ID_TO_CLASS_NAME)
self.assertEqual(id_to_node.keys(), CONST_ID_TO_NODE_NO_RELATION.keys()) | 7,544,115,261,596,032,000 | Test function get_id_maps. Note that id_to_node here doesn't have parent_child
relation, so only map keys are tested. | scripts/proteinInteractionEBI/parse_ebi_test.py | test_get_id_maps | pradh/data | python | def test_get_id_maps(self):
"Test function get_id_maps. Note that id_to_node here doesn't have parent_child\n relation, so only map keys are tested."
(id_to_class_name, id_to_node) = parse_ebi.get_id_maps(CONST_FILE_TERMS)
self.assertEqual(id_to_class_name, CONST_ID_TO_CLASS_NAME)
self.assertEqual(id_to_node.keys(), CONST_ID_TO_NODE_NO_RELATION.keys()) |
def test_build_child_parent_link(self):
'Test function build_child_parent_link by checking the values of\n child_list and parent_list.'
id_to_node = copy.deepcopy(CONST_ID_TO_NODE_NO_RELATION)
id_to_node = parse_ebi.build_child_parent_link(CONST_FILE_TERMS, id_to_node)
def get_node_value_set(node_list):
value_set = set()
for node in node_list:
value_set.add(node.value)
return value_set
for id_key in id_to_node:
parent_value_set = get_node_value_set(id_to_node[id_key].parent_list)
const_parent_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].parent_list)
child_value_set = get_node_value_set(id_to_node[id_key].child_list)
const_child_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].child_list)
self.assertEqual(parent_value_set, const_parent_value_set)
self.assertEqual(child_value_set, const_child_value_set) | -4,787,189,117,368,866,000 | Test function build_child_parent_link by checking the values of
child_list and parent_list. | scripts/proteinInteractionEBI/parse_ebi_test.py | test_build_child_parent_link | pradh/data | python | def test_build_child_parent_link(self):
'Test function build_child_parent_link by checking the values of\n child_list and parent_list.'
id_to_node = copy.deepcopy(CONST_ID_TO_NODE_NO_RELATION)
id_to_node = parse_ebi.build_child_parent_link(CONST_FILE_TERMS, id_to_node)
def get_node_value_set(node_list):
value_set = set()
for node in node_list:
value_set.add(node.value)
return value_set
for id_key in id_to_node:
parent_value_set = get_node_value_set(id_to_node[id_key].parent_list)
const_parent_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].parent_list)
child_value_set = get_node_value_set(id_to_node[id_key].child_list)
const_child_value_set = get_node_value_set(CONST_ID_TO_NODE[id_key].child_list)
self.assertEqual(parent_value_set, const_parent_value_set)
self.assertEqual(child_value_set, const_child_value_set) |
def test_TreeBuilder(self):
'Test TreeBuilder class.'
dfs_caller = parse_ebi.TreeBuilder(CONST_ID_TO_NODE)
INTERACTION_TYPE_ROOT = 'MI:0001'
interaction_type_id_set = dfs_caller.get_subset_id(INTERACTION_TYPE_ROOT)
self.assertEqual(interaction_type_id_set, CONST_INTERACTION_TYPE_ID_SET) | -7,194,737,580,420,136,000 | Test TreeBuilder class. | scripts/proteinInteractionEBI/parse_ebi_test.py | test_TreeBuilder | pradh/data | python | def test_TreeBuilder(self):
dfs_caller = parse_ebi.TreeBuilder(CONST_ID_TO_NODE)
INTERACTION_TYPE_ROOT = 'MI:0001'
interaction_type_id_set = dfs_caller.get_subset_id(INTERACTION_TYPE_ROOT)
self.assertEqual(interaction_type_id_set, CONST_INTERACTION_TYPE_ID_SET) |
def test_get_schema_from_text(self):
'Test function get_schema_from_text by comparing the final schema.'
new_source_map = {'references': {}}
term = CONST_FILE_TERMS[1]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE, new_source_map, CONST_ID_TO_CLASS_NAME, CONST_INTERACTION_TYPE_ID_SET, set(), set())
self.assertEqual(schema_res[0], CONST_SCHEMA1)
term = CONST_FILE_TERMS[2]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE, new_source_map, CONST_ID_TO_CLASS_NAME, CONST_INTERACTION_TYPE_ID_SET, set(), set())
self.assertEqual(schema_res[0], CONST_SCHEMA2) | -3,037,779,471,115,059,700 | Test function get_schema_from_text by comparing the final schema. | scripts/proteinInteractionEBI/parse_ebi_test.py | test_get_schema_from_text | pradh/data | python | def test_get_schema_from_text(self):
new_source_map = {'references': {}}
term = CONST_FILE_TERMS[1]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE, new_source_map, CONST_ID_TO_CLASS_NAME, CONST_INTERACTION_TYPE_ID_SET, set(), set())
self.assertEqual(schema_res[0], CONST_SCHEMA1)
term = CONST_FILE_TERMS[2]
schema_res = parse_ebi.get_schema_from_text(term, CONST_ID_TO_NODE, new_source_map, CONST_ID_TO_CLASS_NAME, CONST_INTERACTION_TYPE_ID_SET, set(), set())
self.assertEqual(schema_res[0], CONST_SCHEMA2) |
def isValidBST(self, root):
'\n :type root: TreeNode\n :rtype: bool\n '
MAX = sys.maxint
MIN = ((- sys.maxint) - 1)
return self.isValidBSTHelper(root, MIN, MAX) | -8,699,976,859,105,511,000 | :type root: TreeNode
:rtype: bool | leetcode.com/python/98_Validate_Binary_Search_Tree.py | isValidBST | Ajaykumar98/Algorithms | python | def isValidBST(self, root):
'\n :type root: TreeNode\n :rtype: bool\n '
MAX = sys.maxint
MIN = ((- sys.maxint) - 1)
return self.isValidBSTHelper(root, MIN, MAX) |
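The record above relies on self.isValidBSTHelper, which is not included in the snippet; one common way to write it (a hedged sketch in the same Python 2 style, not necessarily the original author's version, and it would live on the same Solution class) is:

def isValidBSTHelper(self, node, low, high):
    # Every node must lie strictly inside the (low, high) range inherited from
    # its ancestors; the range tightens on the way down the tree.
    if node is None:
        return True
    if node.val <= low or node.val >= high:
        return False
    return (self.isValidBSTHelper(node.left, low, node.val) and
            self.isValidBSTHelper(node.right, node.val, high))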
def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
'\n Compute the linear model response to an input array sampled at given time\n instances.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system model to be simulated\n u : array_like\n The real-valued input sequence to force the model. 1D arrays for single\n input models and 2D arrays that has as many columns as the number of\n inputs are valid inputs.\n t : array_like, optional\n The real-valued sequence to be used for the evolution of the system.\n The values should be equally spaced otherwise an error is raised. For\n discrete time models increments different than the sampling period also\n raises an error. On the other hand for discrete models this can be\n omitted and a time sequence will be generated automatically.\n x0 : array_like, optional\n The initial condition array. If omitted an array of zeros is assumed.\n Note that Transfer models by definition assume zero initial conditions\n and will raise an error.\n per_channel : bool, optional\n If this is set to True and if the system has multiple inputs, the\n response of each input is returned individually. For example, if a\n system has 4 inputs and 3 outputs then the response shape becomes\n (num, p, m) instead of (num, p) where k-th slice (:, :, k) is the\n response from the k-th input channel. For single input systems, this\n keyword has no effect.\n\n Returns\n -------\n yout : ndarray\n The resulting response array. The array is 1D if sys is SISO and\n has p columns if sys has p outputs.\n tout : ndarray\n The time sequence used in the simulation. If the parameter t is not\n None then a copy of t is given.\n\n Notes\n -----\n For Transfer models, first conversion to a state model is performed and\n then the resulting model is used for computations.\n\n '
_check_for_state_or_transfer(sys)
if (x0 is not None):
if sys._isgain:
raise ValueError("Static system models can't have initial conditions set.")
if isinstance(sys, Transfer):
raise ValueError("Transfer models can't have initial conditions set.")
x0 = np.asarray(x0, dtype=float).squeeze()
if (x0.ndim > 1):
raise ValueError('Initial condition can only be a 1D array.')
else:
x0 = x0[:, None]
if (sys.NumberOfStates != x0.size):
raise ValueError('The initial condition size does not match the number of states of the model.')
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
(n, m) = (sys.NumberOfStates, sys.shape[1])
is_discrete = (sys.SamplingSet == 'Z')
u = np.asarray(u, dtype=float).squeeze()
if (u.ndim == 1):
u = u[:, None]
t = _check_u_and_t_for_simulation(m, sys._dt, u, t, is_discrete)
if sys._isgain:
if sys._isSISO:
yout = (u * sys.d.squeeze())
else:
if (m == 1):
per_channel = False
if per_channel:
yout = np.einsum('ij,jk->ikj', u, sys.d.T)
else:
yout = (u @ sys.d.T)
else:
if (not is_discrete):
sys = discretize(sys, (t[1] - t[0]), method='zoh')
sample_num = len(u)
(a, b, c, d) = sys.matrices
M_u = np.block([b.T, d.T])
at = a.T
if (m == 1):
per_channel = False
if per_channel:
xout = np.empty([sample_num, n, m], dtype=float)
for col in range(m):
xout[0, :, col] = (0.0 if (x0 is None) else x0.T)
Bu = (u[:, [col]] @ b.T[[col], :])
for row in range(1, sample_num):
xout[row, :, col] = ((xout[(row - 1), :, col] @ at) + Bu[(row - 1)])
yout = (np.einsum('ijk,jl->ilk', xout, c.T) + np.einsum('ij,jk->ikj', u, d.T))
else:
BDu = (u @ M_u)
xout = np.empty([sample_num, n], dtype=float)
xout[0] = (0.0 if (x0 is None) else x0.T)
for row in range(1, sample_num):
xout[row] = ((xout[(row - 1)] @ at) + BDu[(row - 1), :n])
yout = ((xout @ c.T) + BDu[:, n:])
return (yout, t) | 1,990,838,259,651,487,500 | Compute the linear model response to an input array sampled at given time
instances.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
u : array_like
The real-valued input sequence to force the model. 1D arrays for single
input models and 2D arrays that have as many columns as the number of
inputs are valid inputs.
t : array_like, optional
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced otherwise an error is raised. For
discrete time models increments different than the sampling period also
raises an error. On the other hand for discrete models this can be
omitted and a time sequence will be generated automatically.
x0 : array_like, optional
The initial condition array. If omitted an array of zeros is assumed.
Note that Transfer models by definition assume zero initial conditions
and will raise an error.
per_channel : bool, optional
If this is set to True and if the system has multiple inputs, the
response of each input is returned individually. For example, if a
system has 4 inputs and 3 outputs then the response shape becomes
(num, p, m) instead of (num, p) where k-th slice (:, :, k) is the
response from the k-th input channel. For single input systems, this
keyword has no effect.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs.
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given.
Notes
-----
For Transfer models, first conversion to a state model is performed and
then the resulting model is used for computations. | harold/_time_domain.py | simulate_linear_system | TavaresFilipe/harold | python | def simulate_linear_system(sys, u, t=None, x0=None, per_channel=False):
'\n Compute the linear model response to an input array sampled at given time\n instances.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system model to be simulated\n u : array_like\n The real-valued input sequence to force the model. 1D arrays for single\n input models and 2D arrays that has as many columns as the number of\n inputs are valid inputs.\n t : array_like, optional\n The real-valued sequence to be used for the evolution of the system.\n The values should be equally spaced otherwise an error is raised. For\n discrete time models increments different than the sampling period also\n raises an error. On the other hand for discrete models this can be\n omitted and a time sequence will be generated automatically.\n x0 : array_like, optional\n The initial condition array. If omitted an array of zeros is assumed.\n Note that Transfer models by definition assume zero initial conditions\n and will raise an error.\n per_channel : bool, optional\n If this is set to True and if the system has multiple inputs, the\n response of each input is returned individually. For example, if a\n system has 4 inputs and 3 outputs then the response shape becomes\n (num, p, m) instead of (num, p) where k-th slice (:, :, k) is the\n response from the k-th input channel. For single input systems, this\n keyword has no effect.\n\n Returns\n -------\n yout : ndarray\n The resulting response array. The array is 1D if sys is SISO and\n has p columns if sys has p outputs.\n tout : ndarray\n The time sequence used in the simulation. If the parameter t is not\n None then a copy of t is given.\n\n Notes\n -----\n For Transfer models, first conversion to a state model is performed and\n then the resulting model is used for computations.\n\n '
_check_for_state_or_transfer(sys)
if (x0 is not None):
if sys._isgain:
raise ValueError("Static system models can't have initial conditions set.")
if isinstance(sys, Transfer):
raise ValueError("Transfer models can't have initial conditions set.")
x0 = np.asarray(x0, dtype=float).squeeze()
if (x0.ndim > 1):
raise ValueError('Initial condition can only be a 1D array.')
else:
x0 = x0[:, None]
if (sys.NumberOfStates != x0.size):
raise ValueError('The initial condition size does not match the number of states of the model.')
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
(n, m) = (sys.NumberOfStates, sys.shape[1])
is_discrete = (sys.SamplingSet == 'Z')
u = np.asarray(u, dtype=float).squeeze()
if (u.ndim == 1):
u = u[:, None]
t = _check_u_and_t_for_simulation(m, sys._dt, u, t, is_discrete)
if sys._isgain:
if sys._isSISO:
yout = (u * sys.d.squeeze())
else:
if (m == 1):
per_channel = False
if per_channel:
yout = np.einsum('ij,jk->ikj', u, sys.d.T)
else:
yout = (u @ sys.d.T)
else:
if (not is_discrete):
sys = discretize(sys, (t[1] - t[0]), method='zoh')
sample_num = len(u)
(a, b, c, d) = sys.matrices
M_u = np.block([b.T, d.T])
at = a.T
if (m == 1):
per_channel = False
if per_channel:
xout = np.empty([sample_num, n, m], dtype=float)
for col in range(m):
xout[0, :, col] = (0.0 if (x0 is None) else x0.T)
Bu = (u[:, [col]] @ b.T[[col], :])
for row in range(1, sample_num):
xout[row, :, col] = ((xout[(row - 1), :, col] @ at) + Bu[(row - 1)])
yout = (np.einsum('ijk,jl->ilk', xout, c.T) + np.einsum('ij,jk->ikj', u, d.T))
else:
BDu = (u @ M_u)
xout = np.empty([sample_num, n], dtype=float)
xout[0] = (0.0 if (x0 is None) else x0.T)
for row in range(1, sample_num):
xout[row] = ((xout[(row - 1)] @ at) + BDu[(row - 1), :n])
yout = ((xout @ c.T) + BDu[:, n:])
return (yout, t) |
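A hedged usage sketch for simulate_linear_system above, assuming harold exposes State and the simulation functions at package level; the model, time grid, and input are arbitrary examples.

import numpy as np
from harold import State, simulate_linear_system

a = [[0., 1.], [-2., -3.]]
b = [[0.], [1.]]
c = [[1., 0.]]
G = State(a, b, c, [[0.]])               # continuous-time SISO model
t = np.linspace(0., 5., 501)             # evenly spaced grid (required for CT models)
u = np.ones_like(t)                      # unit-step input, treated as one column
y, tout = simulate_linear_system(G, u, t)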
def simulate_step_response(sys, t=None):
'\n Compute the linear model response to an Heaviside function (or all-ones\n array) sampled at given time instances.\n\n If the time array is omitted then a time sequence is generated based on\n the poles of the model.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system model to be simulated\n t : array_like\n The real-valued sequence to be used for the evolution of the system.\n The values should be equally spaced otherwise an error is raised. For\n discrete time models increments different than the sampling period also\n raises an error. On the other hand for discrete models this can be\n omitted and a time sequence will be generated automatically.\n\n Returns\n -------\n yout : ndarray\n The resulting response array. The array is 1D if sys is SISO and\n has p columns if sys has p outputs. If there are also m inputs the\n array is 3D array with the shape (<num of samples>, p, m)\n tout : ndarray\n The time sequence used in the simulation. If the parameter t is not\n None then a copy of t is given.\n\n '
_check_for_state_or_transfer(sys)
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if (t is None):
(tf, ts) = _compute_tfinal_and_dt(sys)
t = np.arange(0, (tf + ts), ts, dtype=float)
else:
(t, ts) = _check_custom_time_input(t)
m = sys.shape[1]
u = np.ones([len(t), m], dtype=float)
return simulate_linear_system(sys, u=u, t=t, per_channel=1) | -5,924,860,459,075,212,000 | Compute the linear model response to an Heaviside function (or all-ones
array) sampled at given time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced otherwise an error is raised. For
discrete time models increments different than the sampling period also
raises an error. On the other hand for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs the
array is 3D array with the shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given. | harold/_time_domain.py | simulate_step_response | TavaresFilipe/harold | python | def simulate_step_response(sys, t=None):
'\n Compute the linear model response to an Heaviside function (or all-ones\n array) sampled at given time instances.\n\n If the time array is omitted then a time sequence is generated based on\n the poles of the model.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system model to be simulated\n t : array_like\n The real-valued sequence to be used for the evolution of the system.\n The values should be equally spaced otherwise an error is raised. For\n discrete time models increments different than the sampling period also\n raises an error. On the other hand for discrete models this can be\n omitted and a time sequence will be generated automatically.\n\n Returns\n -------\n yout : ndarray\n The resulting response array. The array is 1D if sys is SISO and\n has p columns if sys has p outputs. If there are also m inputs the\n array is 3D array with the shape (<num of samples>, p, m)\n tout : ndarray\n The time sequence used in the simulation. If the parameter t is not\n None then a copy of t is given.\n\n '
_check_for_state_or_transfer(sys)
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if (t is None):
(tf, ts) = _compute_tfinal_and_dt(sys)
t = np.arange(0, (tf + ts), ts, dtype=float)
else:
(t, ts) = _check_custom_time_input(t)
m = sys.shape[1]
u = np.ones([len(t), m], dtype=float)
return simulate_linear_system(sys, u=u, t=t, per_channel=1) |
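A short sketch of the per-channel output shape produced by simulate_step_response above for a multi-input model; the imports and the toy 2-input/2-output system are assumptions for illustration.

import numpy as np
from harold import State, simulate_step_response

G2 = State(np.diag([-1., -4.]), np.eye(2), np.eye(2), np.zeros((2, 2)))
y, t = simulate_step_response(G2)
# y.shape == (len(t), 2, 2): slice y[:, :, k] is the response to a step on input k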
def simulate_impulse_response(sys, t=None):
'\n Compute the linear model response to an Dirac delta pulse (or all-zeros\n array except the first sample being 1/dt at each channel) sampled at given\n time instances.\n\n If the time array is omitted then a time sequence is generated based on\n the poles of the model.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system model to be simulated\n t : array_like\n The real-valued sequence to be used for the evolution of the system.\n The values should be equally spaced otherwise an error is raised. For\n discrete time models increments different than the sampling period also\n raises an error. On the other hand for discrete models this can be\n omitted and a time sequence will be generated automatically.\n\n Returns\n -------\n yout : ndarray\n The resulting response array. The array is 1D if sys is SISO and\n has p columns if sys has p outputs. If there are also m inputs the\n array is 3D array with the shape (<num of samples>, p, m)\n tout : ndarray\n The time sequence used in the simulation. If the parameter t is not\n None then a copy of t is given.\n\n '
_check_for_state_or_transfer(sys)
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if (t is None):
(tf, ts) = _compute_tfinal_and_dt(sys, is_step=False)
t = np.arange(0, (tf + ts), ts, dtype=float)
else:
(t, ts) = _check_custom_time_input(t)
m = sys.shape[1]
u = np.zeros([len(t), m], dtype=float)
u[0] = (1.0 / ts)
return simulate_linear_system(sys, u=u, t=t, per_channel=1) | 4,456,269,683,030,728,000 | Compute the linear model response to an Dirac delta pulse (or all-zeros
array except the first sample being 1/dt at each channel) sampled at given
time instances.
If the time array is omitted then a time sequence is generated based on
the poles of the model.
Parameters
----------
sys : {State, Transfer}
The system model to be simulated
t : array_like
The real-valued sequence to be used for the evolution of the system.
The values should be equally spaced otherwise an error is raised. For
discrete time models increments different than the sampling period also
raises an error. On the other hand for discrete models this can be
omitted and a time sequence will be generated automatically.
Returns
-------
yout : ndarray
The resulting response array. The array is 1D if sys is SISO and
has p columns if sys has p outputs. If there are also m inputs the
array is 3D array with the shape (<num of samples>, p, m)
tout : ndarray
The time sequence used in the simulation. If the parameter t is not
None then a copy of t is given. | harold/_time_domain.py | simulate_impulse_response | TavaresFilipe/harold | python | def simulate_impulse_response(sys, t=None):
'\n Compute the linear model response to an Dirac delta pulse (or all-zeros\n array except the first sample being 1/dt at each channel) sampled at given\n time instances.\n\n If the time array is omitted then a time sequence is generated based on\n the poles of the model.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system model to be simulated\n t : array_like\n The real-valued sequence to be used for the evolution of the system.\n The values should be equally spaced otherwise an error is raised. For\n discrete time models increments different than the sampling period also\n raises an error. On the other hand for discrete models this can be\n omitted and a time sequence will be generated automatically.\n\n Returns\n -------\n yout : ndarray\n The resulting response array. The array is 1D if sys is SISO and\n has p columns if sys has p outputs. If there are also m inputs the\n array is 3D array with the shape (<num of samples>, p, m)\n tout : ndarray\n The time sequence used in the simulation. If the parameter t is not\n None then a copy of t is given.\n\n '
_check_for_state_or_transfer(sys)
try:
_check_for_state(sys)
except ValueError:
sys = transfer_to_state(sys)
if (t is None):
(tf, ts) = _compute_tfinal_and_dt(sys, is_step=False)
t = np.arange(0, (tf + ts), ts, dtype=float)
else:
(t, ts) = _check_custom_time_input(t)
m = sys.shape[1]
u = np.zeros([len(t), m], dtype=float)
u[0] = (1.0 / ts)
return simulate_linear_system(sys, u=u, t=t, per_channel=1) |
def _compute_tfinal_and_dt(sys, is_step=True):
'\n Helper function to estimate a final time and a sampling period for\n time domain simulations. It is essentially geared towards impulse response\n but is also used for step responses.\n\n For discrete-time models, obviously dt is inherent and only tfinal is\n computed.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system to be investigated\n is_step : bool\n Scales the dc value by the magnitude of the nonzero mode since\n integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.\n Default is True.\n\n Returns\n -------\n tfinal : float\n The final time instance for which the simulation will be performed.\n dt : float\n The estimated sampling period for the simulation.\n\n Notes\n -----\n Just by evaluating the fastest mode for dt and slowest for tfinal often\n leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000]))\n since dt will be very small and tfinal will be too large though the fast\n mode hardly ever contributes. Similarly, change the numerator to [1, 2, 0]\n and the simulation would be unnecessarily long and the plot is virtually\n an L shape since the decay is so fast.\n\n Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR\n can be used such that only the modes that have significant effect on the\n time response are taken. But the sensitivity of the eigenvalues complicate\n the matter since dλ = <w, dA*v> with <w,v> = 1. Hence we can only work\n with simple poles with this formulation. See Golub, Van Loan Section 7.2.2\n for simple eigenvalue sensitivity about the nonunity of <w,v>. The size of\n the response is dependent on the size of the eigenshapes rather than the\n eigenvalues themselves.\n\n '
sqrt_eps = np.sqrt(np.spacing(1.0))
min_points = 100
min_points_z = 20
max_points = 10000
max_points_z = 75000
default_tfinal = 5
total_cycles = 5
pts_per_cycle = 25
log_decay_percent = np.log(100)
if sys._isgain:
if sys._isdiscrete:
return ((sys._dt * min_points_z), sys._dt)
else:
return (default_tfinal, (default_tfinal / min_points))
if sys._isdiscrete:
dt = sys._dt
tfinal = default_tfinal
p = eigvals(sys.a)
m_u = (np.abs(p) >= (1 + sqrt_eps))
(p_u, p) = (p[m_u], p[(~ m_u)])
if (p_u.size > 0):
m_u = ((p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps))
t_emp = np.max((log_decay_percent / np.abs((np.log(p_u[(~ m_u)]) / dt))))
tfinal = max(tfinal, t_emp)
m_z = (np.abs(p) < sqrt_eps)
p = p[(~ m_z)]
m_nr = ((p.real < 0) & (np.abs(p.imag) < sqrt_eps))
(p_nr, p) = (p[m_nr], p[(~ m_nr)])
if (p_nr.size > 0):
t_emp = np.max((log_decay_percent / np.abs((np.log(p_nr) / dt).real)))
tfinal = max(tfinal, t_emp)
m_int = (((p.real - 1) < sqrt_eps) & (np.abs(p.imag) < sqrt_eps))
(p_int, p) = (p[m_int], p[(~ m_int)])
m_w = (np.abs((np.abs(p) - 1)) < sqrt_eps)
(p_w, p) = (p[m_w], p[(~ m_w)])
if (p_w.size > 0):
t_emp = (((total_cycles * 2) * np.pi) / np.abs((np.log(p_w) / dt)).min())
tfinal = max(tfinal, t_emp)
if (p.size > 0):
t_emp = (log_decay_percent / np.abs((np.log(p) / dt).real).min())
tfinal = max(tfinal, t_emp)
if (p_int.size > 0):
tfinal = (tfinal * 5)
num_samples = (tfinal // dt)
if (num_samples > max_points_z):
tfinal = (dt * max_points_z)
else:
tfinal = (dt * num_samples)
return (tfinal, dt)
(b, (sca, perm)) = matrix_balance(sys.a, separate=True)
(p, l, r) = eig(b, left=True, right=True)
eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
eig_sens = minimum(1000000000000.0, eig_sens)
p[(np.abs(p) < np.spacing((eig_sens * norm(b, 1))))] = 0.0
l[perm, :] *= reciprocal(sca)[:, None]
r[perm, :] *= sca[:, None]
(w, v) = ((sys.c @ r), (l.T.conj() @ sys.b))
origin = False
wn = np.abs(p)
if np.any((wn == 0.0)):
origin = True
dc = zeros_like(p, dtype=float)
ok = (np.abs(eig_sens) <= (1 / sqrt_eps))
dc[ok] = ((norm(v[ok, :], axis=1) * norm(w[:, ok], axis=0)) * eig_sens[ok])
dc[(wn != 0.0)] /= (wn[(wn != 0)] if is_step else 1.0)
dc[(wn == 0.0)] = 0.0
dc[(p.imag != 0.0)] *= 2
relevance = ((dc > (0.1 * dc.max())) | (~ ok))
psub = p[relevance]
wnsub = wn[relevance]
(tfinal, dt) = ([], [])
ints = (wnsub == 0.0)
iw = ((psub.imag != 0.0) & (np.abs(psub.real) <= sqrt_eps))
if np.any(iw):
tfinal += (((total_cycles * 2) * np.pi) / wnsub[iw]).tolist()
dt += (((2 * np.pi) / pts_per_cycle) / wnsub[iw]).tolist()
texp_mode = (log_decay_percent / np.abs(psub[((~ iw) & (~ ints))].real))
tfinal += texp_mode.tolist()
dt += minimum((texp_mode / 50), (((2 * np.pi) / pts_per_cycle) / wnsub[((~ iw) & (~ ints))])).tolist()
if (len(tfinal) == 0):
return ((default_tfinal * 5), ((default_tfinal * 5) / min_points))
tfinal = (np.max(tfinal) * (5 if origin else 1))
dt = np.min(dt)
dt = ((tfinal / max_points) if ((tfinal // dt) > max_points) else dt)
tfinal = ((dt * min_points) if ((tfinal // dt) < min_points) else tfinal)
return (tfinal, dt) | 3,334,180,236,767,385,600 | Helper function to estimate a final time and a sampling period for
time domain simulations. It is essentially geared towards impulse response
but is also used for step responses.
For discrete-time models, obviously dt is inherent and only tfinal is
computed.
Parameters
----------
sys : {State, Transfer}
The system to be investigated
is_step : bool
Scales the dc value by the magnitude of the nonzero mode since
integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.
Default is True.
Returns
-------
tfinal : float
The final time instance for which the simulation will be performed.
dt : float
The estimated sampling period for the simulation.
Notes
-----
Just by evaluating the fastest mode for dt and slowest for tfinal often
leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000]))
since dt will be very small and tfinal will be too large though the fast
mode hardly ever contributes. Similarly, change the numerator to [1, 2, 0]
and the simulation would be unnecessarily long and the plot is virtually
an L shape since the decay is so fast.
Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR
can be used such that only the modes that have significant effect on the
time response are taken. But the sensitivity of the eigenvalues complicate
the matter since dλ = <w, dA*v> with <w,v> = 1. Hence we can only work
with simple poles with this formulation. See Golub, Van Loan Section 7.2.2
for simple eigenvalue sensitivity about the nonunity of <w,v>. The size of
the response is dependent on the size of the eigenshapes rather than the
eigenvalues themselves. | harold/_time_domain.py | _compute_tfinal_and_dt | TavaresFilipe/harold | python | def _compute_tfinal_and_dt(sys, is_step=True):
'\n Helper function to estimate a final time and a sampling period for\n time domain simulations. It is essentially geared towards impulse response\n but is also used for step responses.\n\n For discrete-time models, obviously dt is inherent and only tfinal is\n computed.\n\n Parameters\n ----------\n sys : {State, Transfer}\n The system to be investigated\n is_step : bool\n Scales the dc value by the magnitude of the nonzero mode since\n integrating the impulse response gives ∫exp(-λt) = -exp(-λt)/λ.\n Default is True.\n\n Returns\n -------\n tfinal : float\n The final time instance for which the simulation will be performed.\n dt : float\n The estimated sampling period for the simulation.\n\n Notes\n -----\n Just by evaluating the fastest mode for dt and slowest for tfinal often\n leads to unnecessary, bloated sampling (e.g., Transfer(1,[1,1001,1000]))\n since dt will be very small and tfinal will be too large though the fast\n mode hardly ever contributes. Similarly, change the numerator to [1, 2, 0]\n and the simulation would be unnecessarily long and the plot is virtually\n an L shape since the decay is so fast.\n\n Instead, a modal decomposition in time domain hence a truncated ZIR and ZSR\n can be used such that only the modes that have significant effect on the\n time response are taken. But the sensitivity of the eigenvalues complicate\n the matter since dλ = <w, dA*v> with <w,v> = 1. Hence we can only work\n with simple poles with this formulation. See Golub, Van Loan Section 7.2.2\n for simple eigenvalue sensitivity about the nonunity of <w,v>. The size of\n the response is dependent on the size of the eigenshapes rather than the\n eigenvalues themselves.\n\n '
sqrt_eps = np.sqrt(np.spacing(1.0))
min_points = 100
min_points_z = 20
max_points = 10000
max_points_z = 75000
default_tfinal = 5
total_cycles = 5
pts_per_cycle = 25
log_decay_percent = np.log(100)
if sys._isgain:
if sys._isdiscrete:
return ((sys._dt * min_points_z), sys._dt)
else:
return (default_tfinal, (default_tfinal / min_points))
if sys._isdiscrete:
dt = sys._dt
tfinal = default_tfinal
p = eigvals(sys.a)
m_u = (np.abs(p) >= (1 + sqrt_eps))
(p_u, p) = (p[m_u], p[(~ m_u)])
if (p_u.size > 0):
m_u = ((p_u.real < 0) & (np.abs(p_u.imag) < sqrt_eps))
t_emp = np.max((log_decay_percent / np.abs((np.log(p_u[(~ m_u)]) / dt))))
tfinal = max(tfinal, t_emp)
m_z = (np.abs(p) < sqrt_eps)
p = p[(~ m_z)]
m_nr = ((p.real < 0) & (np.abs(p.imag) < sqrt_eps))
(p_nr, p) = (p[m_nr], p[(~ m_nr)])
if (p_nr.size > 0):
t_emp = np.max((log_decay_percent / np.abs((np.log(p_nr) / dt).real)))
tfinal = max(tfinal, t_emp)
m_int = (((p.real - 1) < sqrt_eps) & (np.abs(p.imag) < sqrt_eps))
(p_int, p) = (p[m_int], p[(~ m_int)])
m_w = (np.abs((np.abs(p) - 1)) < sqrt_eps)
(p_w, p) = (p[m_w], p[(~ m_w)])
if (p_w.size > 0):
t_emp = (((total_cycles * 2) * np.pi) / np.abs((np.log(p_w) / dt)).min())
tfinal = max(tfinal, t_emp)
if (p.size > 0):
t_emp = (log_decay_percent / np.abs((np.log(p) / dt).real).min())
tfinal = max(tfinal, t_emp)
if (p_int.size > 0):
tfinal = (tfinal * 5)
num_samples = (tfinal // dt)
if (num_samples > max_points_z):
tfinal = (dt * max_points_z)
else:
tfinal = (dt * num_samples)
return (tfinal, dt)
(b, (sca, perm)) = matrix_balance(sys.a, separate=True)
(p, l, r) = eig(b, left=True, right=True)
eig_sens = reciprocal(maximum(1e-12, einsum('ij,ij->j', l, r).real))
eig_sens = minimum(1000000000000.0, eig_sens)
p[(np.abs(p) < np.spacing((eig_sens * norm(b, 1))))] = 0.0
l[perm, :] *= reciprocal(sca)[:, None]
r[perm, :] *= sca[:, None]
(w, v) = ((sys.c @ r), (l.T.conj() @ sys.b))
origin = False
wn = np.abs(p)
if np.any((wn == 0.0)):
origin = True
dc = zeros_like(p, dtype=float)
ok = (np.abs(eig_sens) <= (1 / sqrt_eps))
dc[ok] = ((norm(v[ok, :], axis=1) * norm(w[:, ok], axis=0)) * eig_sens[ok])
dc[(wn != 0.0)] /= (wn[(wn != 0)] if is_step else 1.0)
dc[(wn == 0.0)] = 0.0
dc[(p.imag != 0.0)] *= 2
relevance = ((dc > (0.1 * dc.max())) | (~ ok))
psub = p[relevance]
wnsub = wn[relevance]
(tfinal, dt) = ([], [])
ints = (wnsub == 0.0)
iw = ((psub.imag != 0.0) & (np.abs(psub.real) <= sqrt_eps))
if np.any(iw):
tfinal += (((total_cycles * 2) * np.pi) / wnsub[iw]).tolist()
dt += (((2 * np.pi) / pts_per_cycle) / wnsub[iw]).tolist()
texp_mode = (log_decay_percent / np.abs(psub[((~ iw) & (~ ints))].real))
tfinal += texp_mode.tolist()
dt += minimum((texp_mode / 50), (((2 * np.pi) / pts_per_cycle) / wnsub[((~ iw) & (~ ints))])).tolist()
if (len(tfinal) == 0):
return ((default_tfinal * 5), ((default_tfinal * 5) / min_points))
tfinal = (np.max(tfinal) * (5 if origin else 1))
dt = np.min(dt)
dt = ((tfinal / max_points) if ((tfinal // dt) > max_points) else dt)
tfinal = ((dt * min_points) if ((tfinal // dt) < min_points) else tfinal)
return (tfinal, dt) |
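The continuous-time branch above boils down to two per-mode rules: a decaying mode contributes a horizon of log(100)/|Re(p)|, and its sampling step is the smaller of that horizon divided by 50 and 2*pi/25 divided by the mode's natural frequency. A standalone toy sketch of those rules follows; the pole values are made up, and the real helper additionally weighs modes by their DC contribution and eigenvalue sensitivity, which this sketch skips.

import numpy as np

# Made-up pole set: one slow real mode and one lightly damped oscillator.
poles = np.array([-0.5, -2.0 + 10.0j, -2.0 - 10.0j])
log_decay_percent = np.log(100)      # simulate until the response decays to ~1%
pts_per_cycle = 25                   # sampling resolution for oscillatory modes

wn = np.abs(poles)
t_decay = log_decay_percent / np.abs(poles.real)                      # per-mode time horizon
dt_mode = np.minimum(t_decay / 50, (2 * np.pi / pts_per_cycle) / wn)  # per-mode step

tfinal, dt = t_decay.max(), dt_mode.min()
print(round(tfinal, 2), round(dt, 4))                                 # ~9.21 and ~0.0246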
def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
'\n Helper function to validate the input arguments for simulate_linear_system\n '
if (t is None):
if (not isdiscrete):
raise ValueError('Continuous time models need an evenly spaced time sequence from which the sampling period will be obtained.')
else:
u_samples = len(u)
t = np.linspace(0, ((u_samples - 1) * dt), num=u_samples)
else:
t = np.asarray(t, dtype=float).squeeze()
if (t.ndim != 1):
raise ValueError('Time array needs to be a 1D array.')
t_diff = np.diff(t)
if ((not np.allclose(t_diff, t_diff[0])) or (not (t_diff[0] > 0.0))):
raise ValueError('Time array should be equally spaced and increasing.')
if (isdiscrete and (not np.isclose(dt, t_diff[0]))):
raise ValueError('Time array increment {} is not equal to the model sampling period {}.'.format(t_diff[0], dt))
if (u.size < 1):
raise ValueError('The input array should at least have one point.')
if (len(u) != len(t)):
raise ValueError('The input and time arrays should have the same length. t: {} vs. u: {}'.format(t.shape, u.shape))
if (u.shape[1] != m):
raise ValueError("Number of input columns ({}) don't match the number of inputs ({}) of the given model.".format(u.shape[1], m))
return t | 6,866,641,658,556,078,000 | Helper function to validate the input arguments for simulate_linear_system | harold/_time_domain.py | _check_u_and_t_for_simulation | TavaresFilipe/harold | python | def _check_u_and_t_for_simulation(m, dt, u, t, isdiscrete):
'\n \n '
if (t is None):
if (not isdiscrete):
raise ValueError('Continuous time models need an evenly spaced time sequence from which the sampling period will be obtained.')
else:
u_samples = len(u)
t = np.linspace(0, ((u_samples - 1) * dt), num=u_samples)
else:
t = np.asarray(t, dtype=float).squeeze()
if (t.ndim != 1):
raise ValueError('Time array needs to be a 1D array.')
t_diff = np.diff(t)
if ((not np.allclose(t_diff, t_diff[0])) or (not (t_diff[0] > 0.0))):
raise ValueError('Time array should be equally spaced and increasing.')
if (isdiscrete and (not np.isclose(dt, t_diff[0]))):
raise ValueError('Time array increment {} is not equal to the model sampling period {}.'.format(t_diff[0], dt))
if (u.size < 1):
raise ValueError('The input array should at least have one point.')
if (len(u) != len(t)):
raise ValueError('The input and time arrays should have the same length. t: {} vs. u: {}'.format(t.shape, u.shape))
if (u.shape[1] != m):
raise ValueError("Number of input columns ({}) don't match the number of inputs ({}) of the given model.".format(u.shape[1], m))
return t |
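A quick check of the validator's contract, assuming the _check_u_and_t_for_simulation helper above is in scope (in the source it is a private function in harold/_time_domain.py); the dt, u and t values below are made up for illustration.

import numpy as np

# Two-input discrete-time model with dt = 0.1: u must be (n_samples, 2), t evenly spaced.
dt = 0.1
u = np.ones((50, 2))
t = np.arange(50) * dt

t_checked = _check_u_and_t_for_simulation(m=2, dt=dt, u=u, t=t, isdiscrete=True)

# t=None is only accepted for discrete models; the grid is then rebuilt from dt.
t_default = _check_u_and_t_for_simulation(m=2, dt=dt, u=u, t=None, isdiscrete=True)
assert np.allclose(t_checked, t_default)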
def _check_custom_time_input(t):
'\n Helper function for simple and rather expensive checks for sanity\n '
t = atleast_1d(t)
if (t.ndim > 1):
t = squeeze(t)
if (t.ndim > 1):
raise ValueError('Time array should be a 1D array but has {} nontrivial dimensions'.format(t.ndim))
if (t.size < 2):
raise ValueError('Time array should have at least two data points.')
dt = (t[1] - t[0])
if (dt <= 0.0):
raise ValueError('The time increment dt cannot be negative; Difference of the first two samples t1 - t0 = {}'.format(dt))
if (not np.allclose((t[1:] - t[:(- 1)]), dt)):
raise ValueError('Supplied time array is not numerically equally spaced (checked via numpy.allclose).')
return (t, dt) | -6,231,156,308,016,383,000 | Helper function for simple and rather expensive checks for sanity | harold/_time_domain.py | _check_custom_time_input | TavaresFilipe/harold | python | def _check_custom_time_input(t):
'\n \n '
t = atleast_1d(t)
if (t.ndim > 1):
t = squeeze(t)
if (t.ndim > 1):
raise ValueError('Time array should be a 1D array but has {} nontrivial dimensions'.format(t.ndim))
if (t.size < 2):
raise ValueError('Time array should have at least two data points.')
dt = (t[1] - t[0])
if (dt <= 0.0):
raise ValueError('The time increment dt cannot be negative; Difference of the first two samples t1 - t0 = {}'.format(dt))
if (not np.allclose((t[1:] - t[:(- 1)]), dt)):
raise ValueError('Supplied time array is not numerically equally spaced (checked via numpy.allclose).')
return (t, dt) |
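Assuming the _check_custom_time_input helper above is in scope, the following sketch shows one accepted and one rejected case; the arrays are made up for illustration.

import numpy as np

t_good = np.linspace(0.0, 2.0, 201)            # evenly spaced and increasing
t, dt = _check_custom_time_input(t_good)
print(dt)                                      # 0.01

t_bad = np.array([0.0, 0.1, 0.25, 0.3])        # unevenly spaced
try:
    _check_custom_time_input(t_bad)
except ValueError as err:
    print(err)                                 # "... not numerically equally spaced ..."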
def unet(inputI, output_channel):
'3D U-net'
phase_flag = 1
concat_dim = 4
conv1_1 = conv3d(input=inputI, output_chn=64, kernel_size=3, stride=1, use_bias=False, name='conv1')
conv1_bn = tf.contrib.layers.batch_norm(conv1_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv1_batch_norm')
conv1_relu = tf.nn.relu(conv1_bn, name='conv1_relu')
pool1_in = tf.layers.max_pooling3d(inputs=conv1_relu, pool_size=2, strides=2, name='pool1')
pool1 = pool1_in
conv2_1 = conv3d(input=pool1, output_chn=128, kernel_size=3, stride=1, use_bias=False, name='conv2')
conv2_bn = tf.contrib.layers.batch_norm(conv2_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv2_batch_norm')
conv2_relu = tf.nn.relu(conv2_bn, name='conv2_relu')
pool2_in = tf.layers.max_pooling3d(inputs=conv2_relu, pool_size=2, strides=2, name='pool2')
pool2 = pool2_in
conv3_1 = conv3d(input=pool2, output_chn=256, kernel_size=3, stride=1, use_bias=False, name='conv3a')
conv3_1_bn = tf.contrib.layers.batch_norm(conv3_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv3_1_batch_norm')
conv3_1_relu = tf.nn.relu(conv3_1_bn, name='conv3_1_relu')
conv3_2 = conv3d(input=conv3_1_relu, output_chn=256, kernel_size=3, stride=1, use_bias=False, name='conv3b')
conv3_2 = (conv3_2 + conv3_1)
conv3_2_bn = tf.contrib.layers.batch_norm(conv3_2, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv3_2_batch_norm')
conv3_2_relu = tf.nn.relu(conv3_2_bn, name='conv3_2_relu')
pool3_in = tf.layers.max_pooling3d(inputs=conv3_2_relu, pool_size=2, strides=2, name='pool3')
pool3 = pool3_in
conv4_1 = conv3d(input=pool3, output_chn=512, kernel_size=3, stride=1, use_bias=False, name='conv4a')
conv4_1_bn = tf.contrib.layers.batch_norm(conv4_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv4_1_batch_norm')
conv4_1_relu = tf.nn.relu(conv4_1_bn, name='conv4_1_relu')
conv4_2 = conv3d(input=conv4_1_relu, output_chn=512, kernel_size=3, stride=1, use_bias=False, name='conv4b')
conv4_2 = (conv4_2 + conv4_1)
conv4_2_bn = tf.contrib.layers.batch_norm(conv4_2, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv4_2_batch_norm')
conv4_2_relu = tf.nn.relu(conv4_2_bn, name='conv4_2_relu')
pool4 = tf.layers.max_pooling3d(inputs=conv4_2_relu, pool_size=2, strides=2, name='pool4')
conv5_1 = conv_bn_relu(input=pool4, output_chn=512, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='conv5_1')
conv5_2 = conv_bn_relu(input=conv5_1, output_chn=512, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='conv5_2')
deconv1_1 = deconv_bn_relu(input=conv5_2, output_chn=512, is_training=phase_flag, name='deconv1_1')
concat_1 = tf.concat([deconv1_1, conv4_2], axis=concat_dim, name='concat_1')
deconv1_2_in = conv_bn_relu(input=concat_1, output_chn=256, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv1_2')
deconv1_2 = deconv1_2_in
deconv2_1 = deconv_bn_relu(input=deconv1_2, output_chn=256, is_training=phase_flag, name='deconv2_1')
concat_2 = tf.concat([deconv2_1, conv3_2], axis=concat_dim, name='concat_2')
deconv2_2_in = conv_bn_relu(input=concat_2, output_chn=128, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv2_2')
deconv2_2 = deconv2_2_in
deconv3_1 = deconv_bn_relu(input=deconv2_2, output_chn=128, is_training=phase_flag, name='deconv3_1')
concat_3 = tf.concat([deconv3_1, conv2_1], axis=concat_dim, name='concat_3')
deconv3_2_in = conv_bn_relu(input=concat_3, output_chn=64, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv3_2')
deconv3_2 = deconv3_2_in
deconv4_1 = deconv_bn_relu(input=deconv3_2, output_chn=64, is_training=phase_flag, name='deconv4_1')
concat_4 = tf.concat([deconv4_1, conv1_1], axis=concat_dim, name='concat_4')
deconv4_2 = conv_bn_relu(input=concat_4, output_chn=32, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv4_2')
pre_pro = conv3d(input=deconv4_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='pre_pro')
pred_prob = pre_pro
aux0_conv = conv3d(input=deconv1_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='aux0_conv')
aux0_deconv_1 = Deconv3d(input=aux0_conv, output_chn=output_channel, name='aux0_deconv_1')
aux0_deconv_2 = Deconv3d(input=aux0_deconv_1, output_chn=output_channel, name='aux0_deconv_2')
aux0_prob = Deconv3d(input=aux0_deconv_2, output_chn=output_channel, name='aux0_prob')
aux1_conv = conv3d(input=deconv2_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='aux1_conv')
aux1_deconv_1 = Deconv3d(input=aux1_conv, output_chn=output_channel, name='aux1_deconv_1')
aux1_prob = Deconv3d(input=aux1_deconv_1, output_chn=output_channel, name='aux1_prob')
aux2_conv = conv3d(input=deconv3_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='aux2_conv')
aux2_prob = Deconv3d(input=aux2_conv, output_chn=output_channel, name='aux2_prob')
soft_prob = tf.nn.softmax(pred_prob, name='pred_soft')
pred_label = tf.argmax(soft_prob, axis=4, name='argmax')
return (pred_prob, pred_label, aux0_prob, aux1_prob, aux2_prob) | 6,890,914,431,589,737,000 | 3D U-net | src/models.py | unet | JohnleeHIT/Brats2019 | python | def unet(inputI, output_channel):
phase_flag = 1
concat_dim = 4
conv1_1 = conv3d(input=inputI, output_chn=64, kernel_size=3, stride=1, use_bias=False, name='conv1')
conv1_bn = tf.contrib.layers.batch_norm(conv1_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv1_batch_norm')
conv1_relu = tf.nn.relu(conv1_bn, name='conv1_relu')
pool1_in = tf.layers.max_pooling3d(inputs=conv1_relu, pool_size=2, strides=2, name='pool1')
pool1 = pool1_in
conv2_1 = conv3d(input=pool1, output_chn=128, kernel_size=3, stride=1, use_bias=False, name='conv2')
conv2_bn = tf.contrib.layers.batch_norm(conv2_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv2_batch_norm')
conv2_relu = tf.nn.relu(conv2_bn, name='conv2_relu')
pool2_in = tf.layers.max_pooling3d(inputs=conv2_relu, pool_size=2, strides=2, name='pool2')
pool2 = pool2_in
conv3_1 = conv3d(input=pool2, output_chn=256, kernel_size=3, stride=1, use_bias=False, name='conv3a')
conv3_1_bn = tf.contrib.layers.batch_norm(conv3_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv3_1_batch_norm')
conv3_1_relu = tf.nn.relu(conv3_1_bn, name='conv3_1_relu')
conv3_2 = conv3d(input=conv3_1_relu, output_chn=256, kernel_size=3, stride=1, use_bias=False, name='conv3b')
conv3_2 = (conv3_2 + conv3_1)
conv3_2_bn = tf.contrib.layers.batch_norm(conv3_2, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv3_2_batch_norm')
conv3_2_relu = tf.nn.relu(conv3_2_bn, name='conv3_2_relu')
pool3_in = tf.layers.max_pooling3d(inputs=conv3_2_relu, pool_size=2, strides=2, name='pool3')
pool3 = pool3_in
conv4_1 = conv3d(input=pool3, output_chn=512, kernel_size=3, stride=1, use_bias=False, name='conv4a')
conv4_1_bn = tf.contrib.layers.batch_norm(conv4_1, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv4_1_batch_norm')
conv4_1_relu = tf.nn.relu(conv4_1_bn, name='conv4_1_relu')
conv4_2 = conv3d(input=conv4_1_relu, output_chn=512, kernel_size=3, stride=1, use_bias=False, name='conv4b')
conv4_2 = (conv4_2 + conv4_1)
conv4_2_bn = tf.contrib.layers.batch_norm(conv4_2, decay=0.9, updates_collections=None, epsilon=1e-05, scale=True, is_training=phase_flag, scope='conv4_2_batch_norm')
conv4_2_relu = tf.nn.relu(conv4_2_bn, name='conv4_2_relu')
pool4 = tf.layers.max_pooling3d(inputs=conv4_2_relu, pool_size=2, strides=2, name='pool4')
conv5_1 = conv_bn_relu(input=pool4, output_chn=512, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='conv5_1')
conv5_2 = conv_bn_relu(input=conv5_1, output_chn=512, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='conv5_2')
deconv1_1 = deconv_bn_relu(input=conv5_2, output_chn=512, is_training=phase_flag, name='deconv1_1')
concat_1 = tf.concat([deconv1_1, conv4_2], axis=concat_dim, name='concat_1')
deconv1_2_in = conv_bn_relu(input=concat_1, output_chn=256, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv1_2')
deconv1_2 = deconv1_2_in
deconv2_1 = deconv_bn_relu(input=deconv1_2, output_chn=256, is_training=phase_flag, name='deconv2_1')
concat_2 = tf.concat([deconv2_1, conv3_2], axis=concat_dim, name='concat_2')
deconv2_2_in = conv_bn_relu(input=concat_2, output_chn=128, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv2_2')
deconv2_2 = deconv2_2_in
deconv3_1 = deconv_bn_relu(input=deconv2_2, output_chn=128, is_training=phase_flag, name='deconv3_1')
concat_3 = tf.concat([deconv3_1, conv2_1], axis=concat_dim, name='concat_3')
deconv3_2_in = conv_bn_relu(input=concat_3, output_chn=64, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv3_2')
deconv3_2 = deconv3_2_in
deconv4_1 = deconv_bn_relu(input=deconv3_2, output_chn=64, is_training=phase_flag, name='deconv4_1')
concat_4 = tf.concat([deconv4_1, conv1_1], axis=concat_dim, name='concat_4')
deconv4_2 = conv_bn_relu(input=concat_4, output_chn=32, kernel_size=3, stride=1, use_bias=False, is_training=phase_flag, name='deconv4_2')
pre_pro = conv3d(input=deconv4_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='pre_pro')
pred_prob = pre_pro
aux0_conv = conv3d(input=deconv1_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='aux0_conv')
aux0_deconv_1 = Deconv3d(input=aux0_conv, output_chn=output_channel, name='aux0_deconv_1')
aux0_deconv_2 = Deconv3d(input=aux0_deconv_1, output_chn=output_channel, name='aux0_deconv_2')
aux0_prob = Deconv3d(input=aux0_deconv_2, output_chn=output_channel, name='aux0_prob')
aux1_conv = conv3d(input=deconv2_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='aux1_conv')
aux1_deconv_1 = Deconv3d(input=aux1_conv, output_chn=output_channel, name='aux1_deconv_1')
aux1_prob = Deconv3d(input=aux1_deconv_1, output_chn=output_channel, name='aux1_prob')
aux2_conv = conv3d(input=deconv3_2, output_chn=output_channel, kernel_size=1, stride=1, use_bias=True, name='aux2_conv')
aux2_prob = Deconv3d(input=aux2_conv, output_chn=output_channel, name='aux2_prob')
soft_prob = tf.nn.softmax(pred_prob, name='pred_soft')
pred_label = tf.argmax(soft_prob, axis=4, name='argmax')
return (pred_prob, pred_label, aux0_prob, aux1_prob, aux2_prob) |
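A graph-construction sketch for the 3D U-net above. It assumes a TensorFlow 1.x runtime (tf.placeholder, tf.contrib) and that the conv3d, conv_bn_relu, deconv_bn_relu and Deconv3d helpers from the same repository are importable alongside unet; the 96x96x96 patch size, single input channel and 4 output classes are illustrative assumptions, not values taken from the source.

import numpy as np
import tensorflow as tf  # TensorFlow 1.x API

# Hypothetical input: one single-channel 96^3 patch; spatial dims must be divisible
# by 16 because the encoder downsamples four times.
inputI = tf.placeholder(tf.float32, shape=[1, 96, 96, 96, 1], name='input')
pred_prob, pred_label, aux0, aux1, aux2 = unet(inputI, output_channel=4)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    dummy = np.zeros((1, 96, 96, 96, 1), dtype=np.float32)
    labels = sess.run(pred_label, feed_dict={inputI: dummy})
    print(labels.shape)  # expected (1, 96, 96, 96): one class index per voxel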
def extractReMonsterWiki(item):
"\n\tParser for 'Re:Monster Wiki'\n\t"
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
if ('WATTT' in item['tags']):
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False | 2,093,858,153,736,959,000 | Parser for 'Re:Monster Wiki' | WebMirror/management/rss_parser_funcs/feed_parse_extractReMonsterWiki.py | extractReMonsterWiki | fake-name/ReadableWebProxy | python | def extractReMonsterWiki(item):
"\n\t\n\t"
(vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
if ((not (chp or vol)) or ('preview' in item['title'].lower())):
return None
if ('WATTT' in item['tags']):
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False |
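A usage sketch for the parser above. The item dict shape is inferred from the two fields the function reads ('title' and 'tags'), the title is hypothetical, and extractVolChapterFragmentPostfix / buildReleaseMessageWithType are assumed to be provided by the surrounding WebMirror codebase, so this is not runnable on its own.

# Hypothetical feed item; only 'title' and 'tags' are inspected.
item = {
    'title': 'WATTT Chapter 12',
    'tags': ['WATTT'],
}

release = extractReMonsterWiki(item)
# Per the code above: a release message when the title parses to a chapter/volume
# and 'WATTT' is tagged, None for previews or unparseable titles, False otherwise.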
def update_fields(module, p):
'This updates the module field names\n to match the field names tower-cli expects to make\n calling of the modify/delete methods easier.\n '
params = p.copy()
field_map = {'fact_caching_enabled': 'use_fact_cache', 'ask_diff_mode': 'ask_diff_mode_on_launch', 'ask_extra_vars': 'ask_variables_on_launch', 'ask_limit': 'ask_limit_on_launch', 'ask_tags': 'ask_tags_on_launch', 'ask_skip_tags': 'ask_skip_tags_on_launch', 'ask_verbosity': 'ask_verbosity_on_launch', 'ask_inventory': 'ask_inventory_on_launch', 'ask_credential': 'ask_credential_on_launch', 'ask_job_type': 'ask_job_type_on_launch', 'diff_mode_enabled': 'diff_mode', 'concurrent_jobs_enabled': 'allow_simultaneous', 'force_handlers_enabled': 'force_handlers'}
params_update = {}
for (old_k, new_k) in field_map.items():
v = params.pop(old_k)
params_update[new_k] = v
extra_vars = params.get('extra_vars')
extra_vars_path = params.get('extra_vars_path')
if extra_vars:
params_update['extra_vars'] = [json.dumps(extra_vars)]
elif (extra_vars_path is not None):
params_update['extra_vars'] = [('@' + extra_vars_path)]
module.deprecate(msg='extra_vars_path should not be used anymore. Use \'extra_vars: "{{ lookup(\'file\', \'/path/to/file\') | from_yaml }}"\' instead', version='3.8')
params.update(params_update)
return params | -303,015,818,293,956,200 | This updates the module field names
to match the field names tower-cli expects to make
calling of the modify/delete methods easier. | awx_collection/plugins/modules/tower_job_template.py | update_fields | activelan/awx | python | def update_fields(module, p):
'This updates the module field names\n to match the field names tower-cli expects to make\n calling of the modify/delete methods easier.\n '
params = p.copy()
field_map = {'fact_caching_enabled': 'use_fact_cache', 'ask_diff_mode': 'ask_diff_mode_on_launch', 'ask_extra_vars': 'ask_variables_on_launch', 'ask_limit': 'ask_limit_on_launch', 'ask_tags': 'ask_tags_on_launch', 'ask_skip_tags': 'ask_skip_tags_on_launch', 'ask_verbosity': 'ask_verbosity_on_launch', 'ask_inventory': 'ask_inventory_on_launch', 'ask_credential': 'ask_credential_on_launch', 'ask_job_type': 'ask_job_type_on_launch', 'diff_mode_enabled': 'diff_mode', 'concurrent_jobs_enabled': 'allow_simultaneous', 'force_handlers_enabled': 'force_handlers'}
params_update = {}
for (old_k, new_k) in field_map.items():
v = params.pop(old_k)
params_update[new_k] = v
extra_vars = params.get('extra_vars')
extra_vars_path = params.get('extra_vars_path')
if extra_vars:
params_update['extra_vars'] = [json.dumps(extra_vars)]
elif (extra_vars_path is not None):
params_update['extra_vars'] = [('@' + extra_vars_path)]
module.deprecate(msg='extra_vars_path should not be used anymore. Use \'extra_vars: "{{ lookup(\'file\', \'/path/to/file\') | from_yaml }}"\' instead', version='3.8')
params.update(params_update)
return params |
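A small illustration of the renaming update_fields performs, using a hypothetical stub in place of the real AnsibleModule (only deprecate is needed, and it is not even hit on this path) and a deliberately minimal params dict. Every legacy key must be present because the function pops each one unconditionally; the real module supplies many more parameters.

class _StubModule:
    def deprecate(self, msg, version):
        print('DEPRECATED:', msg)

legacy_keys = [
    'fact_caching_enabled', 'ask_diff_mode', 'ask_extra_vars', 'ask_limit',
    'ask_tags', 'ask_skip_tags', 'ask_verbosity', 'ask_inventory',
    'ask_credential', 'ask_job_type', 'diff_mode_enabled',
    'concurrent_jobs_enabled', 'force_handlers_enabled']

params = {k: False for k in legacy_keys}
params.update({'name': 'demo-template', 'extra_vars': {'foo': 'bar'}, 'extra_vars_path': None})

new_params = update_fields(_StubModule(), params)
assert 'use_fact_cache' in new_params and 'fact_caching_enabled' not in new_params
assert new_params['extra_vars'] == ['{"foo": "bar"}']  # dict is JSON-encoded into a one-element list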
@command
def echo(bot, mask, target, args):
'Echo command\n\n %%echo <words>...\n '
(yield ' '.join(args['<words>'])) | 1,500,019,934,740,953,900 | Echo command
%%echo <words>... | examples/mycommands.py | echo | gawel/irc3 | python | @command
def echo(bot, mask, target, args):
'Echo command\n\n %%echo <words>...\n '
(yield ' '.join(args['<words>'])) |
@command(permission='admin', public=False)
def adduser(bot, mask, target, args):
'Add a user\n\n %%adduser <name> <password>\n '
bot.privmsg(mask.nick, 'User added') | 3,192,555,016,350,542,000 | Add a user
%%adduser <name> <password> | examples/mycommands.py | adduser | gawel/irc3 | python | @command(permission='admin', public=False)
def adduser(bot, mask, target, args):
'Add a user\n\n %%adduser <name> <password>\n '
bot.privmsg(mask.nick, 'User added') |
@command(show_in_help_list=False)
def my_secret_operation(bot, mask, target, args):
"Do something you don't want in !help all the time\n\n %%my_secret_operation\n "
(yield 'I like turtles') | -5,074,065,158,678,942,000 | Do something you don't want in !help all the time
%%my_secret_operation | examples/mycommands.py | my_secret_operation | gawel/irc3 | python | @command(show_in_help_list=False)
def my_secret_operation(bot, mask, target, args):
"Do something you don't want in !help all the time\n\n %%my_secret_operation\n "
(yield 'I like turtles') |
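The three handlers above come from irc3's example plugin (examples/mycommands.py). A hedged sketch of how such a module is typically wired into a bot; the nick, server and channel values are placeholders, and the include list assumes irc3's stock core and command plugins with the default '!' prefix.

import irc3

config = dict(
    nick='mybot',                                  # placeholder nick
    host='irc.libera.chat', port=6667, ssl=False,  # placeholder network
    autojoins=['#mychannel'],                      # placeholder channel
    includes=[
        'irc3.plugins.core',     # connection housekeeping
        'irc3.plugins.command',  # !command dispatcher and !help
        'mycommands',            # the module defining echo / adduser / my_secret_operation
    ],
)

bot = irc3.IrcBot(**config)
# bot.run(forever=True)   # then "!echo hello world" in the channel replies "hello world"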