hexsha (string 40..40) | size (int64 1..1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string 3..239) | max_stars_repo_name (string 5..130) | max_stars_repo_head_hexsha (string 40..78) | max_stars_repo_licenses (sequence 1..10) | max_stars_count (int64 1..191k, nullable) | max_stars_repo_stars_event_min_datetime (string 24..24, nullable) | max_stars_repo_stars_event_max_datetime (string 24..24, nullable) | max_issues_repo_path (string 3..239) | max_issues_repo_name (string 5..130) | max_issues_repo_head_hexsha (string 40..78) | max_issues_repo_licenses (sequence 1..10) | max_issues_count (int64 1..67k, nullable) | max_issues_repo_issues_event_min_datetime (string 24..24, nullable) | max_issues_repo_issues_event_max_datetime (string 24..24, nullable) | max_forks_repo_path (string 3..239) | max_forks_repo_name (string 5..130) | max_forks_repo_head_hexsha (string 40..78) | max_forks_repo_licenses (sequence 1..10) | max_forks_count (int64 1..105k, nullable) | max_forks_repo_forks_event_min_datetime (string 24..24, nullable) | max_forks_repo_forks_event_max_datetime (string 24..24, nullable) | content (string 1..1.03M) | avg_line_length (float64 1..958k) | max_line_length (int64 1..1.03M) | alphanum_fraction (float64 0..1) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a2344ed66e39868f0b7f906d38c04836219aa94 | 190 | py | Python | kipp/aio/exceptions.py | Laisky/kipp | 7b0d0a0121f06fb7f9c0fea386ad30e92c6fe2d2 | ["MIT"] | 8 | 2019-01-11T11:02:34.000Z | 2020-03-05T11:09:38.000Z | kipp/aio/exceptions.py | Laisky/kipp | 7b0d0a0121f06fb7f9c0fea386ad30e92c6fe2d2 | ["MIT"] | null | null | null | kipp/aio/exceptions.py | Laisky/kipp | 7b0d0a0121f06fb7f9c0fea386ad30e92c6fe2d2 | ["MIT"] | 2 | 2019-01-11T11:02:43.000Z | 2019-09-09T03:27:52.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from tornado.gen import TimeoutError
from kipp.libs.aio import KippAIOException, KippAIOTimeoutError
| 21.111111 | 63 | 0.784211 |
4a234537b3f9a25d6723641e8a24c56e43675ec2 | 734 | py | Python | Ex60.py | Kevinwmiguel/PythonExercises | e976b274d8f17f427b2bcf0c2a614c0043478ea5 | ["MIT"] | null | null | null | Ex60.py | Kevinwmiguel/PythonExercises | e976b274d8f17f427b2bcf0c2a614c0043478ea5 | ["MIT"] | null | null | null | Ex60.py | Kevinwmiguel/PythonExercises | e976b274d8f17f427b2bcf0c2a614c0043478ea5 | ["MIT"] | null | null | null |
"""
Ex 60 - Create a program that reads several integers from the keyboard. The program will only stop when the user enters the value 999, which is the stop condition. At the end, show how many numbers were entered and what their sum was (disregarding the flag).
"""
# ----- function -----
def header():
print('\n')
print('=' * 40)
print('The program will stop when you press 999')
print('=' * 40)
print('\n')
# ----- Var -----
c = 0   # most recent number entered (checked against the 999 flag)
tt = 0  # running total of the entered numbers
tn = 0  # count of numbers entered
# ----- Header -----
header()
# ----- Loop -----
while c != 999:
c = int(input('Enter a number: '))
if c != 999:
tt += c
tn += 1
print(f'You entered {tn} numbers')
print(f'The total sum is: {tt}')
input('Enter to exit')
| 22.242424 | 269 | 0.579019 |
4a23454fad7ca5528382afa785286ee8f5c25761 | 1,067 | py | Python | featurelist.py | IdeaBot/basics | 50c89001aceebebd2a67ad59c7c75d9db0843d0e | ["MIT"] | null | null | null | featurelist.py | IdeaBot/basics | 50c89001aceebebd2a67ad59c7c75d9db0843d0e | ["MIT"] | null | null | null | featurelist.py | IdeaBot/basics | 50c89001aceebebd2a67ad59c7c75d9db0843d0e | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 22 19:11:09 2018
@author: Yuhas
"""
from libs import command
import re
class Command(command.DirectOnlyCommand):
'''Gives information about my features
**Usage**
```@Idea featurelist [-v]```
If you're interested in helping to improve Idea, check out what happens when you include `-v`'''
MESSAGE = '''
Got a sweet new idea for Idea? Send it to the devs here:
<https://discord.gg/gwq2vS7> in #ideas
Or inject your idea into the pipe named "Idea Ideas" '''
MESSAGE_V = MESSAGE+'''
To learn how to add new features, go here:
<https://github.com/NGnius/IdeaBot/wiki>
Contact the devs with your question through the server invite'''
def matches(self, message):
return re.search(r'\bfeature\s?(list|request)', message.content, re.I)
def action(self, message):
if "-v" in message.content.lower():
yield from self.send_message(message.channel, self.MESSAGE_V)
else:
yield from self.send_message(message.channel, self.MESSAGE)
| 28.078947 | 96 | 0.665417 |
4a23469577c6add5436b474a646f77472822109a | 1,998 | py | Python | secondary.py | moonstruckblossm/dto | fc79ad847b922a4e9ac29c13aff1bd081fd1e55d | ["MIT"] | null | null | null | secondary.py | moonstruckblossm/dto | fc79ad847b922a4e9ac29c13aff1bd081fd1e55d | ["MIT"] | null | null | null | secondary.py | moonstruckblossm/dto | fc79ad847b922a4e9ac29c13aff1bd081fd1e55d | ["MIT"] | null | null | null |
#author: <author here>
# date: <date here>
# --------------- Section 3 --------------- #
# String Duplication / Pattern Recognition #
# you may choose to use any symbol, it does not have to be the dollar sign
# create the following pattern using string duplication and print statements:
#
# $
# $$
# $$$
# $$$$
# $$$$$
print("$")
print("$"*2)
print("$"*3)
print("$"*4)
print("$"*5)
print()
# create the following pattern using string duplication and print statements:
#
# $$$$$
# $$$$
# $$$
# $$
# $
print("$"*5)
print("$"*4)
print("$"*3)
print("$"*2)
print("$")
print()
# create the following pattern using string duplication and print statements:
#
# $
# $$
# $$$
# $$$$
# $$$$$
#though they are not visible, the spaces before each dollar sign are characters
#for each line you print, there is a certain number of spaces and a certain number of dollar signs
#either way, you'll want to have FIVE characters on each line that you print, so how can we approach this?
#think about how many dollar signs are on each line. if you need five characters on each line, but there's less than five
#dollar signs on a line, what will you do?
#remember that both spaces (" ") and dollar signs ($) are characters!
#FIVE characters in total - the number of dollar signs you want = the amount of spaces to put before the dollar signs!
#for example, if you want THREE dollar signs, you'll want to put TWO spaces before them, since three plus two is five.
#your total number of characters is five.
print((" "*4)+"$"*1)
print((" "*3)+"$"*2)
print((" "*2)+"$"*3)
print((" "*1)+"$"*4)
print((" "*0)+"$"*5)
print()
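# A loop-based sketch of the same arithmetic (not part of the original exercise):
# each line gets (5 - count) leading spaces followed by count dollar signs, which
# reproduces the right-aligned triangle printed just above.
for count in range(1, 6):
    print(" " * (5 - count) + "$" * count)
print()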
# create the following pattern using string duplication and print statements:
#
# $$$$$
# $$$$
# $$$
# $$
# $
#this is very similar to the previous problem! you'll just want to do the same thing in the opposite order to get a
#right-oriented upside down right triangle!
print((" "*0)+"$"*5)
print((" "*1)+"$"*4)
print((" "*2)+"$"*3)
print((" "*3)+"$"*2)
print((" "*4)+"$"*1)
| 24.365854 | 121 | 0.633634 |
4a234731d73f6968c6a545284bca205fe4b0706b | 534 | py | Python | tests/onegov/ballot/collections/test_ballots.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null | tests/onegov/ballot/collections/test_ballots.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null | tests/onegov/ballot/collections/test_ballots.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | ["MIT"] | null | null | null |
from datetime import date
from onegov.ballot import Ballot
from onegov.ballot import BallotCollection
from onegov.ballot import Vote
def test_ballots(session):
vote = Vote(
title="A",
shortcode="Z",
domain='federation',
date=date(2015, 6, 14)
)
vote.ballots.append(Ballot(type='proposal'))
session.add(vote)
session.flush()
collection = BallotCollection(session)
assert collection.query().count() == 1
assert collection.by_id(vote.ballots[0].id) == vote.ballots[0]
| 23.217391 | 66 | 0.672285 |
4a234859b9e49491f1fdd6d0ea72561ff9de1bf2 | 2,242 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GL/ARB/compressed_texture_pixel_storage.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GL/ARB/compressed_texture_pixel_storage.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GL/ARB/compressed_texture_pixel_storage.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | ["MIT"] | null | null | null |
'''OpenGL extension ARB.compressed_texture_pixel_storage
This module customises the behaviour of the
OpenGL.raw.GL.ARB.compressed_texture_pixel_storage to provide a more
Python-friendly API
Overview (from the spec)
This extension expands the functionality of the PixelStore modes
to allow UNPACK_ROW_LENGTH, UNPACK_SKIP_ROWS, UNPACK_SKIP_PIXELS,
UNPACK_IMAGE_HEIGHT and UNPACK_SKIP_IMAGES to affect the operation of
CompressedTexImage*D and CompressedTexSubImage*D. Similarly, it
also allows PACK_ROW_LENGTH, PACK_SKIP_ROWS, PACK_SKIP_PIXELS,
PACK_IMAGE_HEIGHT and PACK_SKIP_IMAGES to affect the operation of
GetCompressedTexImage*D. This allows data to be transferred
to or from a specified sub-rectangle of a larger compressed image.
This extension is designed primarily to support compressed image
formats with fixed-size blocks. To use this new mechanism, an
application should program new parameters UNPACK_COMPRESSED_BLOCK_
{WIDTH,HEIGHT,DEPTH,SIZE} to indicate the number of texels in each
dimension of the fixed-size block as well as the number of bytes
consumed by each block. These parameters, in addition to the
existing PixelStore parameters, are used to identify a collection
of bytes in client memory or a buffer object's data store to use
as compressed texture data. This operation is unlikely to have
the desired results if the client programs a block size inconsistent
with the underlying compressed image format, or if the compressed
image format has variable-sized blocks.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/compressed_texture_pixel_storage.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.compressed_texture_pixel_storage import *
from OpenGL.raw.GL.ARB.compressed_texture_pixel_storage import _EXTENSION_NAME
def glInitCompressedTexturePixelStorageARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
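def _exampleSetCompressedUnpackState():
    '''Hedged usage sketch, not part of the autogenerated wrapper: describe the
    compressed block layout to the unpack pipeline before uploading a sub-rectangle
    with CompressedTexSubImage2D. Requires a current GL context when called; the
    4x4-texel, 8-byte block values are illustrative (DXT1-style) assumptions.'''
    from OpenGL.GL import glPixelStorei
    glPixelStorei( GL_UNPACK_COMPRESSED_BLOCK_WIDTH, 4 )
    glPixelStorei( GL_UNPACK_COMPRESSED_BLOCK_HEIGHT, 4 )
    glPixelStorei( GL_UNPACK_COMPRESSED_BLOCK_DEPTH, 1 )
    glPixelStorei( GL_UNPACK_COMPRESSED_BLOCK_SIZE, 8 )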
### END AUTOGENERATED SECTION
| 47.702128 | 79 | 0.806423 |
4a2348910b4e2550c6e7ecb69a45349005ae093b | 513 | py | Python | 2-Medium/minFallingPathSum.py | Sma-Das/Leetcode | 6f9b8f069e2ef198408abd6780fd0697a8bebada | ["MIT"] | null | null | null | 2-Medium/minFallingPathSum.py | Sma-Das/Leetcode | 6f9b8f069e2ef198408abd6780fd0697a8bebada | ["MIT"] | null | null | null | 2-Medium/minFallingPathSum.py | Sma-Das/Leetcode | 6f9b8f069e2ef198408abd6780fd0697a8bebada | ["MIT"] | null | null | null |
def minFallingPathSum(matrix):
if not matrix:
return 0
for row in range(1, len(matrix)):
prev_row = matrix[row-1]
for column in range(c := len(matrix[0])):
v = matrix[row][column]
ops = []
if column > 0:
ops.append(prev_row[column-1] + v)
if column < c-1:
ops.append(prev_row[column+1] + v)
ops.append(prev_row[column] + v)
matrix[row][column] = min(ops)
return min(matrix[-1])
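# Illustrative check (not part of the original snippet): LeetCode's sample grid,
# whose minimum falling path sum is 13.
print(minFallingPathSum([[2, 1, 3], [6, 5, 4], [7, 8, 9]]))  # expected: 13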
| 30.176471 | 50 | 0.497076 |
4a2348aa758efa3397f34553e80e46dd7cf6c6ed | 1,481 | py | Python | process_build.py | jbaker10/sal-saml | 861fe0ec10324698e6b617dec70d053016a7be8c | ["Apache-2.0"] | null | null | null | process_build.py | jbaker10/sal-saml | 861fe0ec10324698e6b617dec70d053016a7be8c | ["Apache-2.0"] | null | null | null | process_build.py | jbaker10/sal-saml | 861fe0ec10324698e6b617dec70d053016a7be8c | ["Apache-2.0"] | null | null | null |
import argparse
import subprocess
import os
# parser = argparse.ArgumentParser()
# parser.add_argument('tag', nargs='?', default='')
# args = parser.parse_args()
tag = os.getenv('TAG', '')
if tag == '':
if os.getenv('CIRCLE_BRANCH') == 'master':
tag = 'latest'
else:
tag = os.getenv('CIRCLE_BRANCH')
dockerfile_content = """FROM macadmins/sal:{}
MAINTAINER Graham Gilbert <[email protected]>
ENV DJANGO_SAML_VERSION 0.16.11
RUN apt-get update && apt-get install -y python-setuptools python-dev libxmlsec1-dev libxml2-dev xmlsec1 python-pip
RUN pip install -U setuptools
RUN pip install git+git://github.com/francoisfreitag/djangosaml2.git@613356c7f0e18ecfde07e4d282d0b82b0f4f7268
ADD attributemaps /home/app/sal/sal/attributemaps
RUN mv /home/app/sal/sal/urls.py /home/app/sal/sal/origurls.py
ADD urls.py /home/app/sal/sal/urls.py
""".format(tag)
with open("Dockerfile", "w") as dockerfile:
dockerfile.write(dockerfile_content)
cmd = [
'docker',
'build',
'-t',
'macadmins/sal-saml:{}'.format(tag),
'.'
]
print subprocess.check_output(cmd)
cmd = [
'docker',
'login',
'-u',
'{}'.format(os.getenv('DOCKER_USER')),
'-p',
'{}'.format(os.getenv('DOCKER_PASS'))
]
try:
print subprocess.check_output(cmd)
except subprocess.CalledProcessError:
print 'Failed to login to docker'
cmd = [
'docker',
'push',
'macadmins/sal-saml:{}'.format(tag)
]
print subprocess.check_output(cmd)
| 22.784615 | 115 | 0.682647 |
4a2348c1c29175bde51385f4fb781604b44dea52 | 12,564 | py | Python | dans_pymodules/ascii2h5block.py | DanielWinklehner/dans_pymodules | 04dfdaeccc171712cad6eb24202608e2eda21eca | ["MIT"] | null | null | null | dans_pymodules/ascii2h5block.py | DanielWinklehner/dans_pymodules | 04dfdaeccc171712cad6eb24202608e2eda21eca | ["MIT"] | null | null | null | dans_pymodules/ascii2h5block.py | DanielWinklehner/dans_pymodules | 04dfdaeccc171712cad6eb24202608e2eda21eca | ["MIT"] | null | null | null |
import h5py
import numpy as np
class TableToH5(object):
def __init__(self, spacing, r_min, r_max, filename='my_h5'):
self.spacing = spacing
self.r_min = r_min
self.r_max = r_max
self.filename = filename
# Create a new h5 file
self.h5_file = h5py.File(filename + '.h5part', )
# Calculates the size of data arrays
# noinspection PyTypeChecker
self._size = np.array((r_max - r_min) / spacing + 1, int)
# Initialize the h5 data
# Data Format:
# Dictionary with keys "ex", "ey", "ez", "hx", "hy", and "hz", which correspond to the vector components
# of the electric field and the H field.
self.data = {"ex": np.zeros(self._size),
"ey": np.zeros(self._size),
"ez": np.zeros(self._size),
"hx": np.zeros(self._size),
"hy": np.zeros(self._size),
"hz": np.zeros(self._size)}
def set_data(self):
with open(self.filename + '.table', 'r') as table:
h = 0
_s = self._size[0] * self._size[1] * self._size[2]
_lines = table.readlines()[5:]
_ex, _ey, _ez = np.zeros(_s), np.zeros(_s), np.zeros(_s)
# _hx, _hy, _hz = np.zeros(_s), np.zeros(_s), np.zeros(_s)
for line in _lines:
_tmp = line.lstrip().rstrip().split()
_ex[h], _ey[h], _ez[h] = float(_tmp[0]), float(_tmp[1]), float(_tmp[2])
# _hx, _hy, _hz = float(_tmp[0]), float(_tmp[1]), float(_tmp[2])
h += 1
for i in range(self._size[2]):
for j in range(self._size[1]):
for k in range(self._size[0]):
self.data["ex"][k, j, i] = _ex[k + j * self._size[2] + i * self._size[2] * self._size[1]] * 1e-4
self.data["ey"][k, j, i] = _ey[k + j * self._size[2] + i * self._size[2] * self._size[1]] * 1e-4
self.data["ez"][k, j, i] = _ez[k + j * self._size[2] + i * self._size[2] * self._size[1]] * 1e-4
# self.data["hx"][i, j, k] = _hx[i + j * self._size[2] + k * self._size[2] * self._size[1]] * 1e-3
# self.data["hy"][i, j, k] = _hy[i + j * self._size[2] + k * self._size[2] * self._size[1]] * 1e-3
# self.data["hz"][i, j, k] = _hz[i + j * self._size[2] + k * self._size[2] * self._size[1]] * 1e-3
def generate(self):
# Create the zeroth step and the Block inside of it
self.h5_file.attrs.__setitem__("Resonance Frequency(Hz)", np.array([49200000.0]))
step0 = self.h5_file.create_group("Step#0")
block = step0.create_group("Block")
# Create the E Field group
e_field = block.create_group("Efield")
# Store the x, y, and z data for the E Field
e_field.create_dataset("0", data=self.data["ex"])
e_field.create_dataset("1", data=self.data["ey"])
e_field.create_dataset("2", data=self.data["ez"])
# Set the spacing and origin attributes for the E Field group
e_field.attrs.__setitem__("__Spacing__", self.spacing)
e_field.attrs.__setitem__("__Origin__", self.r_min)
# Create the H Field group
h_field = block.create_group("Hfield")
        # Store the x, y, and z data points for the H Field
h_field.create_dataset("0", data=self.data["hx"])
h_field.create_dataset("1", data=self.data["hy"])
h_field.create_dataset("2", data=self.data["hz"])
# Set the spacing and origin attributes for the H Field group
h_field.attrs.__setitem__("__Spacing__", self.spacing)
h_field.attrs.__setitem__("__Origin__", self.r_min)
# Close the file
self.h5_file.close()
def _set_uniform_bfield(self, tesla=None, kgauss=None):
# Set the magnetic field in the "hz" direction
if tesla is not None:
self.data["hz"][:, :, :] = tesla * 10.0
elif kgauss is not None:
self.data["hz"][:, :, :] = kgauss
class COMSOLToH5(object):
def __init__(self, spacing, r_min, r_max, filename='my_h5'):
self.spacing = spacing
self.r_min = r_min
self.r_max = r_max
self.filename = filename
# Create a new h5 file
self.h5_file = h5py.File(filename + '.h5part', )
# Calculates the size of data arrays
# noinspection PyTypeChecker
self._size = np.array((r_max - r_min) / spacing + 1, int)
print("Size = ", self._size)
# Initialize the h5 data
# Data Format:
# Dictionary with keys "ex", "ey", "ez", "hx", "hy", and "hz", which correspond to the vector components
# of the electric field and the H field.
self.data = {"ex": np.zeros([self._size[2], self._size[1], self._size[0]]),
"ey": np.zeros([self._size[2], self._size[1], self._size[0]]),
"ez": np.zeros([self._size[2], self._size[1], self._size[0]]),
"hx": np.zeros([self._size[2], self._size[1], self._size[0]]),
"hy": np.zeros([self._size[2], self._size[1], self._size[0]]),
"hz": np.zeros([self._size[2], self._size[1], self._size[0]])}
def set_data(self):
with open(self.filename + '.txt', 'r') as table:
h = 0
_s = self._size[0] * self._size[1] * self._size[2]
_lines = table.readlines()[9:]
# _x, _y, _z = np.zeros(_s), np.zeros(_s), np.zeros(_s)
_ex, _ey, _ez = np.zeros(_s), np.zeros(_s), np.zeros(_s)
# _hx, _hy, _hz = np.zeros(_s), np.zeros(_s), np.zeros(_s)
for line in _lines:
# [X] [Y] [Z] [EX] [EY] [EZ]
_tmp = line.lstrip().rstrip().split()
# _xy_values = [float(_tmp[0]), float(_tmp[1]), float(_tmp[2])]
_values = [float(_tmp[3]), float(_tmp[4]), float(_tmp[5])]
for i in range(3):
if np.isnan(_values[i]):
_values[i] = 0.0
# if np.isnan(_xy_values[i]):
# _xy_values[i] = 0.0
# _x[h], _y[h], _z[h] = _xy_values
_ex[h], _ey[h], _ez[h] = _values
# _hx, _hy, _hz = float(_tmp[0]), float(_tmp[1]), float(_tmp[2])
h += 1
for i in range(self._size[2]):
for j in range(self._size[1]):
for k in range(self._size[0]):
self.data["ex"][i, j, k] = _ex[k + j * self._size[0] + i * self._size[0] * self._size[1]] * 1e-6
self.data["ey"][i, j, k] = _ey[k + j * self._size[0] + i * self._size[0] * self._size[1]] * 1e-6
self.data["ez"][i, j, k] = _ez[k + j * self._size[0] + i * self._size[0] * self._size[1]] * 1e-6
# self.data["hx"][i, j, k] = _hx[i + j * self._size[2] + k * self._size[2] * self._size[1]] * 1e-3
# self.data["hy"][i, j, k] = _hy[i + j * self._size[2] + k * self._size[2] * self._size[1]] * 1e-3
# self.data["hz"][i, j, k] = _hz[i + j * self._size[2] + k * self._size[2] * self._size[1]] * 1e-3
print(self.data["ex"].shape)
def generate(self):
# Create the zeroth step and the Block inside of it
self.h5_file.attrs.__setitem__("Resonance Frequency(Hz)", np.array([49200000.0]))
step0 = self.h5_file.create_group("Step#0")
block = step0.create_group("Block")
# Create the E Field group
e_field = block.create_group("Efield")
# Store the x, y, and z data for the E Field
e_field.create_dataset("0", data=self.data["ex"])
e_field.create_dataset("1", data=self.data["ey"])
e_field.create_dataset("2", data=self.data["ez"])
# Set the spacing and origin attributes for the E Field group
e_field.attrs.__setitem__("__Spacing__", self.spacing)
e_field.attrs.__setitem__("__Origin__", self.r_min)
# Create the H Field group
h_field = block.create_group("Hfield")
        # Store the x, y, and z data points for the H Field
h_field.create_dataset("0", data=self.data["hx"])
h_field.create_dataset("1", data=self.data["hy"])
h_field.create_dataset("2", data=self.data["hz"])
# Set the spacing and origin attributes for the H Field group
h_field.attrs.__setitem__("__Spacing__", self.spacing)
h_field.attrs.__setitem__("__Origin__", self.r_min)
# Close the file
self.h5_file.close()
def _set_uniform_bfield(self, tesla=None, kgauss=None):
# Set the magnetic field in the "hz" direction
if tesla is not None:
self.data["hz"][:, :, :] = tesla * 10.0
elif kgauss is not None:
self.data["hz"][:, :, :] = kgauss
class createBFieldMap(object):
def __init__(self, spacing, r_min, r_max, filename='dummy_field'):
self.spacing = spacing
self.r_min = r_min
self.r_max = r_max
self.filename = filename
# Create a new h5 file
self.h5_file = h5py.File(filename + '.h5part', )
# Calculates the size of data arrays
# noinspection PyTypeChecker
self._size = np.array((r_max - r_min) / spacing + 1, int)
# Initialize the h5 data
# Data Format:
# Dictionary with keys "ex", "ey", "ez", "hx", "hy", and "hz", which correspond to the vector components
# of the electric field and the H field.
self.data = {"ex": np.zeros(self._size),
"ey": np.zeros(self._size),
"ez": np.zeros(self._size),
"hx": np.zeros(self._size),
"hy": np.zeros(self._size),
"hz": np.zeros(self._size)}
def generate(self):
# Create the zeroth step and the Block inside of it
self.h5_file.attrs.__setitem__("Resonance Frequency(Hz)", np.array([49200000.0]))
step0 = self.h5_file.create_group("Step#0")
block = step0.create_group("Block")
# Create the E Field group
e_field = block.create_group("Efield")
# Store the x, y, and z data for the E Field
e_field.create_dataset("0", data=self.data["ex"])
e_field.create_dataset("1", data=self.data["ey"])
e_field.create_dataset("2", data=self.data["ez"])
# Set the spacing and origin attributes for the E Field group
e_field.attrs.__setitem__("__Spacing__", self.spacing)
e_field.attrs.__setitem__("__Origin__", self.r_min)
# Create the H Field group
h_field = block.create_group("Hfield")
        # Store the x, y, and z data points for the H Field
h_field.create_dataset("0", data=self.data["hx"])
h_field.create_dataset("1", data=self.data["hy"])
h_field.create_dataset("2", data=self.data["hz"])
# Set the spacing and origin attributes for the H Field group
h_field.attrs.__setitem__("__Spacing__", self.spacing)
h_field.attrs.__setitem__("__Origin__", self.r_min)
# Close the file
self.h5_file.close()
def _set_uniform_bfield(self, tesla=None, kgauss=None):
# Set the magnetic field in the "hz" direction
if tesla is not None:
self.data["hz"][:, :, :] = tesla * 10.0
elif kgauss is not None:
self.data["hz"][:, :, :] = kgauss
if __name__ == '__main__':
# Spacing and origin attributes
# spacing = np.array([20.0, 20.0, 20.0])
# r_min = np.array([-100.0, -100.0, -100.0])
# r_max = np.array([100.0, 100.0, 100.0])
spacing = np.array([1.0, 1.0, 1.0])
r_min = np.array([-250.0, -250.0, -30.0])
r_max = np.array([250.0, 250.0, 30.0])
filename = r"C:\Users\Daniel Winklehner\Dropbox (MIT)\Projects\RFQ Direct" \
r" Injection\Comsol\AIMA CR Design\AIMA_80kV_RF_no_SI_2mm"
# Assumes that the .table filename is the same as the filename you want to save the h5 to.
# filename = '/home/philip/src/dans_pymodules/dans_pymodules/test_fieldmaps/plate_capacitor_11x11x11_test'
# my_h5 = TableToH5(spacing=spacing, r_min=r_min, r_max=r_max, filename=filename)
# my_h5.set_data()
# # my_h5 = createBFieldMap(spacing, r_min, r_max, filename=filename)
# my_h5._set_uniform_bfield(tesla=1.041684)
my_h5 = COMSOLToH5(spacing=spacing, r_min=r_min, r_max=r_max, filename=filename)
my_h5.set_data()
my_h5.generate()
| 42.30303 | 118 | 0.558103 |
4a23492fd99a74e70aaa55e7955eb4eac1084ba8 | 467 | py | Python | config.py | najma-amin/News-Highlights | caad6c5af7650c2cc8490f100d070573fe18f6a6 | ["MIT"] | 7 | 2019-03-14T08:15:39.000Z | 2021-01-21T11:34:32.000Z | config.py | najma-amin/News-Highlights | caad6c5af7650c2cc8490f100d070573fe18f6a6 | ["MIT"] | null | null | null | config.py | najma-amin/News-Highlights | caad6c5af7650c2cc8490f100d070573fe18f6a6 | ["MIT"] | 11 | 2019-02-15T07:19:43.000Z | 2021-08-06T06:16:31.000Z |
import os
class Config:
NEWS_SOURCES_BASE_URL ='https://newsapi.org/v2/sources?language=en&category={}&apiKey={}'
ARTICLES_BASE_URL = 'https://newsapi.org/v2/everything?language=en&sources={}&apiKey={}'
NEWS_API_KEY = os.environ.get('NEWS_API_KEY')
@staticmethod
def init_app(app):
pass
class ProdConfig(Config):
pass
class DevConfig(Config):
DEBUG = True
config_options = {
'development':DevConfig,
'production':ProdConfig
}
| 18.68 | 93 | 0.704497 |
4a2349ff0102dcdbfd999d882769e8afc9f8d74d | 2,144 | py | Python | modules/signatures/windows/injection_explorer.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | ["BSD-3-Clause"] | 24 | 2021-06-21T07:35:37.000Z | 2022-03-22T03:33:59.000Z | modules/signatures/windows/injection_explorer.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | ["BSD-3-Clause"] | 3 | 2021-07-01T08:09:05.000Z | 2022-01-28T03:38:36.000Z | modules/signatures/windows/injection_explorer.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | ["BSD-3-Clause"] | 6 | 2021-06-22T05:32:57.000Z | 2022-02-11T02:05:45.000Z |
# Copyright (C) 2017 Kevin Ross
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class InjectionExplorer(Signature):
name = "injection_explorer"
description = "Performs code injection into the Explorer process using the Shell_TrayWnd technique"
severity = 3
categories = ["injection"]
authors = ["Kevin Ross"]
minimum = "2.0"
ttp = ["T1055"]
references = ["www.endgame.com/blog/technical-blog/ten-process-injection-techniques-technical-survey-common-and-trending-process"]
filter_apinames = [
"Process32NextW",
"FindWindowW",
"SendNotifyMessageA",
]
explorerpids = []
windowhandle = ""
injected = False
def on_call(self, call, process):
if call["api"] == "Process32NextW":
if call["arguments"]["process_name"] == "explorer.exe":
self.explorerpids.append(call["arguments"]["process_identifier"])
self.mark_call()
elif call["api"] == "FindWindowW":
if call["arguments"]["class_name"] == "Shell_TrayWnd":
self.windowhandle = call["return_value"]
self.mark_call()
elif call["api"] == "SendNotifyMessageA":
if call["arguments"]["process_identifier"] in self.explorerpids and int(call["arguments"]["window_handle"], 16) == self.windowhandle:
self.injected = True
self.mark_call()
def on_complete(self):
if self.injected:
return self.has_marks()
| 37.614035 | 145 | 0.662313 |
4a234aeb79361b458bd7aefc243f75f0a41c3ec5 | 1,429 | py | Python | beastx/modules/recent flooder.py | Digasi123percy/Beast-X | cf2c47db6af0c4afaa3b51b76ef7a1a2f0e0bc81 | ["MIT"] | 11 | 2021-11-07T12:04:20.000Z | 2022-03-10T10:32:59.000Z | beastx/modules/recent flooder.py | Digasi123percy/Beast-X | cf2c47db6af0c4afaa3b51b76ef7a1a2f0e0bc81 | ["MIT"] | null | null | null | beastx/modules/recent flooder.py | Digasi123percy/Beast-X | cf2c47db6af0c4afaa3b51b76ef7a1a2f0e0bc81 | ["MIT"] | 114 | 2021-11-07T13:11:19.000Z | 2022-03-31T02:00:04.000Z |
from telethon import events
import asyncio
from beastx import bot as javes
from beastx.utils import admin_cmd
@javes.on(admin_cmd("chaos2"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0
animation_ttl = range(0,10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000)
await event.edit("☠️Chaos2(Recent_Flooder) Begins...HA...HA...HA ...👽‼️👾")
animation_chars = ["😀","😃","😄","😁","😆","😅","😂","🤣","☺️","😊","😇","🙂","🙃","😉","😌","😍","🥰","😘","😗","😙","😚","😋","😛","😝","😜","🤪","🤨","🧐","🤓","😎","🤩","🥳","😏","😒", "😞","😔","😟","😕","🙁","☹️","😣","😖","😫","😩","🥺","😢","😭","😤","😠","😡","🤬","🤯","😳","🥵","🥶","😱","😰","😥","😓","🤗","🤔","🤭","🤫","🤥","😶","😐","😑", "😬","🙄","😯","😦","😧","😲","🥱","😴","🤤","😪","😵","🤐","🥴","🤢","🤮","🤧","😷","🤒","🤕","🤑","🤠","😈","👿","👹","👺","🤡","👻","💀","☠️","👽","👾","🤖","🎃","😺" ,"😸","😹","😻","😼","😽","🙀","😿","😾","🌞","🌝","🌛","🌜","🌚","🌕","🌖","🌗","🌘","🌑","🌒","🌓","🌔","🌙","🌎","🌍","🌏","🪐","💫","⭐️","🌟","✨","⚡️","☄️","💥", "🔥","🌪","🌈","☀️","🌤","⛅️","🌥","☁️","🌦","🌧","⛈","🌩","🌨","❄️","☃️","⛄️","🌬","💨","💧","💦","🌊","🌫","🎯","🎲","⚽️","🏀"]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 160])
| 68.047619 | 778 | 0.343597 |
4a234b5d0b7069f7087d1046c8d84fc3563751d8 | 631 | py | Python | num/romanToInt.py | hikair/leetcode_py | f917fd619fa25960e5d046aa512ac0d582f2084a | ["Apache-2.0"] | null | null | null | num/romanToInt.py | hikair/leetcode_py | f917fd619fa25960e5d046aa512ac0d582f2084a | ["Apache-2.0"] | null | null | null | num/romanToInt.py | hikair/leetcode_py | f917fd619fa25960e5d046aa512ac0d582f2084a | ["Apache-2.0"] | null | null | null |
class Solution:
def romanToInt(self, s):
map = {'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}
size = len(s)
num = 0
for index,str in enumerate(s):
sign = 1
if index + 1 < size:
if map[str] < map[s[index+1]]:
sign = -1
num += map[str] * sign
return num
s = Solution()
result = s.romanToInt('III') # 3
print(result)
result = s.romanToInt('IV') # 4
print(result)
result = s.romanToInt('IX') # 9
print(result)
result = s.romanToInt('LVIII') # 58
print(result)
result = s.romanToInt('MCMXCIV') # 1994
print(result)
| 27.434783 | 66 | 0.524564 |
4a234bed2ae9f0fffbcaf874086727ac51e1cdcd | 2,965 | py | Python | 06-sistemaLinear/sistemaLinear_v11/uteis.py | jonasht/python | 68cf6bd7e27d71a104917d402bbd443afb82810d | ["MIT"] | null | null | null | 06-sistemaLinear/sistemaLinear_v11/uteis.py | jonasht/python | 68cf6bd7e27d71a104917d402bbd443afb82810d | ["MIT"] | null | null | null | 06-sistemaLinear/sistemaLinear_v11/uteis.py | jonasht/python | 68cf6bd7e27d71a104917d402bbd443afb82810d | ["MIT"] | null | null | null |
import numpy as np
conta = '2x-32y3z=20'
vars_conta = ['x', 'y', 'z', '=']
import exemplo as ex
def remove_dirty(conta):
vars_toRemove = ['[', ']', ',', '\'']
for var in vars_toRemove:
conta = conta.replace(var, '')
return conta
def insert_1var(conta):
conta_copy = list(conta)
i = 0
for c in conta:
# print('1:', c)
if (c.isalpha() and (conta_copy[i-1].isalpha() or
conta_copy[i-1] in '+-' or
conta_copy[0].isalpha()
)):
# print('2:', c)
conta_copy.insert(i, '1')
i+=1
i+=1
conta_copy = ''.join(conta_copy)
return conta_copy
def get_vars(conta) -> list:
vars = list()
for c in conta:
# print(c)
if c.isalpha() or c == '=':
vars.append(c)
# print(c, vars)
# print(vars)
return vars
def separar_linhas(conta):
conta = conta.split('\n')
while '' in conta:
conta.remove('')
return conta
def arrumar(conta, vars):
a = list()
b = 0
for var in vars:
conta = conta.split(var)
# print(conta)
# conta.remove("''")
if var != '=':
a.append(conta.pop(0))
else:
conta = ''.join(conta)
b = conta
conta = ''.join(conta)
# print('a', a, 'conta:', conta)
# print('='*30)
# print('a:', a, 'b:', b)
a = list(map(int, a))
b = int(b)
return a, b
def calcular(conta):
    # remove spaces
while ' ' in conta:
conta = conta.replace(' ', '')
print(conta)
conta = separar_linhas(conta)
print('1:', conta)
conta_c = list()
for c in conta:
conta_c.append(insert_1var(c))
conta = conta_c
print('2:', conta)
vars = get_vars(conta[0])
print('vars:', vars)
# conta_c = conta.copy()
# print('c', conta_c)
A = list()
B = list()
for l in conta:
print(l)
a, b = arrumar(l, vars)
A.append(a)
B.append(b)
A = np.array(A)
B = np.array(B)
print('A:', A, 'B:', B)
conta = np.linalg.solve(A, B)
# print(conta)
vars = vars[:-1]
solve = dict()
for v, c in zip(vars, conta):
solve[v] = c
# print(solve)
return solve
if __name__ == '__main__':
# conta3x3 = '''2x+6y-2z=24\n4x+5y-4z=24\n6x+5y-4z=28\n'''
conta3x3v1 = '''
2x+y+z=8\n
x+y+4z=15\n
3x+2y+0z=9\n
'''
conta3x3_v2 = '1x+2y-3z=1\n3x-1y+2z=0\n2x+1y+1z=2\n\n'
conta3x3v3 = '''
x+0y+0z=3
0x+y+0z=2
0x+0y+z=1
'''
    # use this system as the program's example
conta3x3v4 = '''
x-3y+5z=1
x+2y+z=12
2x-y+3z=10
'''
# print(calcular(conta3x3))
# calcular(conta3x3v3)
# calcular(conta3x3v4)
# print(insert_1var('2x+y+1z=8'))
    print(calcular(ex.conta5x5))
| 20.308219 | 62 | 0.483642 |
4a234c83e8610cdcd3dcd1ebd1579f6862ec60c4 | 486 | py | Python | api.py | idomoz/rpc-over-http | f5161241d185411a0be4720e69a878506dd2018e | ["MIT"] | 3 | 2018-10-19T20:54:07.000Z | 2020-06-04T16:32:59.000Z | api.py | idomoz/rpc-over-http | f5161241d185411a0be4720e69a878506dd2018e | ["MIT"] | null | null | null | api.py | idomoz/rpc-over-http | f5161241d185411a0be4720e69a878506dd2018e | ["MIT"] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z |
def logout_user(address):
pass
def register_user(username, password, age):
pass
BANNES_ADDRESS = '1.1.1.1'
def login(username, password, request):
if username == 'foo' and password == 'bar' and request.remote_addr != BANNES_ADDRESS:
return 'logged in!'
return 'forbidden'
def logout(request):
logout_user(request.remote_addr)
def register(username, password, age=20, request=None):
register_user(username, password, age)
| 20.25 | 90 | 0.666667 |
4a234dabc6de0b221e02074da71f8b3f09cf4c7b | 3,565 | py | Python | bindings/python/ensmallen/datasets/string/thioalkalivibriospakl19.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/thioalkalivibriospakl19.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/thioalkalivibriospakl19.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | ["MIT"] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z |
"""
This file offers the methods to automatically retrieve the graph Thioalkalivibrio sp. AKL19.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ThioalkalivibrioSpAkl19(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Thioalkalivibrio sp. AKL19 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Thioalkalivibrio sp. AKL19 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ThioalkalivibrioSpAkl19",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
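if __name__ == "__main__":
    # Hedged usage sketch, not part of the generated module: retrieve the graph with
    # default options and print its summary. Running this downloads data from the
    # STRING repository, so it is guarded behind __main__.
    graph = ThioalkalivibrioSpAkl19(directed=False)
    print(graph)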
| 33.009259 | 223 | 0.677419 |
4a234dfefb9a906744480adb007a16bb213e816d | 1,755 | py | Python | web_api/yonyou/outputs/sale_delivery.py | zhanghe06/flask_restful | 6ef54f3f7efbbaff6169e963dcf45ab25e11e593 | ["MIT"] | 1 | 2020-12-04T03:15:47.000Z | 2020-12-04T03:15:47.000Z | web_api/yonyou/outputs/sale_delivery.py | zhanghe06/flask_restful | 6ef54f3f7efbbaff6169e963dcf45ab25e11e593 | ["MIT"] | 1 | 2021-06-01T22:24:27.000Z | 2021-06-01T22:24:27.000Z | web_api/yonyou/outputs/sale_delivery.py | zhanghe06/flask_restful | 6ef54f3f7efbbaff6169e963dcf45ab25e11e593 | ["MIT"] | 2 | 2020-12-04T03:16:18.000Z | 2021-09-04T14:10:12.000Z |
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: sale_delivery.py
@time: 2018-07-24 16:58
"""
from __future__ import unicode_literals
from flask_restful import fields
fields_item_sale_delivery = {
'id': fields.Integer(attribute='ID'),
'code': fields.String(attribute='code'),
    'id_customer': fields.Integer(attribute='idcustomer'),  # customer id
    'id_settle': fields.Integer(attribute='idsettlecustomer'),  # settlement customer id
'is_sale_out': fields.Integer(attribute='isSaleOut'),
'recive_type': fields.Integer(attribute='reciveType'),
'amount': fields.Float(attribute='amount'),
'amount_tax': fields.Float(attribute='taxAmount'),
'receive_balance': fields.Float(attribute='ReceiveBalance'),
'makerid': fields.Integer(attribute='makerid'),
'maker': fields.String(attribute='maker'),
'create_time': fields.DateTime(dt_format=b'iso8601', attribute='createdtime'),
'update_time': fields.DateTime(dt_format=b'iso8601', attribute='updated'),
}
fields_item_sale_delivery_cn = {
'主键': fields.Integer(attribute='ID'),
'编号': fields.String(attribute='code'),
    '客户': fields.Integer(attribute='idcustomer'),  # customer id
    '结算单位': fields.Integer(attribute='idsettlecustomer'),  # settlement customer id
'出库状态': fields.Integer(attribute='isSaleOut'),
'收款方式': fields.Integer(attribute='reciveType'),
'金额': fields.Float(attribute='amount'),
'含税金额': fields.Float(attribute='taxAmount'),
'应收余额': fields.Float(attribute='ReceiveBalance'),
'制单人ID': fields.Integer(attribute='makerid'),
'制单人': fields.String(attribute='maker'),
'创建时间': fields.DateTime(dt_format=b'iso8601', attribute='createdtime'),
'更细时间': fields.DateTime(dt_format=b'iso8601', attribute='updated'),
}
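if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: these dicts are
    # flask_restful field masks, applied to a record with marshal(). The sample
    # record below is made up for illustration; missing attributes fall back to
    # each field's default.
    from flask_restful import marshal
    sample = {'ID': 1, 'code': 'SD-0001', 'idcustomer': 7, 'amount': 10.5}
    print(marshal(sample, fields_item_sale_delivery))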
| 37.340426 | 82 | 0.702564 |
4a234f249046ba4c6ab779d8f0b2c474974be586 | 5,296 | py | Python | origins/allometric.py | weka511/fractals | fa4e39677ea3ed7713e40a55b9453b2826f11a6c | ["MIT"] | 2 | 2020-07-22T01:39:49.000Z | 2020-07-28T04:36:22.000Z | origins/allometric.py | weka511/fractals | fa4e39677ea3ed7713e40a55b9453b2826f11a6c | ["MIT"] | 11 | 2019-06-20T03:20:12.000Z | 2020-07-20T03:07:55.000Z | origins/allometric.py | weka511/complexity | 435ffab978e4499aea7c2c83788533867cc9b062 | ["MIT"] | null | null | null |
# Copyright (C) 2019-2020 Greenweaves Software Limited
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Optimize flow through a branching network, after West et al--
# A General Model for the Origin of Allometric Scaling Laws in Biology
# http://hermes.ffn.ub.es/oscar/Biologia/Escala/Science_276_122_1997.pdf
from random import random, seed, choice
from ga import evolve,plot_fitness, perturb, perturb_n
from matplotlib.pyplot import plot, show, legend, xlabel, ylabel, ylim, title, figure, savefig, subplot
from numpy import mean, std
from math import sqrt
# create_branching_network
#
# Create a representation of flow through a network
#
def create_branching_network(c = 10, #Number of levels
gamma0 = 0.1):
gamma = [gamma0+(1-gamma0)*random() for _ in range(c)] # scale factor for branching - lengths
n = [choice([2,4,8,16,32]) for _ in range(c)] # number of branches at each level
beta = [1/sqrt(n0) for n0 in n] # scale factor for branching - radii
return (beta,gamma,n)
# get_resistance
#
# Calculate resistance using West et al equation (6)
def get_resistance(beta, # scale factor for branching - radii
gamma, # scale factor for branching - lengths
n, # number of branches at each level
r_c=1, # initial radius at root of tree
l_c=1): # initial length at root of tree
r = r_c
l = l_c
R = []
for k in range(len(beta),0,-1):
r /= beta[k-1]
l /= gamma[k-1]
R.append(l * r**-4)
Z = 0
N = 1
for k in range(len(beta)):
Z += R[k]/N
N *= n[k]
return Z
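# Hedged usage sketch (not part of the original script): build a small random
# network with create_branching_network and evaluate its flow resistance.
# Wrapped in a function so nothing extra runs when the script's main block executes.
def resistance_example(levels=4):
    beta, gamma, n = create_branching_network(c=levels)
    return get_resistance(beta, gamma, n)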
# evaluate_branching_network
#
# The score is 1/resistance, as we want a network that minimizes resistance.
def evaluate_branching_network(individual):
beta, gamma,n = individual
return 1/get_resistance(beta,gamma,n)
# mutate_branching_network
def mutate_branching_network(individual, # Individual to be mutated
probability = 0.5, # Probability of mutation
sigma = 0.5): # standard deviation for mutating continuous values
beta,gamma,n = individual
gamma = [perturb(g,probability=probability) for g in gamma]
n = [perturb_n(n0,probability=probability) for n0 in n]
beta = [1/sqrt(n0) for n0 in n]
return beta, gamma,n
if __name__=='__main__':
import argparse
from matplotlib import rc
rc('text', usetex=True)
parser = argparse.ArgumentParser('Evolve branching network')
parser.add_argument('--seed', default=None, type=int, help='Seed for random number generation')
parser.add_argument('--N', default=1000, type=int, help='Number of generations')
parser.add_argument('--M', default=100, type=int, help='Population size')
parser.add_argument('--c', default=10, type=int, help='c')
parser.add_argument('--m', default=0.1, type=float, help='mutation probability')
parser.add_argument('--gamma', default=0.1, type=float, help='Initial gamma')
parser.add_argument('--savefig', default=None, help='File name for saving plots')
args = parser.parse_args()
seed(args.seed)
population,statistics,indices = evolve(
N = args.N,
M = args.M,
create = lambda :create_branching_network(c=args.c, gamma0=args.gamma),
evaluate = evaluate_branching_network,
mutate = lambda individual:mutate_branching_network(individual,probability=args.m),
crossover = lambda population:population)
beta,gamma,n = population[indices[-1]]
print (std(n)/mean(n), std(beta)/mean(beta), std(gamma)/mean(gamma))
fig=figure(figsize=(10,10))
fig.tight_layout()
subplot(211)
plot_fitness(statistics,name='Fitness')
subplot(212)
title('Evolution of Parameters')
plot([b/max(beta) for b in beta], 'r', label=r'$\beta$')
plot([g/max(gamma) for g in gamma], 'g', label=r'$\gamma$')
plot([n0/max(n) for n0 in n], 'b', label='n')
legend()
if args.savefig!=None:
savefig(args.savefig)
    show()
| 41.375 | 107 | 0.653512 |
4a23509fb070c1002f2d1a07178e3ef9ecaa9e96 | 961 | py | Python | sagas/kit/viz_base.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | ["Apache-2.0"] | 3 | 2020-01-11T13:55:38.000Z | 2020-08-25T22:34:15.000Z | sagas/kit/viz_base.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | ["Apache-2.0"] | null | null | null | sagas/kit/viz_base.py | samlet/stack | 47db17fd4fdab264032f224dca31a4bb1d19b754 | ["Apache-2.0"] | 1 | 2021-01-01T05:21:44.000Z | 2021-01-01T05:21:44.000Z |
class BaseViz(object):
def __init__(self, shape='egg', size='8,5', fontsize=0, enable_node_pos=False, translit_lang=None):
from graphviz import Digraph
self.f = Digraph('deps', filename='deps.gv')
self.f.attr(rankdir='LR', size=size)
        # the font 'Calibri' supports Arabic text
self.f.attr('node', shape=shape, fontname='Calibri')
if fontsize != 0:
self.f.attr(fontsize=str(fontsize))
self.enable_node_pos = enable_node_pos
self.translit_lang = translit_lang
def default_node(self):
self.f.attr('node', style='solid', color='black')
def edge(self, head, node, rel):
self.f.edge(head, node,
rel, fontsize='10', fontname='Calibri')
def node(self, text, emphasis=False):
if emphasis:
self.f.attr('node', style='filled', color='lightgrey')
self.f.node(text)
if emphasis:
self.default_node()
| 34.321429 | 103 | 0.600416 |
4a2351043bd5f90834060be78606dce4cdf58b90 | 3,614 | py | Python | ProgramFlow/tkinter/screen.py | kumarvgit/python3 | 318c5e7503fafc9c60082fa123e2930bd82a4ec9 | ["MIT"] | null | null | null | ProgramFlow/tkinter/screen.py | kumarvgit/python3 | 318c5e7503fafc9c60082fa123e2930bd82a4ec9 | ["MIT"] | null | null | null | ProgramFlow/tkinter/screen.py | kumarvgit/python3 | 318c5e7503fafc9c60082fa123e2930bd82a4ec9 | ["MIT"] | null | null | null |
try:
import tkinter
except ImportError: # python 2
import Tkinter as tkinter
import os
mainWindow = tkinter.Tk()
mainWindow.title("Grid Demo")
mainWindow.geometry('640x480-8-200')
mainWindow['padx'] = 8
label= tkinter.Label(mainWindow, text="Tkinter Grid Demo")
label.grid(row=0, column=0, columnspan=3)
mainWindow.columnconfigure(0, weight=100)
mainWindow.columnconfigure(1, weight=1)
mainWindow.columnconfigure(2, weight=1000)
mainWindow.columnconfigure(3, weight=600)
mainWindow.columnconfigure(4, weight=1000)
mainWindow.rowconfigure(0, weight=1)
mainWindow.rowconfigure(1, weight=10)
mainWindow.rowconfigure(2, weight=1)
mainWindow.rowconfigure(3, weight=3)
mainWindow.rowconfigure(4, weight=3)
fileList = tkinter.Listbox(mainWindow)
fileList.grid(row=1, column=0, sticky='nsew', rowspan=2)
fileList.config(border=2, relief='sunken')
for zone in os.listdir('/usr/bin'): # '/Windows/System32'
fileList.insert(tkinter.END, zone)
listScroll = tkinter.Scrollbar(mainWindow, orient=tkinter.VERTICAL, command=fileList.yview)
listScroll.grid(row=1, column=1, sticky='nsw', rowspan=2)
fileList['yscrollcommand'] = listScroll.set
# frame for the radio buttons
optionFrame = tkinter.LabelFrame(mainWindow, text="File Details")
optionFrame.grid(row=1, column=2, sticky='ne')
rbValue = tkinter.IntVar()
rbValue.set(1)
# Radio buttons
radio1 = tkinter.Radiobutton(optionFrame, text="Filename", value=1, variable=rbValue)
radio2 = tkinter.Radiobutton(optionFrame, text="Path", value=2, variable=rbValue)
radio3 = tkinter.Radiobutton(optionFrame, text="Timestamp", value=3, variable=rbValue)
radio1.grid(row=0, column=0, sticky='w')
radio2.grid(row=1, column=0, sticky='w')
radio3.grid(row=2, column=0, sticky='w')
# Widget to display the result
resultLabel = tkinter.Label(mainWindow, text="Result")
resultLabel.grid(row=2, column=2, sticky='nw')
result = tkinter.Entry(mainWindow)
result.grid(row=2, column=2, sticky='sw')
# Frame for the time spinners
timeFrame = tkinter.LabelFrame(mainWindow, text="Time")
timeFrame.grid(row=3, column=0, sticky='new')
# Time spinners
hourSpinner = tkinter.Spinbox(timeFrame, width=2, values=tuple(range(0, 24)))
minuteSpinner = tkinter.Spinbox(timeFrame, width=2, from_=0, to=59)
secondSpinner = tkinter.Spinbox(timeFrame, width=2, from_=0, to=59)
hourSpinner.grid(row=0, column=0)
tkinter.Label(timeFrame, text=':').grid(row=0, column=1)
minuteSpinner.grid(row=0, column=2)
tkinter.Label(timeFrame, text=':').grid(row=0, column=3)
secondSpinner.grid(row=0, column=4)
timeFrame['padx'] = 36
# Frame for the date spinners
dateFrame = tkinter.Frame(mainWindow)
dateFrame.grid(row=4, column=0, sticky='new')
# Date labels
dayLabel = tkinter.Label(dateFrame, text="Day")
monthLabel = tkinter.Label(dateFrame, text="Month")
yearLabel = tkinter.Label(dateFrame, text="Year")
dayLabel.grid(row=0, column=0, sticky='w')
monthLabel.grid(row=0, column=1, sticky='w')
yearLabel.grid(row=0, column=2, sticky='w')
# Date spinners
daySpin = tkinter.Spinbox(dateFrame, width=5, from_=1, to=31)
monthSpin = tkinter.Spinbox(dateFrame, width=5, values=("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"))
yearSpin = tkinter.Spinbox(dateFrame, width=5, from_=2000, to=2099)
daySpin.grid(row=1, column=0)
monthSpin.grid(row=1, column=1)
yearSpin.grid(row=1, column=2)
# Buttons
okButton = tkinter.Button(mainWindow, text="OK")
cancelButton = tkinter.Button(mainWindow, text="Cancel", command=mainWindow.destroy)
okButton.grid(row=4, column=3, sticky='e')
cancelButton.grid(row=4, column=4, sticky='w')
mainWindow.mainloop()
print(rbValue.get())
| 36.877551 | 140 | 0.749032 |
4a2351d637ce620252cb8d887145ae4806c5ae75 | 176 | py | Python | StoreApp/errors.py | ttimms/coffee_shop | 34bc240a71d005b575d6ea2801990cadad2021f8 | ["MIT"] | null | null | null | StoreApp/errors.py | ttimms/coffee_shop | 34bc240a71d005b575d6ea2801990cadad2021f8 | ["MIT"] | null | null | null | StoreApp/errors.py | ttimms/coffee_shop | 34bc240a71d005b575d6ea2801990cadad2021f8 | ["MIT"] | null | null | null |
from flask import render_template
from StoreApp import storeApp, db, routes
@storeApp.errorhandler(404)
def not_found_error(error):
    return render_template('404.html'), 404
| 29.333333 | 43 | 0.801136 |
4a2351e24e91028103fe2654bcf5f5505c72a94a | 77,842 | py | Python | sarpy/io/complex/gff.py | mstewart-vsc/sarpy | 2c9a51f1961524da8607cdb5042da0ec5b65130c | ["MIT"] | null | null | null | sarpy/io/complex/gff.py | mstewart-vsc/sarpy | 2c9a51f1961524da8607cdb5042da0ec5b65130c | ["MIT"] | null | null | null | sarpy/io/complex/gff.py | mstewart-vsc/sarpy | 2c9a51f1961524da8607cdb5042da0ec5b65130c | ["MIT"] | null | null | null |
"""
Functionality for reading a GFF file into a SICD model.
Note: This has been tested on files of version 1.8 and 2.5, but hopefully works for others.
"""
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
import logging
import os
import struct
from typing import Union, BinaryIO
from datetime import datetime
from tempfile import mkstemp
import zlib
import gc
import numpy
from scipy.constants import speed_of_light
from sarpy.io.general.base import BaseReader, BIPChipper, BSQChipper, \
is_file_like, SarpyIOError
from sarpy.io.general.nitf import MemMap
from sarpy.geometry.geocoords import geodetic_to_ecf, wgs_84_norm, ned_to_ecf
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType, \
RadarModeType
from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType
from sarpy.io.complex.sicd_elements.ImageData import ImageDataType
from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType
from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, \
WgtTypeType
from sarpy.io.complex.sicd_elements.SCPCOA import SCPCOAType
from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType
from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, \
WaveformParametersType, ChanParametersType
from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, \
RcvChanProcType
from sarpy.io.complex.sicd_elements.Radiometric import RadiometricType, \
NoiseLevelType_
try:
import PIL
except ImportError:
PIL = None
logger = logging.getLogger(__name__)
########
# base expected functionality for a module with an implemented Reader
def is_a(file_name):
"""
    Tests whether a given file_name corresponds to a GFF file. Returns a reader instance, if so.
Parameters
----------
file_name : str|BinaryIO
the file_name to check
Returns
-------
    GFFReader|None
        `GFFReader` instance if GFF file, `None` otherwise
"""
if is_file_like(file_name):
return None
try:
gff_details = GFFDetails(file_name)
logger.info('File {} is determined to be a GFF version {} file.'.format(
file_name, gff_details.version))
return GFFReader(gff_details)
except SarpyIOError:
return None
####################
# utility functions
def _get_string(bytes_in):
bytes_in = bytes_in.replace(b'\x00', b'')
return bytes_in.decode('utf-8')
def _rescale_float(int_in, scale):
return float(int_in)/scale
####################
# version 1 specific header parsing
class _GFFHeader_1_6(object):
"""
Interpreter for the GFF version 1.6 header
"""
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
self.file_object = fi
self.estr = estr
self.version = '1.6'
fi.seek(12, os.SEEK_SET)
# starting at line 3 of def
self.header_length = struct.unpack(estr+'I', fi.read(4))[0]
if self.header_length < 952:
raise ValueError(
'The provided header is apparently too short to be a version 1.6 GFF header')
fi.read(2) # redundant
self.creator = _get_string(fi.read(24))
self.date_time = struct.unpack(estr+'6H', fi.read(6*2)) # year,month, day, hour, minute, second
fi.read(2) # endian, already parsed
self.bytes_per_pixel, self.frame_count, self.image_type, \
self.row_major, self.range_count, self.azimuth_count = \
struct.unpack(estr+'6I', fi.read(6*4))
self.scale_exponent, self.scale_mantissa, self.offset_exponent, self.offset_mantissa = \
struct.unpack(estr+'4i', fi.read(4*4))
# at line 17 of def
fi.read(2) # redundant
self.comment = _get_string(fi.read(166))
self.image_plane = struct.unpack(estr+'I', fi.read(4))[0]
range_pixel_size, azimuth_pixel_size, azimuth_overlap = struct.unpack(estr+'3I', fi.read(3*4))
self.range_pixel_size = _rescale_float(range_pixel_size, 1 << 16)
self.azimuth_pixel_size = _rescale_float(azimuth_pixel_size, 1 << 16)
self.azimuth_overlap = _rescale_float(azimuth_overlap, 1 << 16)
srp_lat, srp_lon, srp_alt, rfoa, x_to_srp = struct.unpack(estr+'5i', fi.read(5*4))
self.srp_lat = _rescale_float(srp_lat, 1 << 23)
self.srp_lon = _rescale_float(srp_lon, 1 << 23)
self.srp_alt = _rescale_float(srp_alt, 1 << 16)
self.rfoa = _rescale_float(rfoa, 1 << 23)
self.x_to_srp = _rescale_float(x_to_srp, 1 << 16)
fi.read(2)
self.phase_name = _get_string(fi.read(128))
fi.read(2)
self.image_name = _get_string(fi.read(128))
# at line 32 of def
self.look_count, self.param_ref_ap, self.param_ref_pos = \
struct.unpack(estr+'3I', fi.read(3*4))
graze_angle, squint, gta, range_beam_ctr, flight_time = \
struct.unpack(estr + 'I2i2I', fi.read(5*4))
self.graze_angle = _rescale_float(graze_angle, 1 << 23)
self.squint = _rescale_float(squint, 1 << 23)
self.gta = _rescale_float(gta, 1 << 23)
self.range_beam_ctr = _rescale_float(range_beam_ctr, 1 << 8)
self.flight_time = _rescale_float(flight_time, 1000)
self.range_chirp_rate, x_to_start, self.mo_comp_mode, v_x = \
struct.unpack(estr+'fi2I', fi.read(4*4))
self.x_to_start = _rescale_float(x_to_start, 1 << 16)
self.v_x = _rescale_float(v_x, 1 << 16)
# at line 44 of def
apc_lat, apc_lon, apc_alt = struct.unpack(estr+'3i', fi.read(3*4))
self.apc_lat = _rescale_float(apc_lat, 1 << 23)
self.apc_lon = _rescale_float(apc_lon, 1 << 23)
self.apc_alt = _rescale_float(apc_alt, 1 << 16)
cal_parm, self.logical_block_address = struct.unpack(estr+'2I', fi.read(2*4))
self.cal_parm = _rescale_float(cal_parm, 1 << 24)
az_resolution, range_resolution = struct.unpack(estr+'2I', fi.read(2*4))
self.az_resolution = _rescale_float(az_resolution, 1 << 16)
self.range_resolution = _rescale_float(range_resolution, 1 << 16)
des_sigma_n, des_graze, des_squint, des_range, scene_track_angle = \
struct.unpack(estr+'iIiIi', fi.read(5*4))
self.des_sigma_n = _rescale_float(des_sigma_n, 1 << 23)
self.des_graze = _rescale_float(des_graze, 1 << 23)
self.des_squint = _rescale_float(des_squint, 1 << 23)
self.des_range = _rescale_float(des_range, 1 << 8)
self.scene_track_angle = _rescale_float(scene_track_angle, 1 << 23)
# at line 56 of def
self.user_param = fi.read(48) # leave uninterpreted
self.coarse_snr, self.coarse_azimuth_sub, self.coarse_range_sub, \
self.max_azimuth_shift, self.max_range_shift, \
self.coarse_delta_azimuth, self.coarse_delta_range = \
struct.unpack(estr+'7i', fi.read(7*4))
self.tot_procs, self.tpt_box_cmode, self.snr_thresh, self.range_size, \
self.map_box_size, self.box_size, self.box_spc, self.tot_tpts, \
self.good_tpts, self.range_seed, self.range_shift, self.azimuth_shift = \
struct.unpack(estr+'12i', fi.read(12*4))
# at line 76 of def
self.sum_x_ramp, self.sum_y_ramp = struct.unpack(estr+'2i', fi.read(2*4))
self.cy9k_tape_block, self.nominal_center_frequency = struct.unpack(estr+'If', fi.read(2*4))
self.image_flags, self.line_number, self.patch_number = struct.unpack(estr+'3I', fi.read(3*4))
self.lambda0, self.srange_pix_space = struct.unpack(estr+'2f', fi.read(2*4))
self.dopp_pix_space, self.dopp_offset, self.dopp_range_scale, self.mux_time_delay = \
struct.unpack(estr+'4f', fi.read(4*4))
# at line 89 of def
self.apc_ecef = struct.unpack(estr+'3d', fi.read(3*8))
self.vel_ecef = struct.unpack(estr+'3f', fi.read(3*4))
self.phase_cal = struct.unpack(estr+'f', fi.read(4))[0]
self.srp_ecef = struct.unpack(estr+'3d', fi.read(3*8))
self.res5 = fi.read(64) # leave uninterpreted
class _Radar_1_8(object):
"""
The radar details, for version 1.8
"""
def __init__(self, the_bytes, estr):
"""
Parameters
----------
the_bytes : bytes
This will be required to have length 76
estr : str
The endianness format string
"""
if not (isinstance(the_bytes, bytes) and len(the_bytes) == 76):
raise ValueError('Incorrect length input')
self.platform = _get_string(the_bytes[:24])
self.proc_id = _get_string(the_bytes[24:36])
self.radar_model = _get_string(the_bytes[36:48])
self.radar_id = struct.unpack(estr+'I', the_bytes[48:52])[0]
self.swid = _get_string(the_bytes[52:76])
class _GFFHeader_1_8(object):
"""
Interpreter for the GFF version 1.8 header
"""
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
self.file_object = fi
self.estr = estr
self.version = '1.8'
fi.seek(12, os.SEEK_SET)
# starting at line 3 of def
self.header_length = struct.unpack(estr+'I', fi.read(4))[0]
if self.header_length < 2040:
raise ValueError(
'The provided header is apparently too short to be a version 1.8 GFF header')
fi.read(2) # redundant
self.creator = _get_string(fi.read(24))
self.date_time = struct.unpack(estr+'6H', fi.read(6*2)) # year, month, day, hour, minute, second
fi.read(2) # endian, already parsed
self.bytes_per_pixel = int(struct.unpack(estr+'f', fi.read(4))[0])
self.frame_count, self.image_type, self.row_major, self.range_count, \
self.azimuth_count = struct.unpack(estr+'5I', fi.read(5*4))
self.scale_exponent, self.scale_mantissa, self.offset_exponent, self.offset_mantissa = \
struct.unpack(estr+'4i', fi.read(4*4))
# at line 17 of def
self.res1 = fi.read(32) # leave uninterpreted
fi.read(2) # redundant
self.comment = _get_string(fi.read(166))
self.image_plane = struct.unpack(estr+'I', fi.read(4))[0]
range_pixel_size, azimuth_pixel_size, azimuth_overlap = struct.unpack(estr+'3I', fi.read(3*4))
self.range_pixel_size = _rescale_float(range_pixel_size, 1 << 16)
self.azimuth_pixel_size = _rescale_float(azimuth_pixel_size, 1 << 16)
self.azimuth_overlap = _rescale_float(azimuth_overlap, 1 << 16)
srp_lat, srp_lon, srp_alt, rfoa, x_to_srp = struct.unpack(estr+'5i', fi.read(5*4))
self.srp_lat = _rescale_float(srp_lat, 1 << 23)
self.srp_lon = _rescale_float(srp_lon, 1 << 23)
self.srp_alt = _rescale_float(srp_alt, 1 << 16)
self.rfoa = _rescale_float(rfoa, 1 << 23)
self.x_to_srp = _rescale_float(x_to_srp, 1 << 16)
self.res2 = fi.read(32) # leave uninterpreted
fi.read(2)
self.phase_name = _get_string(fi.read(128))
fi.read(2)
self.image_name = _get_string(fi.read(128))
# at line 34 of def
self.look_count, self.param_ref_ap, self.param_ref_pos = \
struct.unpack(estr + '3I', fi.read(3*4))
graze_angle, squint, gta, range_beam_ctr, flight_time = \
struct.unpack(estr + 'I2i2I', fi.read(5*4))
self.graze_angle = _rescale_float(graze_angle, 1 << 23)
self.squint = _rescale_float(squint, 1 << 23)
self.gta = _rescale_float(gta, 1 << 23)
self.range_beam_ctr = _rescale_float(range_beam_ctr, 1 << 8)
self.flight_time = _rescale_float(flight_time, 1000)
self.range_chirp_rate, x_to_start, self.mo_comp_mode, v_x = \
struct.unpack(estr + 'fi2I', fi.read(4*4))
self.x_to_start = _rescale_float(x_to_start, 1 << 16)
self.v_x = _rescale_float(v_x, 1 << 16)
# at line 46 of def
apc_lat, apc_lon, apc_alt = struct.unpack(estr + '3i', fi.read(3*4))
self.apc_lat = _rescale_float(apc_lat, 1 << 23)
self.apc_lon = _rescale_float(apc_lon, 1 << 23)
self.apc_alt = _rescale_float(apc_alt, 1 << 16)
cal_parm, self.logical_block_address = struct.unpack(estr + '2I', fi.read(2*4))
self.cal_parm = _rescale_float(cal_parm, 1 << 24)
az_resolution, range_resolution = struct.unpack(estr + '2I', fi.read(2*4))
self.az_resolution = _rescale_float(az_resolution, 1 << 16)
self.range_resolution = _rescale_float(range_resolution, 1 << 16)
des_sigma_n, des_graze, des_squint, des_range, scene_track_angle = \
struct.unpack(estr + 'iIiIi', fi.read(5*4))
self.des_sigma_n = _rescale_float(des_sigma_n, 1 << 23)
self.des_graze = _rescale_float(des_graze, 1 << 23)
self.des_squint = _rescale_float(des_squint, 1 << 23)
self.des_range = _rescale_float(des_range, 1 << 8)
self.scene_track_angle = _rescale_float(scene_track_angle, 1 << 23)
# at line 58 of def
self.user_param = fi.read(48) # leave uninterpreted
self.coarse_snr, self.coarse_azimuth_sub, self.coarse_range_sub, \
self.max_azimuth_shift, self.max_range_shift, \
self.coarse_delta_azimuth, self.coarse_delta_range = \
struct.unpack(estr + '7i', fi.read(7*4))
self.tot_procs, self.tpt_box_cmode, self.snr_thresh, self.range_size, \
self.map_box_size, self.box_size, self.box_spc, self.tot_tpts, \
self.good_tpts, self.range_seed, self.range_shift, self.azimuth_shift = \
struct.unpack(estr + '12i', fi.read(12*4))
# at line 78 of def
self.sum_x_ramp, self.sum_y_ramp = struct.unpack(estr + '2i', fi.read(2*4))
self.cy9k_tape_block, self.nominal_center_frequency = struct.unpack(estr + 'If', fi.read(2*4))
self.image_flags, self.line_number, self.patch_number = struct.unpack(estr + '3I', fi.read(3*4))
self.lambda0, self.srange_pix_space = struct.unpack(estr + '2f', fi.read(2*4))
self.dopp_pix_space, self.dopp_offset, self.dopp_range_scale, self.mux_time_delay = \
struct.unpack(estr + '4f', fi.read(4*4))
# at line 91 of def
self.apc_ecef = struct.unpack(estr+'3d', fi.read(3*8))
self.vel_ecef = struct.unpack(estr+'3f', fi.read(3*4))
self.phase_cal = struct.unpack(estr+'f', fi.read(4))[0]
self.srp_ecef = struct.unpack(estr+'3d', fi.read(3*8))
self.res5 = fi.read(64) # leave uninterpreted
# at line 102
self.header_length1 = struct.unpack(estr+'I', fi.read(4))[0]
self.image_date = struct.unpack(estr+'6H', fi.read(6*2)) # year,month, day, hour, minute, second
self.comp_file_name = _get_string(fi.read(128))
self.ref_file_name = _get_string(fi.read(128))
self.IE = _Radar_1_8(fi.read(76), estr)
self.IF = _Radar_1_8(fi.read(76), estr)
self.if_algo = _get_string(fi.read(8))
self.PH = _Radar_1_8(fi.read(76), estr)
# at line 122 of def
self.ph_data_rcd, self.proc_product = struct.unpack(estr+'2i', fi.read(2*4))
self.mission_text = _get_string(fi.read(8))
self.ph_source, self.gps_week = struct.unpack(estr+'iI', fi.read(2*4))
self.data_collect_reqh = _get_string(fi.read(14))
self.res6 = fi.read(2) # leave uninterpreted
# at line 129
self.grid_name = _get_string(fi.read(24))
self.pix_val_linearity, self.complex_or_real, self.bits_per_magnitude, \
self.bits_per_phase = struct.unpack(estr+'2i2H', fi.read(2*4+2*2))
self.complex_order_type, self.pix_data_type, self.image_length, \
self.image_cmp_scheme = struct.unpack(estr+'4i', fi.read(4*4))
# at line 138
self.apbo, self.asa_pitch, self.asa_squint, self.dsa_pitch, self.ira = \
struct.unpack(estr+'5f', fi.read(5*4))
self.rx_polarization = struct.unpack(estr+'2f', fi.read(2*4))
self.tx_polarization = struct.unpack(estr+'2f', fi.read(2*4))
self.v_avg = struct.unpack(estr+'3f', fi.read(3*4))
self.apc_avg = struct.unpack(estr+'3f', fi.read(3*4))
self.averaging_time, self.dgta = struct.unpack(estr+'2f', fi.read(2*4))
# at line 153
velocity_y, velocity_z = struct.unpack(estr+'2I', fi.read(2*4))
self.velocity_y = _rescale_float(velocity_y, 1 << 16)
self.velocity_z = _rescale_float(velocity_z, 1 << 16)
self.ba, self.be = struct.unpack(estr+'2f', fi.read(2*4))
self.az_geom_corr, self.range_geom_corr, self.az_win_fac_bw, \
self.range_win_fac_bw = struct.unpack(estr+'2i2f', fi.read(4*4))
self.az_win_id = _get_string(fi.read(48))
self.range_win_id = _get_string(fi.read(48))
# at line 163
self.keep_out_viol_prcnt = struct.unpack(estr+'f', fi.read(4))[0]
self.az_coeff = struct.unpack(estr+'6f', fi.read(6*4))
self.pos_uncert = struct.unpack(estr+'3f', fi.read(3*4))
self.nav_aiding_type = struct.unpack(estr+'i', fi.read(4))[0]
self.two_dnl_phase_coeffs = struct.unpack(estr+'10f', fi.read(10*4))
self.clutter_snr_thresh = struct.unpack(estr+'f', fi.read(4))[0]
# at line 171
self.elevation_coeff = struct.unpack(estr+'9f', fi.read(9*4))
self.monopulse_coeff = struct.unpack(estr+'12f', fi.read(12*4))
self.twist_pt_err_prcnt, self.tilt_pt_err_prcnt, self.az_pt_err_prcnt = \
struct.unpack(estr+'3f', fi.read(3*4))
sigma_n, self.take_num = struct.unpack(estr+'Ii', fi.read(2*4))
self.sigma_n = _rescale_float(sigma_n, 1 << 23)
self.if_sar_flags = struct.unpack(estr+'5i', fi.read(5*4))
self.mu_threshold, self.gff_app_type = struct.unpack(estr+'fi', fi.read(2*4))
self.res7 = fi.read(8) # leave uninterpreted
#####################
# version 2 specific header parsing
# NB: I am only parsing the GSATIMG, APINFO, IFINFO, and GEOINFO blocks
# because those are the only blocks referenced in the matlab that I
# am mirroring
class _BlockHeader_2(object):
"""
Read and interpret a block "sub"-header. This generically precedes every version
2 data block, including the main file header
"""
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
self.name = _get_string(fi.read(16))
self.major_version, self.minor_version = struct.unpack(estr+'HH', fi.read(2*2))
what0 = fi.read(4) # not sure what this is from looking at the matlab.
self.size = struct.unpack(estr+'I', fi.read(4))[0]
what1 = fi.read(4) # not sure what this is from looking at the matlab.
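        # altogether the sub-header occupies 32 bytes: a 16 byte name, two 2-byte
        # version fields, the 4-byte block size, and two 4-byte fields of unclear purpose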
if (self.version == '2.0' and self.size == 64) or (self.version == '1.0' and self.size == 52):
self.name = 'RADARINFO' # fix known issue for some early version 2 GFF files
@property
def version(self):
"""
str: The version
"""
return '{}.{}'.format(self.major_version, self.minor_version)
# APINFO definitions
class _APInfo_1_0(object):
"""
The APINFO block
"""
serialized_length = 314
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
self.missionText = _get_string(fi.read(8))
self.swVerNum = _get_string(fi.read(8))
self.radarSerNum, self.phSource = struct.unpack(estr+'2I', fi.read(2*4))
fi.read(2)
self.phName = _get_string(fi.read(128))
self.ctrFreq, self.wavelength = struct.unpack(estr+'2f', fi.read(2*4))
self.rxPolarization, self.txPolarization = struct.unpack(estr+'2I', fi.read(2*4))
self.azBeamWidth, self.elBeamWidth = struct.unpack(estr+'2f', fi.read(2*4))
self.grazingAngle, self.squintAngle, self.gta, self.rngToBeamCtr = \
struct.unpack(estr+'4f', fi.read(4*4))
# line 16
self.desSquint, self.desRng, self.desGTA, self.antPhaseCtrBear = \
struct.unpack(estr+'4f', fi.read(4*4))
self.ApTimeUTC = struct.unpack(estr+'6H', fi.read(6*2))
self.flightTime, self.flightWeek = struct.unpack(estr+'2I', fi.read(2*4))
self.chirpRate, self.xDistToStart = struct.unpack(estr+'2f', fi.read(2*4))
self.momeasMode, self.radarMode = struct.unpack(estr+'2I', fi.read(2*4))
# line 32
self.rfoa = struct.unpack(estr+'f', fi.read(4))[0]
self.apcVel = struct.unpack(estr+'3d', fi.read(3*8))
self.apcLLH = struct.unpack(estr+'3d', fi.read(3*8))
self.keepOutViol, self.gimStopTwist, self.gimStopTilt, self.gimStopAz = \
struct.unpack(estr+'4f', fi.read(4*4))
class _APInfo_2_0(_APInfo_1_0):
"""
The APINFO block
"""
serialized_length = 318
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
_APInfo_1_0.__init__(self, fi, estr)
self.apfdFactor = struct.unpack(estr+'i', fi.read(4))[0]
class _APInfo_3_0(_APInfo_2_0):
"""
The APINFO block
"""
serialized_length = 334
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
_APInfo_2_0.__init__(self, fi, estr)
self.fastTimeSamples, self.adSampleFreq, self.apertureTime, \
self.numPhaseHistories = struct.unpack(estr+'I2fI', fi.read(4*4))
class _APInfo_4_0(object):
"""
The APINFO block
"""
serialized_length = 418
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
# essentially the same as version 3, except the first two fields are longer
self.missionText = _get_string(fi.read(50))
self.swVerNum = _get_string(fi.read(50))
self.radarSerNum, self.phSource = struct.unpack(estr+'2I', fi.read(2*4))
fi.read(2)
self.phName = _get_string(fi.read(128))
self.ctrFreq, self.wavelength = struct.unpack(estr+'2f', fi.read(2*4))
self.rxPolarization, self.txPolarization = struct.unpack(estr+'2I', fi.read(2*4))
self.azBeamWidth, self.elBeamWidth = struct.unpack(estr+'2f', fi.read(2*4))
self.grazingAngle, self.squintAngle, self.gta, self.rngToBeamCtr = \
struct.unpack(estr+'4f', fi.read(4*4))
# line 16
self.desSquint, self.desRng, self.desGTA, self.antPhaseCtrBear = \
struct.unpack(estr+'4f', fi.read(4*4))
self.ApTimeUTC = struct.unpack(estr+'6H', fi.read(6*2))
self.flightTime, self.flightWeek = struct.unpack(estr+'2I', fi.read(2*4))
self.chirpRate, self.xDistToStart = struct.unpack(estr+'2f', fi.read(2*4))
self.momeasMode, self.radarMode = struct.unpack(estr+'2I', fi.read(2*4))
# line 32
self.rfoa = struct.unpack(estr+'f', fi.read(4))[0]
self.apcVel = struct.unpack(estr+'3d', fi.read(3*8))
self.apcLLH = struct.unpack(estr+'3d', fi.read(3*8))
self.keepOutViol, self.gimStopTwist, self.gimStopTilt, self.gimStopAz = \
struct.unpack(estr+'4f', fi.read(4*4))
self.apfdFactor = struct.unpack(estr+'i', fi.read(4))[0]
self.fastTimeSamples, self.adSampleFreq, self.apertureTime, \
self.numPhaseHistories = struct.unpack(estr+'I2fI', fi.read(4*4))
class _APInfo_5_0(_APInfo_4_0):
"""
The APINFO block
"""
serialized_length = 426
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
_APInfo_4_0.__init__(self, fi, estr)
self.lightSpeed = struct.unpack(estr+'d', fi.read(8))[0] # really?
class _APInfo_5_1(_APInfo_5_0):
"""
The APINFO block
"""
serialized_length = 430
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
_APInfo_5_0.__init__(self, fi, estr)
self.delTanApAngle = struct.unpack(estr+'f', fi.read(4))[0]
class _APInfo_5_2(_APInfo_5_1):
"""
The APINFO block
"""
serialized_length = 434
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
_APInfo_5_1.__init__(self, fi, estr)
self.metersInSampledDoppler = struct.unpack(estr+'f', fi.read(4))[0]
# IFINFO definitions
class _IFInfo_1_0(object):
"""
Interpreter for IFInfo object
"""
serialized_length = 514
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
self.procProduct = struct.unpack(estr+'I', fi.read(4))[0]
fi.read(2)
self.imgFileName = _get_string(fi.read(128))
self.azResolution, self.rngResolution = struct.unpack(estr+'2f', fi.read(2*4))
self.imgCalParam, self.sigmaN = struct.unpack(estr+'2f', fi.read(2*4))
self.sampLocDCRow, self.sampLocDCCol = struct.unpack(estr+'2i', fi.read(2*4))
self.ifAlgo = _get_string(fi.read(8))
self.imgFlag = struct.unpack(estr+'i', fi.read(4))[0]
self.azCoeff = struct.unpack(estr+'6f', fi.read(6*4))
self.elCoeff = struct.unpack(estr+'9f', fi.read(9*4))
self.azGeoCorrect, self.rngGeoCorrect = struct.unpack(estr+'2i', fi.read(2*4))
self.wndBwFactAz, self.wndBwFactRng = struct.unpack(estr+'2f', fi.read(2*4))
self.wndFncIdAz = _get_string(fi.read(48))
self.wndFncIdRng = _get_string(fi.read(48))
fi.read(2)
self.cmtText = _get_string(fi.read(166))
self.autoFocusInfo = struct.unpack(estr+'i', fi.read(4))[0]
class _IFInfo_2_0(_IFInfo_1_0):
"""
Interpreter for IFInfo object - identical with version 2.1 and 2.2
"""
serialized_length = 582
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
_IFInfo_1_0.__init__(self, fi, estr)
self.rngFFTSize = struct.unpack(estr+'i', fi.read(4))[0]
self.RangePaneFilterCoeff = struct.unpack(estr+'11f', fi.read(11*4))
self.AzPreFilterCoeff = struct.unpack(estr+'5f', fi.read(5*4))
class _IFInfo_3_0(_IFInfo_2_0):
"""
Interpreter for IFInfo object
"""
serialized_length = 586
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
_IFInfo_2_0.__init__(self, fi, estr)
self.afPeakQuadComp = struct.unpack(estr+'f', fi.read(4))[0]
# GEOINFO definitions
class _GeoInfo_1(object):
"""
Interpreter for GeoInfo object - note that versions 1.0 and 1.1 are identical
"""
serialized_length = 52
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
self.imagePlane = struct.unpack(estr+'i', fi.read(4))[0]
self.rangePixSpacing, self.desiredGrazAng, self.azPixSpacing = \
struct.unpack(estr+'3f', fi.read(3*4))
self.patchCtrLLH = struct.unpack(estr+'3d', fi.read(3*8))
self.pixLocImCtrRow, self.pixLocImCtrCol = struct.unpack(estr+'2I', fi.read(2*4))
self.imgRotAngle = struct.unpack(estr+'f', fi.read(4))[0]
# GSATIMG definition
def _get_complex_domain_code(code_int):
# type: (int) -> str
if code_int in [0, 3]:
return 'IQ'
elif code_int in [1, 4]:
return 'QI'
elif code_int in [2, 5]:
return 'MP'
elif code_int == 6:
return 'PM'
elif code_int == 7:
return 'M'
elif code_int == 8:
return 'P'
else:
raise ValueError('Got unexpected code `{}`'.format(code_int))
def _get_band_order(code_int):
# type: (int) -> str
if code_int in [0, 1, 2, 7, 8]:
return 'interleaved'
elif code_int in [3, 4, 5, 6]:
return 'sequential'
else:
raise ValueError('Got unexpected code `{}`'.format(code_int))
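# For quick reference, the two lookups above combine as follows (derived directly
# from the code, shown here only as a summary):
#   0 -> IQ/interleaved, 1 -> QI/interleaved, 2 -> MP/interleaved,
#   3 -> IQ/sequential,  4 -> QI/sequential,  5 -> MP/sequential,
#   6 -> PM/sequential,  7 -> M/interleaved,  8 -> P/interleaved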
class _PixelFormat(object):
"""
Interpreter for pixel format object
"""
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
self.comp0_bitSize, self.comp0_dataType = struct.unpack(estr+'HI', fi.read(2+4))
self.comp1_bitSize, self.comp1_dataType = struct.unpack(estr+'HI', fi.read(2+4))
self.cmplxDomain, self.numComponents = struct.unpack(estr+'Ii', fi.read(2*4))
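        # cmplxDomain encodes how the (up to) two components combine into a complex
        # sample; it is interpreted by _get_complex_domain_code/_get_band_order above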
class _GSATIMG_2(object):
"""
Interpreter for the GSATIMG object
"""
serialized_length = 82
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
self.endian = struct.unpack(estr+'I', fi.read(4))[0]
fi.read(2)
self.imageCreator = _get_string(fi.read(24))
self.rangePixels, self.azPixels = struct.unpack(estr+'2I', fi.read(2*4))
self.pixOrder, self.imageLengthBytes, self.imageCompressionScheme, \
self.pixDataType = struct.unpack(estr+'4I', fi.read(4*4))
self.pixelFormat = _PixelFormat(fi, estr)
self.pixValLin, self.autoScaleFac = struct.unpack(estr+'if', fi.read(2*4))
complex_domain = _get_complex_domain_code(self.pixelFormat.cmplxDomain)
if complex_domain not in ['IQ', 'QI', 'MP', 'PM']:
raise ValueError('We got unsupported complex domain `{}`'.format(complex_domain))
# combined GFF version 2 header collection
def _check_serialization(block_header, expected_length):
# type: (_BlockHeader_2, int) -> None
if block_header.size == expected_length:
return
raise ValueError(
'Got `{}` block of version `{}` and serialized length {},\n\t'
'but expected serialized length {}'.format(
block_header.name, block_header.version, block_header.size, expected_length))
class _GFFHeader_2(object):
"""
Interpreter for the GFF version 2.* header
"""
__slots__ = (
'file_object', 'estr', '_gsat_img', '_ap_info', '_if_info', '_geo_info',
'_image_header', '_image_offset')
def __init__(self, fi, estr):
"""
Parameters
----------
fi : BinaryIO
estr : str
The endianness string for format interpretation, one of `['<', '>']`
"""
self._gsat_img = None
self._ap_info = None
self._if_info = None
self._geo_info = None
self._image_header = None
self._image_offset = None
self.file_object = fi
self.estr = estr
# extract the initial file location
init_location = fi.tell()
        # go to the beginning of the file
fi.seek(0, os.SEEK_SET)
gsat_header = _BlockHeader_2(fi, estr)
self._gsat_img = _GSATIMG_2(fi, estr)
while True:
block_header = _BlockHeader_2(fi, estr)
if block_header.name == 'IMAGEDATA':
self._image_header = block_header
self._image_offset = fi.tell()
break
elif block_header.name == 'APINFO':
self._parse_apinfo(fi, estr, block_header)
elif block_header.name == 'IFINFO':
self._parse_ifinfo(fi, estr, block_header)
elif block_header.name == 'GEOINFO':
self._parse_geoinfo(fi, estr, block_header)
else:
# we are not parsing this block, so just skip it
fi.seek(block_header.size, os.SEEK_CUR)
# return to the initial file location
fi.seek(init_location, os.SEEK_SET)
self._check_valid(gsat_header)
@property
def gsat_img(self):
# type: () -> _GSATIMG_2
return self._gsat_img
@property
def ap_info(self):
# type: () -> Union[_APInfo_1_0, _APInfo_2_0, _APInfo_3_0, _APInfo_4_0, _APInfo_5_0, _APInfo_5_1, _APInfo_5_2]
return self._ap_info
@property
def if_info(self):
# type: () -> Union[_IFInfo_1_0, _IFInfo_2_0, _IFInfo_3_0]
return self._if_info
@property
def geo_info(self):
# type: () -> _GeoInfo_1
return self._geo_info
@property
def image_header(self):
# type: () -> _BlockHeader_2
return self._image_header
@property
def image_offset(self):
# type: () -> int
return self._image_offset
def _parse_apinfo(self, fi, estr, block_header):
if block_header.name != 'APINFO':
return
if block_header.major_version == 1:
_check_serialization(block_header, _APInfo_1_0.serialized_length)
self._ap_info = _APInfo_1_0(fi, estr)
elif block_header.major_version == 2:
_check_serialization(block_header, _APInfo_2_0.serialized_length)
self._ap_info = _APInfo_2_0(fi, estr)
elif block_header.major_version == 3:
_check_serialization(block_header, _APInfo_3_0.serialized_length)
self._ap_info = _APInfo_3_0(fi, estr)
elif block_header.major_version == 4:
_check_serialization(block_header, _APInfo_4_0.serialized_length)
self._ap_info = _APInfo_4_0(fi, estr)
elif block_header.major_version == 5:
if block_header.minor_version == 0:
_check_serialization(block_header, _APInfo_5_0.serialized_length)
self._ap_info = _APInfo_5_0(fi, estr)
elif block_header.minor_version == 1:
_check_serialization(block_header, _APInfo_5_1.serialized_length)
self._ap_info = _APInfo_5_1(fi, estr)
elif block_header.minor_version == 2:
_check_serialization(block_header, _APInfo_5_2.serialized_length)
self._ap_info = _APInfo_5_2(fi, estr)
else:
raise ValueError(
'Could not parse required `{}` block version `{}`'.format(
block_header.name, block_header.version))
def _parse_ifinfo(self, fi, estr, block_header):
if block_header.name != 'IFINFO':
return
if block_header.major_version == 1:
_check_serialization(block_header, _IFInfo_1_0.serialized_length)
self._if_info = _IFInfo_1_0(fi, estr)
elif block_header.major_version == 2:
_check_serialization(block_header, _IFInfo_2_0.serialized_length)
self._if_info = _IFInfo_2_0(fi, estr)
elif block_header.major_version == 3:
_check_serialization(block_header, _IFInfo_3_0.serialized_length)
self._if_info = _IFInfo_3_0(fi, estr)
else:
raise ValueError(
'Could not parse required `{}` block version `{}`'.format(
block_header.name, block_header.version))
def _parse_geoinfo(self, fi, estr, block_header):
if block_header.name != 'GEOINFO':
return
_check_serialization(block_header, _GeoInfo_1.serialized_length)
self._geo_info = _GeoInfo_1(fi, estr)
def _check_valid(self, gsat_header):
# ensure that the required elements are all set
valid = True
if self._ap_info is None:
valid = False
logger.error(
'GFF version {} file did not present APINFO block'.format(
gsat_header.version))
if self._if_info is None:
valid = False
logger.error(
'GFF version {} file did not present IFINFO block'.format(
gsat_header.version))
if self._geo_info is None:
valid = False
logger.error(
'GFF version {} file did not present GEOINFO block'.format(
gsat_header.version))
if not valid:
raise ValueError('GFF file determined to be invalid')
def get_arp_vel(self):
"""
Gets the aperture velocity in ECF coordinates
Returns
-------
numpy.ndarray
"""
# TODO: this is not correct
# get the aperture velocity in its native frame of reference (rotated ENU)
arp_vel_orig = numpy.array(self.ap_info.apcVel, dtype='float64')
# TODO: arp_vel_orig is in what coordinate system? Rick said "rotated ENU", wrt gta?
# gets the angle wrt to True North for the radar frame of reference
angle = numpy.deg2rad(self.ap_info.rfoa)
cosine, sine = numpy.cos(angle), numpy.sin(angle)
# construct the NED velocity vector
transform = numpy.array([[cosine, -sine, 0], [sine, cosine, 0], [0, 0, -1]], dtype='float64')
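        # this matrix rotates about the vertical by rfoa and negates the up component,
        # mapping the (assumed) rotated ENU velocity into an NED frame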
ned_velocity = transform.dot(arp_vel_orig)
# convert to ECF
orp = geodetic_to_ecf(self.ap_info.apcLLH, ordering='latlon')
out = ned_to_ecf(ned_velocity, orp, absolute_coords=False)
return out
####################
# object for creation of sicd structure from GFF header object
def _get_wgt(str_in):
# type: (str) -> Union[None, WgtTypeType]
if str_in == '':
return None
elements = str_in.split()
win_name = elements[0].upper()
parameters = None
if win_name == 'TAYLOR':
if len(elements) < 2:
raise ValueError('Got unparseable window definition `{}`'.format(str_in))
params = elements[1].split(',')
if len(params) != 2:
raise ValueError('Got unparseable window definition `{}`'.format(str_in))
parameters = {'SLL': params[0].strip(), 'NBAR': params[1].strip()}
return WgtTypeType(
WindowName=win_name,
Parameters=parameters)
def _get_polarization_string(int_value):
# type: (int) -> Union[None, str]
if int_value == 0:
return 'H'
elif int_value == 1:
return 'V'
elif int_value == 2:
return 'LHC'
elif int_value == 3:
return 'RHC'
elif int_value in [4, 5]:
# TODO: according to their enum, we have 4 -> "T" and 5 -> "P"
# what does that mean?
return 'OTHER'
else:
return 'UNKNOWN'
def _get_tx_rcv_polarization(tx_pol_int, rcv_pol_int):
# type: (int, int) -> (str, str)
tx_pol = _get_polarization_string(tx_pol_int)
rcv_pol = _get_polarization_string(rcv_pol_int)
if tx_pol in ['OTHER', 'UNKNOWN'] or rcv_pol in ['OTHER', 'UNKNOWN']:
tx_rcv_pol = 'OTHER'
else:
tx_rcv_pol = '{}:{}'.format(tx_pol, rcv_pol)
return tx_pol, tx_rcv_pol
class _GFFInterpreter(object):
"""
Extractor for the sicd details
"""
def get_sicd(self):
"""
Gets the SICD structure.
Returns
-------
SICDType
"""
raise NotImplementedError
def get_chipper(self):
"""
Gets the chipper for reading the data.
Returns
-------
BIPChipper
"""
raise NotImplementedError
class _GFFInterpreter1(_GFFInterpreter):
"""
Extractor of SICD structure and parameters from gff_header_1*
object
"""
def __init__(self, header):
"""
Parameters
----------
header : _GFFHeader_1_6|_GFFHeader_1_8
"""
self.header = header
if self.header.image_type == 0:
raise ValueError(
'ImageType indicates a magnitude only image, which is incompatible with SICD')
def get_sicd(self):
def get_collection_info():
# type: () -> CollectionInfoType
core_name = self.header.image_name.replace(':', '_')
return CollectionInfoType(
CoreName=core_name,
CollectType='MONOSTATIC',
RadarMode=RadarModeType(
ModeType='SPOTLIGHT'),
Classification='UNCLASSIFIED')
def get_image_creation():
# type: () -> ImageCreationType
from sarpy.__about__ import __version__
from datetime import datetime
return ImageCreationType(
Application=self.header.creator,
DateTime=numpy.datetime64(datetime(*self.header.date_time)),
Site='Unknown',
Profile='sarpy {}'.format(__version__))
def get_image_data():
# type: () -> ImageDataType
return ImageDataType(
PixelType='RE32F_IM32F',
NumRows=num_rows,
NumCols=num_cols,
FullImage=(num_rows, num_cols),
FirstRow=0,
FirstCol=0,
SCPPixel=(scp_row, scp_col))
def get_geo_data():
# type: () -> GeoDataType
return GeoDataType(
SCP=SCPType(
LLH=[self.header.srp_lat, self.header.srp_lon, self.header.srp_alt]))
def get_grid():
# type: () -> GridType
image_plane = 'GROUND' if self.header.image_plane == 0 else 'SLANT'
# we presume that image_plane in [0, 1]
row_ss = self.header.range_pixel_size
col_ss = self.header.azimuth_pixel_size
row_bw = 1./row_ss
col_bw = 1./col_ss
if self.header.version == '1.8':
if self.header.range_win_fac_bw > 0:
row_bw = self.header.range_win_fac_bw/row_ss
if self.header.az_win_fac_bw > 0:
col_bw = self.header.az_win_fac_bw/col_ss
row = DirParamType(
Sgn=-1,
SS=row_ss,
ImpRespWid=self.header.range_resolution,
ImpRespBW=row_bw,
DeltaK1=0.5*row_bw,
DeltaK2=-0.5*row_bw,
WgtType=_get_wgt(
self.header.range_win_id if self.header.version == '1.8' else ''),
DeltaKCOAPoly=[[0, ], ] # TODO: revisit this?
)
col = DirParamType(
Sgn=-1,
SS=col_ss,
ImpRespWid=self.header.az_resolution,
ImpRespBW=col_bw,
DeltaK1=0.5*col_bw,
DeltaK2=-0.5*col_bw,
WgtType=_get_wgt(
self.header.az_win_id if self.header.version == '1.8' else ''),
DeltaKCOAPoly=[[0, ], ] # TODO: revisit this?
)
return GridType(
ImagePlane=image_plane,
Type='PLANE',
Row=row,
Col=col)
def get_scpcoa():
# type: () -> Union[None, SCPCOAType]
side_of_track = 'L' if self.header.squint < 0 else 'R'
apc_llh = numpy.array(
[self.header.apc_lat, self.header.apc_lon, self.header.apc_alt],
dtype='float64')
if numpy.all(apc_llh == 0):
arp_pos = None
else:
arp_pos = geodetic_to_ecf(apc_llh, ordering='latlon')
return SCPCOAType(
ARPPos=arp_pos,
GrazeAng=self.header.graze_angle,
SideOfTrack=side_of_track)
num_rows = self.header.range_count
num_cols = self.header.azimuth_count
scp_row = int(0.5*num_rows)
scp_col = int(0.5*num_cols)
collection_info = get_collection_info()
image_creation = get_image_creation()
image_data = get_image_data()
geo_data = get_geo_data()
grid = get_grid()
scpcoa = get_scpcoa()
return SICDType(
CollectionInfo=collection_info,
ImageCreation=image_creation,
ImageData=image_data,
GeoData=geo_data,
Grid=grid,
SCPCOA=scpcoa)
def get_chipper(self):
if self.header.bits_per_phase not in [8, 16, 32]:
raise ValueError('Got unexpected bits per phase {}'.format(self.header.bits_per_phase))
if self.header.bits_per_magnitude not in [8, 16, 32]:
            raise ValueError('Got unexpected bits per magnitude {}'.format(self.header.bits_per_magnitude))
# creating a custom phase/magnitude data type
phase_dtype = numpy.dtype('{}u{}'.format(self.header.estr, int(self.header.bits_per_phase/8)))
magnitude_dtype = numpy.dtype('{}u{}'.format(self.header.estr, int(self.header.bits_per_magnitude/8)))
raw_dtype = numpy.dtype([('phase', phase_dtype), ('magnitude', magnitude_dtype)])
raw_bands = 1
output_bands = 1
output_dtype = 'complex64'
data_size = (self.header.range_count, self.header.azimuth_count)
if self.header.row_major:
symmetry = (True, True, True)
else:
symmetry = (True, True, False)
data_offset = self.header.header_length
if self.header.image_type == 1:
# phase/magnitude which is integer
transform_data = phase_amp_int_to_complex()
return BIPChipper(
self.header.file_object, raw_dtype, data_size, raw_bands, output_bands, output_dtype,
symmetry=symmetry, transform_data=transform_data,
data_offset=data_offset, limit_to_raw_bands=None)
else:
raise ValueError('Got unsupported image type `{}`'.format(self.header.image_type))
def _get_numpy_dtype(data_type_int):
# type: (int) -> str
if data_type_int == 0:
return 'u1'
elif data_type_int == 1:
return 'u2'
elif data_type_int == 2:
return 'u4'
elif data_type_int == 3:
return 'u8'
elif data_type_int == 4:
return 'i1'
elif data_type_int == 5:
return 'i2'
elif data_type_int == 6:
return 'i4'
elif data_type_int == 7:
return 'i8'
elif data_type_int == 8:
return 'f4'
elif data_type_int == 9:
return 'f8'
else:
raise ValueError('Got unsupported data type code `{}`'.format(data_type_int))
class _GFFInterpreter2(_GFFInterpreter):
"""
Extractor of SICD structure and parameters from GFFHeader_2 object
"""
def __init__(self, header):
"""
Parameters
----------
header : _GFFHeader_2
"""
self.header = header
self._cached_files = []
if self.header.gsat_img.pixelFormat.numComponents != 2:
raise ValueError(
'The pixel format indicates that the number of components is `{}`, '
'which is not supported for a complex image'.format(
self.header.gsat_img.pixelFormat.numComponents))
def get_sicd(self):
def get_collection_info():
# type: () -> CollectionInfoType
core_name = self.header.ap_info.phName # TODO: double check this...
return CollectionInfoType(
CollectorName=self.header.ap_info.missionText,
CoreName=core_name,
CollectType='MONOSTATIC',
RadarMode=RadarModeType(
ModeType='SPOTLIGHT'),
Classification='UNCLASSIFIED')
def get_image_creation():
# type: () -> ImageCreationType
from sarpy.__about__ import __version__
application = '{} {}'.format(self.header.gsat_img.imageCreator, self.header.ap_info.swVerNum)
date_time = None # todo: really?
return ImageCreationType(
Application=application,
DateTime=date_time,
Site='Unknown',
Profile='sarpy {}'.format(__version__))
def get_image_data():
# type: () -> ImageDataType
pix_data_type = self.header.gsat_img.pixDataType
amp_table = None
if pix_data_type == 12:
pixel_type = 'AMP8I_PHS8I'
amp_table = numpy.arange(256, dtype='float64')
elif pix_data_type in [1, 3, 4, 6, 8, 9, 10, 11]:
pixel_type = 'RE32F_IM32F'
elif pix_data_type in [2, 7]:
pixel_type = 'RE16I_IM16I'
else:
raise ValueError('Unhandled pixTypeData value `{}`'.format(pix_data_type))
return ImageDataType(
PixelType=pixel_type,
AmpTable=amp_table,
NumRows=num_rows,
NumCols=num_cols,
FullImage=(num_rows, num_cols),
FirstRow=0,
FirstCol=0,
SCPPixel=(scp_row, scp_col))
def get_geo_data():
# type: () -> GeoDataType
return GeoDataType(SCP=SCPType(ECF=scp))
def get_grid():
# type: () -> GridType
image_plane = 'GROUND' if self.header.geo_info.imagePlane == 0 else 'SLANT'
# we presume that image_plane in [0, 1]
# derive row/col uvect
ground_uvec = wgs_84_norm(scp)
urng = scp - arp_pos # unit vector for row in the slant plane
urng /= numpy.linalg.norm(urng)
if image_plane == 'GROUND':
row_uvec = urng - numpy.dot(urng, ground_uvec)*ground_uvec
row_uvec /= numpy.linalg.norm(row_uvec)
else:
row_uvec = urng
col_uvec = arp_vel/numpy.linalg.norm(arp_vel)
if self.header.ap_info.squintAngle < 0:
col_uvec *= -1
# verify that my orientation makes some sense
dumb_check = ground_uvec.dot(numpy.cross(row_uvec, col_uvec))
if dumb_check <= 0:
raise ValueError(
'The range vector, velocity vector, and squint angle have '
'incompatible orientations')
parallel_component = numpy.dot(row_uvec, col_uvec)
if numpy.abs(parallel_component) > 1e-7:
col_uvec = col_uvec - parallel_component*row_uvec
col_uvec /= numpy.linalg.norm(col_uvec)
row_ss = self.header.geo_info.rangePixSpacing
row_bw = self.header.if_info.wndBwFactRng/self.header.if_info.rngResolution
row_delta_kcoa_constant = 0.5*(1 - (self.header.if_info.sampLocDCRow/int(0.5*num_rows)))/row_ss
row = DirParamType(
Sgn=-1,
SS=row_ss,
UVectECF=row_uvec,
ImpRespWid=self.header.if_info.rngResolution,
ImpRespBW=row_bw,
KCtr=2*center_frequency/speed_of_light,
DeltaK1=0.5*row_bw,
DeltaK2=-0.5*row_bw,
WgtType=_get_wgt(self.header.if_info.wndFncIdRng),
DeltaKCOAPoly=[[row_delta_kcoa_constant, ], ])
col_ss = self.header.geo_info.azPixSpacing
col_bw = self.header.if_info.wndBwFactAz/self.header.if_info.azResolution
col_delta_kcoa_constant = 0.5*(1 - (self.header.if_info.sampLocDCCol/int(0.5*num_cols)))/col_ss
col = DirParamType(
Sgn=-1,
SS=col_ss,
UVectECF=col_uvec,
ImpRespWid=self.header.if_info.azResolution,
ImpRespBW=col_bw,
KCtr=0,
DeltaK1=0.5*col_bw,
DeltaK2=-0.5*col_bw,
WgtType=_get_wgt(self.header.if_info.wndFncIdAz),
DeltaKCOAPoly=[[col_delta_kcoa_constant, ], ])
return GridType(
ImagePlane=image_plane,
Type=grid_type,
Row=row,
Col=col)
def get_scpcoa():
# type: () -> SCPCOAType
return SCPCOAType(
ARPPos=arp_pos,
ARPVel=arp_vel,
SCPTime=0.5*collect_duration)
def get_timeline():
# type: () -> TimelineType
try:
# only exists for APINFO version 3 and above
ipp_end = self.header.ap_info.numPhaseHistories
ipp = [IPPSetType(
TStart=0,
TEnd=collect_duration,
IPPStart=0,
IPPEnd=ipp_end,
IPPPoly=[0, ipp_end/collect_duration]), ]
except AttributeError:
ipp = None
return TimelineType(
CollectStart=start_time,
CollectDuration=collect_duration,
IPP=ipp)
def get_radar_collection():
# type: () -> RadarCollectionType
try:
sample_rate = self.header.ap_info.adSampleFreq
pulse_length = float(self.header.ap_info.fastTimeSamples)/sample_rate
waveform = [
WaveformParametersType(ADCSampleRate=sample_rate, TxPulseLength=pulse_length), ]
except AttributeError:
waveform = None
rcv_channels = [ChanParametersType(TxRcvPolarization=tx_rcv_pol, index=1), ]
return RadarCollectionType(
TxFrequency=(center_frequency-0.5*band_width, center_frequency+0.5*band_width),
Waveform=waveform,
TxPolarization=tx_pol,
RcvChannels=rcv_channels)
def get_image_formation():
# type: () -> ImageFormationType
return ImageFormationType(
RcvChanProc=RcvChanProcType(ChanIndices=[1, ]),
TxRcvPolarizationProc=tx_rcv_pol,
TxFrequencyProc=(
center_frequency-0.5*band_width,
center_frequency+0.5*band_width),
TStartProc=0,
TEndProc=collect_duration,
ImageFormAlgo=image_form_algo,
STBeamComp='NO',
ImageBeamComp='NO',
AzAutofocus='NO',
RgAutofocus='NO')
def repair_scpcoa():
# call after deriving the sicd fields
if out_sicd.SCPCOA.GrazeAng is None:
out_sicd.SCPCOA.GrazeAng = self.header.ap_info.grazingAngle
if out_sicd.SCPCOA.IncidenceAng is None:
out_sicd.SCPCOA.IncidenceAng = 90 - out_sicd.SCPCOA.GrazeAng
if out_sicd.SCPCOA.SideOfTrack is None:
out_sicd.SCPCOA.SideOfTrack = 'L' if self.header.ap_info.squintAngle < 0 else 'R'
def populate_radiometric():
# call after deriving the sicd fields
rcs_constant = self.header.if_info.imgCalParam**2
radiometric = RadiometricType(RCSSFPoly=[[rcs_constant, ]])
# noinspection PyProtectedMember
radiometric._derive_parameters(out_sicd.Grid, out_sicd.SCPCOA)
if radiometric.SigmaZeroSFPoly is not None:
noise_constant = self.header.if_info.sigmaN - 10*numpy.log10(radiometric.SigmaZeroSFPoly[0, 0])
radiometric.NoiseLevel = NoiseLevelType_(
NoiseLevelType='ABSOLUTE',
NoisePoly=[[noise_constant, ]])
out_sicd.Radiometric = radiometric
num_rows = self.header.gsat_img.rangePixels
num_cols = self.header.gsat_img.azPixels
scp_row = self.header.geo_info.pixLocImCtrRow
scp_col = self.header.geo_info.pixLocImCtrCol
collect_duration = self.header.ap_info.apertureTime
scp_time_utc_us = numpy.datetime64(datetime(*self.header.ap_info.ApTimeUTC), 'us').astype('int64')
start_time = (scp_time_utc_us - int(0.5*collect_duration*1e6)).astype('datetime64[us]')
tx_pol, tx_rcv_pol = _get_tx_rcv_polarization(
self.header.ap_info.txPolarization, self.header.ap_info.rxPolarization)
center_frequency = self.header.ap_info.ctrFreq
band_width = 0.0 # TODO: is this defined anywhere?
scp = geodetic_to_ecf(self.header.geo_info.patchCtrLLH)
arp_llh = self.header.ap_info.apcLLH
arp_pos = geodetic_to_ecf(arp_llh, ordering='latlon')
arp_vel = self.header.get_arp_vel()
if self.header.if_info.ifAlgo in ['PFA', 'OSAPF']:
# if self.header.if_info.ifAlgo == 'PFA':
image_form_algo = 'PFA'
grid_type = 'RGAZIM'
else:
image_form_algo = 'OTHER'
grid_type = 'PLANE'
collection_info = get_collection_info()
image_creation = get_image_creation()
image_data = get_image_data()
geo_data = get_geo_data()
scp = geo_data.SCP.ECF.get_array()
grid = get_grid()
scpcoa = get_scpcoa()
timeline = get_timeline()
radar_collection = get_radar_collection()
image_formation = get_image_formation()
out_sicd = SICDType(
CollectionInfo=collection_info,
ImageCreation=image_creation,
ImageData=image_data,
GeoData=geo_data,
Grid=grid,
SCPCOA=scpcoa,
Timeline=timeline,
RadarCollection=radar_collection,
ImageFormation=image_formation)
out_sicd.derive()
repair_scpcoa()
populate_radiometric()
out_sicd.populate_rniirs(override=False)
return out_sicd
def _get_size_and_symmetry(self):
# type: () -> ((int, int), (bool, bool, bool))
if self.header.gsat_img.pixOrder == 0:
# in range consecutive order, opposite from a SICD
data_size = (self.header.gsat_img.azPixels, self.header.gsat_img.rangePixels)
symmetry = (True, True, True)
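            # the final flag requests the axis swap needed to present the data in SICD
            # (range, azimuth) order - a hedged reading of the chipper symmetry convention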
elif self.header.gsat_img.pixOrder == 1:
# in azimuth consecutive order, like a SICD
data_size = (self.header.gsat_img.rangePixels, self.header.gsat_img.azPixels)
symmetry = (True, True, False)
else:
raise ValueError('Got unexpected pixel order `{}`'.format(self.header.gsat_img.pixOrder))
return data_size, symmetry
def _check_image_validity(self, band_order):
# type: (str) -> None
if self.header.gsat_img.pixelFormat.numComponents != 2:
raise ValueError(
'Got unexpected number of components `{}`'.format(
self.header.gsat_img.pixelFormat.numComponents))
image_compression_scheme = self.header.gsat_img.imageCompressionScheme
if image_compression_scheme in [1, 3]:
if band_order == 'sequential':
raise ValueError(
'GFF with sequential bands and jpeg or jpeg 2000 compression currently unsupported.')
if PIL is None:
raise ValueError(
'The GFF image is compressed using jpeg or jpeg 2000 compression, '
'and decompression requires the PIL library')
def _extract_zlib_image(self):
# type: () -> str
if self.header.gsat_img.imageCompressionScheme != 2:
raise ValueError('The image is not zlib compressed')
self.header.file_object.seek(self.header.image_offset, os.SEEK_SET)
data_bytes = zlib.decompress(self.header.file_object.read(self.header.image_header.size))
fi, path_name = mkstemp(suffix='.sarpy_cache', text=False)
os.close(fi)
self._cached_files.append(path_name)
logger.info('Created cached file {} for decompressed data'.format(path_name))
with open(path_name, 'wb') as the_file:
the_file.write(data_bytes)
logger.info('Filled cached file {}'.format(path_name))
return path_name
def _extract_pil_image(self, band_order, data_size):
# type: (str, (int, int)) -> str
if band_order == 'sequential':
raise ValueError(
'GFF with sequential bands and jpeg or jpeg 2000 compression currently unsupported.')
our_memmap = MemMap(self.header.file_object.name, self.header.image_header.size, self.header.image_offset)
img = PIL.Image.open(our_memmap) # this is a lazy operation
# dump the extracted image data out to a temp file
fi, path_name = mkstemp(suffix='.sarpy_cache', text=False)
os.close(fi)
self._cached_files.append(path_name)
logger.info('Created cached file {} for decompressed data'.format(path_name))
data = numpy.asarray(img) # create our numpy array from the PIL Image
if data.shape[:2] != data_size:
raise ValueError(
'Naively decompressed data of shape {}, but expected ({}, {}, {}).'.format(
data.shape, data_size[0], data_size[1], 2))
mem_map = numpy.memmap(path_name, dtype=data.dtype, mode='w+', offset=0, shape=data.shape)
mem_map[:] = data
# clean up this memmap and file overhead
del mem_map
logger.info('Filled cached file {}'.format(path_name))
return path_name
def _get_interleaved_chipper(self):
complex_domain = _get_complex_domain_code(self.header.gsat_img.pixelFormat.cmplxDomain)
dtype0 = _get_numpy_dtype(self.header.gsat_img.pixelFormat.comp0_dataType)
dtype1 = _get_numpy_dtype(self.header.gsat_img.pixelFormat.comp1_dataType)
raw_bands = 1
output_bands = 1
output_dtype = 'complex64'
data_size, symmetry = self._get_size_and_symmetry()
if complex_domain == 'IQ':
raw_dtype = numpy.dtype([('real', dtype0), ('imag', dtype1)])
transform_data = I_Q_to_complex()
elif complex_domain == 'QI':
raw_dtype = numpy.dtype([('imag', dtype0), ('real', dtype1)])
transform_data = I_Q_to_complex()
elif complex_domain == 'MP':
raw_dtype = numpy.dtype([('magnitude', dtype0), ('phase', dtype1)])
transform_data = phase_amp_int_to_complex()
elif complex_domain == 'PM':
raw_dtype = numpy.dtype([('phase', dtype0), ('magnitude', dtype1)])
transform_data = phase_amp_int_to_complex()
else:
raise ValueError('Got unexpected complex domain `{}`'.format(complex_domain))
image_compression_scheme = self.header.gsat_img.imageCompressionScheme
if image_compression_scheme == 0:
# no compression
the_file = self.header.file_object
data_offset = self.header.image_offset
elif image_compression_scheme in [1, 3]:
# jpeg or jpeg 2000 compression
the_file = self._extract_pil_image('interleaved', data_size)
data_offset = 0
elif image_compression_scheme == 2:
# zlib compression
the_file = self._extract_zlib_image()
data_offset = 0
else:
raise ValueError('Got unhandled image compression scheme code `{}`'.format(image_compression_scheme))
return BIPChipper(
the_file, raw_dtype, data_size, raw_bands, output_bands, output_dtype,
symmetry=symmetry, transform_data=transform_data,
data_offset=data_offset, limit_to_raw_bands=None)
def _get_sequential_chipper(self):
image_compression_scheme = self.header.gsat_img.imageCompressionScheme
complex_domain = _get_complex_domain_code(self.header.gsat_img.pixelFormat.cmplxDomain)
if self.header.gsat_img.pixelFormat.comp0_dataType != self.header.gsat_img.pixelFormat.comp1_dataType:
raise ValueError(
'GFF with sequential bands has the two components with different data types.\n\t'
'This is currently unsupported.')
raw_dtype = numpy.dtype(_get_numpy_dtype(self.header.gsat_img.pixelFormat.comp0_dataType))
raw_bands = 1
data_size, symmetry = self._get_size_and_symmetry()
band_size = data_size[0]*data_size[1]*raw_dtype.itemsize
if complex_domain in ['IQ', 'QI']:
transform_data = 'complex'
elif complex_domain in ['MP', 'PM']:
transform_data = phase_amp_seq_to_complex()
else:
raise ValueError('Got unexpected complex domain `{}`'.format(complex_domain))
if image_compression_scheme == 0:
# no compression
the_file = self.header.file_object
main_offset = self.header.image_offset
elif image_compression_scheme == 2:
the_file = self._extract_zlib_image()
main_offset = 0
else:
raise ValueError('Unhandled image compression scheme `{}`'.format(image_compression_scheme))
if complex_domain in ['IQ', 'MP']:
chippers = (
BIPChipper(
the_file, raw_dtype, data_size, raw_bands, raw_bands, raw_dtype,
symmetry=symmetry, transform_data=None,
data_offset=main_offset, limit_to_raw_bands=None),
BIPChipper(
the_file, raw_dtype, data_size, raw_bands, raw_bands, raw_dtype,
symmetry=symmetry, transform_data=None,
data_offset=main_offset+band_size, limit_to_raw_bands=None))
else:
# construct as IQ/MP order
chippers = (
BIPChipper(
the_file, raw_dtype, data_size, raw_bands, raw_bands, raw_dtype,
symmetry=symmetry, transform_data=None,
data_offset=main_offset+band_size, limit_to_raw_bands=None),
BIPChipper(
the_file, raw_dtype, data_size, raw_bands, raw_bands, raw_dtype,
symmetry=symmetry, transform_data=None,
data_offset=main_offset, limit_to_raw_bands=None))
return BSQChipper(chippers, raw_dtype, transform_data=transform_data)
def get_chipper(self):
band_order = _get_band_order(self.header.gsat_img.pixelFormat.cmplxDomain)
self._check_image_validity(band_order)
if band_order == 'interleaved':
return self._get_interleaved_chipper()
elif band_order == 'sequential':
return self._get_sequential_chipper()
else:
raise ValueError('Unhandled band order `{}`'.format(band_order))
def __del__(self):
"""
Clean up any cached files.
Returns
-------
None
"""
for fil in self._cached_files:
if os.path.exists(fil):
# noinspection PyBroadException
try:
os.remove(fil)
logger.info('Deleted cached file {}'.format(fil))
except Exception:
logger.error(
'Error in attempt to delete cached file {}.\n\t'
'Manually delete this file'.format(fil), exc_info=True)
####################
# formatting functions properly reading the data
def phase_amp_seq_to_complex():
"""
This constructs the function to convert from phase/magnitude format data,
assuming that data type is simple with two bands, to complex64 data.
Returns
-------
callable
"""
def converter(data):
if not isinstance(data, numpy.ndarray):
raise TypeError(
'Requires a numpy.ndarray, got {}'.format(type(data)))
        if len(data.shape) != 3 or data.shape[2] != 2:
raise ValueError('Requires a three-dimensional numpy.ndarray (with band '
'in the last dimension), got shape {}'.format(data.shape))
if data.dtype.name not in ['uint8', 'uint16', 'uint32', 'uint64']:
raise ValueError(
'Requires a numpy.ndarray of unsigned integer type.')
bit_depth = data.dtype.itemsize*8
out = numpy.zeros(data.shape[:2] + (1, ), dtype=numpy.complex64)
mag = data[:, :, 0]
theta = data[:, :, 1]*(2*numpy.pi/(1 << bit_depth))
out[:, :, 0].real = mag*numpy.cos(theta)
out[:, :, 0].imag = mag*numpy.sin(theta)
return out
return converter
def phase_amp_int_to_complex():
"""
This constructs the function to convert from phase/magnitude or magnitude/phase
format data, assuming that the data type is custom with a single band, to complex64 data.
Returns
-------
callable
"""
def converter(data):
if not isinstance(data, numpy.ndarray):
raise TypeError(
'Requires a numpy.ndarray, got {}'.format(type(data)))
        if len(data.shape) != 3 or data.shape[2] != 1:
raise ValueError('Requires a three-dimensional numpy.ndarray (with band '
'in the last dimension), got shape {}'.format(data.shape))
if data.dtype['phase'].name not in ['uint8', 'uint16', 'uint32', 'uint64'] or \
data.dtype['magnitude'].name not in ['uint8', 'uint16', 'uint32', 'uint64']:
raise ValueError(
'Requires a numpy.ndarray of composite dtype with phase and magnitude '
'of unsigned integer type.')
bit_depth = data.dtype['phase'].itemsize*8
out = numpy.zeros(data.shape, dtype=numpy.complex64)
mag = data['magnitude']
theta = data['phase']*(2*numpy.pi/(1 << bit_depth))
out[:].real = mag*numpy.cos(theta)
out[:].imag = mag*numpy.sin(theta)
return out
return converter
def I_Q_to_complex():
"""
For simple consistency, this constructs the function to simply convert from
I/Q or Q/I format data of a given bit-depth to complex64 data.
Returns
-------
callable
"""
def converter(data):
if not isinstance(data, numpy.ndarray):
raise TypeError(
'Requires a numpy.ndarray, got {}'.format(type(data)))
        if len(data.shape) != 3 or data.shape[2] != 1:
raise ValueError('Requires a three-dimensional numpy.ndarray (with band '
'in the last dimension), got shape {}'.format(data.shape))
out = numpy.zeros(data.shape, dtype='complex64')
out.real = data['real']
out.imag = data['imag']
return out
return converter
####################
# the actual reader implementation
class GFFDetails(object):
__slots__ = (
'_file_name', '_file_object', '_close_after',
'_endianness', '_major_version', '_minor_version',
'_header', '_interpreter')
def __init__(self, file_name):
"""
Parameters
----------
file_name : str
"""
self._endianness = None
self._major_version = None
self._minor_version = None
self._header = None
self._close_after = True
if not os.path.isfile(file_name):
raise SarpyIOError('Path {} is not a file'.format(file_name))
self._file_name = file_name
self._file_object = open(self._file_name, 'rb')
check = self._file_object.read(7)
if check != b'GSATIMG':
self._file_object.close()
self._close_after = False
raise SarpyIOError('file {} is not a GFF file'.format(self._file_name))
# initialize things
self._initialize()
@property
def file_name(self):
"""
str: the file name
"""
return self._file_name
@property
def endianness(self):
"""
str: The endian format of the GFF storage. Returns '<' if little-endian or '>' if big endian.
"""
return self._endianness
@property
def major_version(self):
"""
int: The major GFF version number
"""
return self._major_version
@property
def minor_version(self):
"""
int: The minor GFF version number
"""
return self._minor_version
@property
def version(self):
"""
str: The GFF version number
"""
return '{}.{}'.format(self._major_version, self._minor_version)
@property
def header(self):
"""
The GFF header object.
Returns
-------
_GFFHeader_1_6|_GFFHeader_1_8|_GFFHeader_2
"""
return self._header
@property
def interpreter(self):
"""
The GFF interpreter object.
Returns
-------
_GFFInterpreter
"""
return self._interpreter
def _initialize(self):
"""
Initialize the various elements
"""
self._file_object.seek(7, os.SEEK_SET)
check = self._file_object.read(1)
if check == b'\x20':
# this should be version 1.*, but we will verify
self._file_object.seek(54, os.SEEK_SET)
endianness = struct.unpack('H', self._file_object.read(2))[0] # 0 if little endian
estr = '<' if endianness == 0 else '>'
self._file_object.seek(8, os.SEEK_SET)
self._minor_version, self._major_version = struct.unpack('{}HH'.format(estr), self._file_object.read(4))
elif check == b'\x00':
# this should be a version 2.*, but we will verify
estr = '<'
self._file_object.seek(16, os.SEEK_SET)
self._major_version, self._minor_version = struct.unpack('{}HH'.format(estr), self._file_object.read(4))
else:
raise ValueError('Got unexpected check byte')
self._file_object.seek(0, os.SEEK_SET)
self._endianness = estr
version = self.version
if version == '1.6':
self._header = _GFFHeader_1_6(self._file_object, self.endianness)
self._interpreter = _GFFInterpreter1(self._header)
elif version == '1.8':
self._header = _GFFHeader_1_8(self._file_object, self.endianness)
self._interpreter = _GFFInterpreter1(self._header)
elif self.major_version == 2:
self._header = _GFFHeader_2(self._file_object, self.endianness)
self._interpreter = _GFFInterpreter2(self._header)
else:
raise ValueError('Got unhandled GFF version `{}`'.format(version))
def get_sicd(self):
"""
Gets the sicd structure.
Returns
-------
SICDType
"""
return self._interpreter.get_sicd()
def get_chipper(self):
"""
Gets the chipper for reading data.
Returns
-------
BIPChipper
"""
return self._interpreter.get_chipper()
def __del__(self):
if self._close_after:
self._close_after = False
# noinspection PyBroadException
try:
self._file_object.close()
except Exception:
pass
class GFFReader(BaseReader, SICDTypeReader):
"""
Gets a reader type object for GFF files
"""
__slots__ = ('_gff_details', )
def __init__(self, gff_details):
"""
Parameters
----------
gff_details : str|GFFDetails
file name or GFFDetails object
"""
if isinstance(gff_details, str):
gff_details = GFFDetails(gff_details)
if not isinstance(gff_details, GFFDetails):
raise TypeError('The input argument for a GFFReader must be a '
'filename or GFFDetails object')
self._gff_details = gff_details
sicd = gff_details.get_sicd()
chipper = gff_details.get_chipper()
SICDTypeReader.__init__(self, sicd)
BaseReader.__init__(self, chipper, reader_type="SICD")
self._check_sizes()
@property
def gff_details(self):
# type: () -> GFFDetails
"""
GFFDetails: The details object.
"""
return self._gff_details
@property
def file_name(self):
return self.gff_details.file_name
def __del__(self):
# noinspection PyBroadException
try:
del self._chipper # you have to explicitly delete and garbage collect the chipper to delete any temp file
gc.collect()
del self._gff_details
except Exception:
pass
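# Usage sketch (not part of the original file; the path below is a placeholder):
#
#   details = GFFDetails('example_image.gff')
#   print(details.version)            # e.g. '1.8' or '2.0'
#   reader = GFFReader(details)
#   sicd = reader.sicd_meta           # SICD metadata constructed by the interpreter
#   data = reader[:, :]               # full complex image, as read through the chipper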
| 36.459953 | 118 | 0.597993 |
4a2352991407d83d432cfaed248c401e43e49893 | 208 | py | Python | 1_beginner/chapter6/practice/too_long.py | code4tomorrow/Python | 035b6f5d8fd635a16caaff78bcd3f582663dadc3 | ["MIT"] | 4 | 2021-03-01T00:32:45.000Z | 2021-05-21T22:01:52.000Z | 1_beginner/chapter6/practice/too_long.py | code4tomorrow/Python | 035b6f5d8fd635a16caaff78bcd3f582663dadc3 | ["MIT"] | 29 | 2020-09-12T22:56:04.000Z | 2021-09-25T17:08:42.000Z | 1_beginner/chapter6/practice/too_long.py | code4tomorrow/Python | 035b6f5d8fd635a16caaff78bcd3f582663dadc3 | ["MIT"] | 7 | 2021-02-25T01:50:55.000Z | 2022-02-28T00:00:42.000Z |
"""
Too Long
Print and remove all elements with length
greater than 4 in a given list of strings.
"""
# list to help you test your code
the_list = ["dragon", "cab", "science", "dove", "lime", "river", "pop"]
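# A minimal sketch of one possible solution (not part of the original exercise
# stub): print every string longer than 4 characters, then return the list
# with those elements removed.
def too_long(words):
    for word in words:
        if len(word) > 4:
            print(word)
    return [word for word in words if len(word) <= 4]
# For the_list above this prints dragon, science and river, and returns
# ["cab", "dove", "lime", "pop"].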
| 26 | 71 | 0.673077 |
4a2352e749b000c1813e809ee35548b9ba1864cd | 203 | py | Python | zilencer/forms.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 4 | 2019-06-04T09:06:53.000Z | 2019-06-04T09:07:47.000Z | zilencer/forms.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 58 | 2018-11-27T15:18:54.000Z | 2018-12-09T13:43:07.000Z | zilencer/forms.py | fearless0307/zulip | 378d14af7ea73a9a83c7245706cd918bec5a37bf | [
"Apache-2.0"
] | 9 | 2019-11-04T18:59:29.000Z | 2022-03-22T17:46:37.000Z | from django import forms
class EnterpriseToSForm(forms.Form):
full_name = forms.CharField(max_length=100)
company = forms.CharField(max_length=100)
terms = forms.BooleanField(required=True)
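# Illustrative sketch only, not part of the Zulip source: a Django form is
# normally bound to submitted data and checked with is_valid() before use.
# The field values below are hypothetical.
def _example_enterprise_tos_form():
    form = EnterpriseToSForm({
        "full_name": "Ada Lovelace",
        "company": "Example Corp",
        "terms": True,
    })
    # cleaned_data holds the validated values; errors explains any failure
    return form.cleaned_data if form.is_valid() else form.errors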
| 29 | 47 | 0.768473 |
4a235394f2b4f0eb563dcd2fd14c6b4ff89123cd | 43,838 | py | Python | python/ray/autoscaler/_private/commands.py | luanagbmartins/ray | 6e3721a34eced8013ac4ff8ecd60105ea4056050 | [
"Apache-2.0"
] | null | null | null | python/ray/autoscaler/_private/commands.py | luanagbmartins/ray | 6e3721a34eced8013ac4ff8ecd60105ea4056050 | [
"Apache-2.0"
] | null | null | null | python/ray/autoscaler/_private/commands.py | luanagbmartins/ray | 6e3721a34eced8013ac4ff8ecd60105ea4056050 | [
"Apache-2.0"
] | 1 | 2021-10-11T22:53:49.000Z | 2021-10-11T22:53:49.000Z | import copy
import hashlib
import json
import logging
import os
import random
import sys
import subprocess
import tempfile
import time
from types import ModuleType
from typing import Any, Dict, List, Optional, Tuple, Union
import click
import redis
import yaml
try: # py3
from shlex import quote
except ImportError: # py2
from pipes import quote
from ray.experimental.internal_kv import _internal_kv_get
import ray._private.services as services
from ray.autoscaler.node_provider import NodeProvider
from ray.autoscaler._private.constants import \
AUTOSCALER_RESOURCE_REQUEST_CHANNEL
from ray.autoscaler._private.util import validate_config, hash_runtime_conf, \
hash_launch_conf, prepare_config, DEBUG_AUTOSCALING_ERROR, \
DEBUG_AUTOSCALING_STATUS
from ray.autoscaler._private.providers import _get_node_provider, \
_NODE_PROVIDERS, _PROVIDER_PRETTY_NAMES
from ray.autoscaler.tags import TAG_RAY_NODE_KIND, TAG_RAY_LAUNCH_CONFIG, \
TAG_RAY_NODE_NAME, NODE_KIND_WORKER, NODE_KIND_HEAD, TAG_RAY_USER_NODE_TYPE
from ray.autoscaler._private.cli_logger import cli_logger, cf
from ray.autoscaler._private.updater import NodeUpdaterThread
from ray.autoscaler._private.command_runner import set_using_login_shells, \
set_rsync_silent
from ray.autoscaler._private.log_timer import LogTimer
from ray.worker import global_worker # type: ignore
from ray.util.debug import log_once
import ray.autoscaler._private.subprocess_output_util as cmd_output_util
logger = logging.getLogger(__name__)
redis_client = None
RUN_ENV_TYPES = ["auto", "host", "docker"]
POLL_INTERVAL = 5
Port_forward = Union[Tuple[int, int], List[Tuple[int, int]]]
def _redis() -> redis.StrictRedis:
global redis_client
if redis_client is None:
redis_client = services.create_redis_client(
global_worker.node.redis_address,
password=global_worker.node.redis_password)
return redis_client
def try_logging_config(config: Dict[str, Any]) -> None:
if config["provider"]["type"] == "aws":
from ray.autoscaler._private.aws.config import log_to_cli
log_to_cli(config)
def try_get_log_state(provider_config: Dict[str, Any]) -> Optional[dict]:
if provider_config["type"] == "aws":
from ray.autoscaler._private.aws.config import get_log_state
return get_log_state()
return None
def try_reload_log_state(provider_config: Dict[str, Any],
log_state: dict) -> None:
if not log_state:
return
if provider_config["type"] == "aws":
from ray.autoscaler._private.aws.config import reload_log_state
return reload_log_state(log_state)
def debug_status() -> str:
"""Return a debug string for the autoscaler."""
status = _internal_kv_get(DEBUG_AUTOSCALING_STATUS)
error = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
if not status:
status = "No cluster status."
else:
status = status.decode("utf-8")
if error:
status += "\n"
status += error.decode("utf-8")
return status
def request_resources(num_cpus: Optional[int] = None,
bundles: Optional[List[dict]] = None) -> None:
"""Remotely request some CPU or GPU resources from the autoscaler.
This function is to be called e.g. on a node before submitting a bunch of
ray.remote calls to ensure that resources rapidly become available.
This function is EXPERIMENTAL.
Args:
num_cpus: int -- the number of CPU cores to request
bundles: List[dict] -- list of resource dicts (e.g., {"CPU": 1}). This
only has an effect if you've configured `available_node_types`
            in your cluster config.
"""
r = _redis()
if num_cpus is not None and num_cpus > 0:
r.publish(AUTOSCALER_RESOURCE_REQUEST_CHANNEL,
json.dumps({
"CPU": num_cpus
}))
if bundles:
r.publish(AUTOSCALER_RESOURCE_REQUEST_CHANNEL, json.dumps(bundles))
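# Illustrative sketch, not part of the original module: how a driver script
# might pre-warm the autoscaler before submitting work. It assumes an
# initialized Ray session (the Redis handle above requires one); the sizes
# and bundle shapes are hypothetical.
def _example_request_resources():
    # ask for capacity to run 16 CPU-bound tasks
    request_resources(num_cpus=16)
    # or phrase the request as resource bundles, e.g. two single-GPU workers
    request_resources(bundles=[{"GPU": 1}, {"GPU": 1}])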
def create_or_update_cluster(config_file: str,
override_min_workers: Optional[int],
override_max_workers: Optional[int],
no_restart: bool,
restart_only: bool,
yes: bool,
override_cluster_name: Optional[str] = None,
no_config_cache: bool = False,
redirect_command_output: Optional[bool] = False,
use_login_shells: bool = True) -> None:
"""Create or updates an autoscaling Ray cluster from a config json."""
set_using_login_shells(use_login_shells)
if not use_login_shells:
cmd_output_util.set_allow_interactive(False)
if redirect_command_output is None:
# Do not redirect by default.
cmd_output_util.set_output_redirected(False)
else:
cmd_output_util.set_output_redirected(redirect_command_output)
def handle_yaml_error(e):
cli_logger.error("Cluster config invalid")
cli_logger.newline()
cli_logger.error("Failed to load YAML file " + cf.bold("{}"),
config_file)
cli_logger.newline()
with cli_logger.verbatim_error_ctx("PyYAML error:"):
cli_logger.error(e)
cli_logger.abort()
try:
config = yaml.safe_load(open(config_file).read())
except FileNotFoundError:
cli_logger.abort(
"Provided cluster configuration file ({}) does not exist",
cf.bold(config_file))
raise
except yaml.parser.ParserError as e:
handle_yaml_error(e)
raise
except yaml.scanner.ScannerError as e:
handle_yaml_error(e)
raise
# todo: validate file_mounts, ssh keys, etc.
importer = _NODE_PROVIDERS.get(config["provider"]["type"])
if not importer:
cli_logger.abort(
"Unknown provider type " + cf.bold("{}") + "\n"
"Available providers are: {}", config["provider"]["type"],
cli_logger.render_list([
k for k in _NODE_PROVIDERS.keys()
if _NODE_PROVIDERS[k] is not None
]))
raise NotImplementedError("Unsupported provider {}".format(
config["provider"]))
printed_overrides = False
def handle_cli_override(key, override):
if override is not None:
if key in config:
nonlocal printed_overrides
printed_overrides = True
cli_logger.warning(
"`{}` override provided on the command line.\n"
" Using " + cf.bold("{}") + cf.dimmed(
" [configuration file has " + cf.bold("{}") + "]"),
key, override, config[key])
config[key] = override
handle_cli_override("min_workers", override_min_workers)
handle_cli_override("max_workers", override_max_workers)
handle_cli_override("cluster_name", override_cluster_name)
if printed_overrides:
cli_logger.newline()
cli_logger.labeled_value("Cluster", config["cluster_name"])
# disable the cli_logger here if needed
# because it only supports aws
if config["provider"]["type"] != "aws":
cli_logger.old_style = True
cli_logger.newline()
config = _bootstrap_config(config, no_config_cache=no_config_cache)
try_logging_config(config)
get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
override_cluster_name)
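# Illustrative sketch, not part of the original module: a programmatic
# equivalent of `ray up`. The YAML path is hypothetical and every other
# option is left at a typical default.
def _example_create_or_update_cluster():
    create_or_update_cluster(
        "cluster.yaml",
        override_min_workers=None,
        override_max_workers=None,
        no_restart=False,
        restart_only=False,
        yes=True,  # skip the interactive confirmation prompt
        override_cluster_name=None)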
CONFIG_CACHE_VERSION = 1
def _bootstrap_config(config: Dict[str, Any],
no_config_cache: bool = False) -> Dict[str, Any]:
config = prepare_config(config)
hasher = hashlib.sha1()
hasher.update(json.dumps([config], sort_keys=True).encode("utf-8"))
cache_key = os.path.join(tempfile.gettempdir(),
"ray-config-{}".format(hasher.hexdigest()))
if os.path.exists(cache_key) and not no_config_cache:
cli_logger.old_info(logger, "Using cached config at {}", cache_key)
config_cache = json.loads(open(cache_key).read())
if config_cache.get("_version", -1) == CONFIG_CACHE_VERSION:
# todo: is it fine to re-resolve? afaik it should be.
# we can have migrations otherwise or something
# but this seems overcomplicated given that resolving is
# relatively cheap
try_reload_log_state(config_cache["config"]["provider"],
config_cache.get("provider_log_info"))
if log_once("_printed_cached_config_warning"):
cli_logger.verbose_warning(
"Loaded cached provider configuration "
"from " + cf.bold("{}"), cache_key)
if cli_logger.verbosity == 0:
cli_logger.warning("Loaded cached provider configuration")
cli_logger.warning(
"If you experience issues with "
"the cloud provider, try re-running "
"the command with {}.", cf.bold("--no-config-cache"))
return config_cache["config"]
else:
cli_logger.warning(
"Found cached cluster config "
"but the version " + cf.bold("{}") + " "
"(expected " + cf.bold("{}") + ") does not match.\n"
"This is normal if cluster launcher was updated.\n"
"Config will be re-resolved.",
config_cache.get("_version", "none"), CONFIG_CACHE_VERSION)
validate_config(config)
importer = _NODE_PROVIDERS.get(config["provider"]["type"])
if not importer:
raise NotImplementedError("Unsupported provider {}".format(
config["provider"]))
provider_cls = importer(config["provider"])
cli_logger.print("Checking {} environment settings",
_PROVIDER_PRETTY_NAMES.get(config["provider"]["type"]))
resolved_config = provider_cls.bootstrap_config(config)
if not no_config_cache:
with open(cache_key, "w") as f:
config_cache = {
"_version": CONFIG_CACHE_VERSION,
"provider_log_info": try_get_log_state(config["provider"]),
"config": resolved_config
}
f.write(json.dumps(config_cache))
return resolved_config
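# Illustrative sketch, not part of the original module: the cache key used
# above is simply a sha1 of the prepared config, so the cached file for a
# given YAML can be located like this (the exact path depends on
# tempfile.gettempdir()).
def _example_config_cache_path(config_file):
    config = prepare_config(yaml.safe_load(open(config_file).read()))
    hasher = hashlib.sha1()
    hasher.update(json.dumps([config], sort_keys=True).encode("utf-8"))
    return os.path.join(tempfile.gettempdir(),
                        "ray-config-{}".format(hasher.hexdigest()))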
def teardown_cluster(config_file: str, yes: bool, workers_only: bool,
override_cluster_name: Optional[str],
keep_min_workers: bool) -> None:
"""Destroys all nodes of a Ray cluster described by a config json."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = prepare_config(config)
validate_config(config)
cli_logger.confirm(yes, "Destroying cluster.", _abort=True)
cli_logger.old_confirm("This will destroy your cluster", yes)
if not workers_only:
try:
exec_cluster(
config_file,
cmd="ray stop",
run_env="auto",
screen=False,
tmux=False,
stop=False,
start=False,
override_cluster_name=override_cluster_name,
port_forward=None,
with_output=False)
except Exception as e:
# todo: add better exception info
cli_logger.verbose_error("{}", str(e))
cli_logger.warning(
"Exception occurred when stopping the cluster Ray runtime "
"(use -v to dump teardown exceptions).")
cli_logger.warning(
"Ignoring the exception and "
"attempting to shut down the cluster nodes anyway.")
cli_logger.old_exception(
logger, "Ignoring error attempting a clean shutdown.")
provider = _get_node_provider(config["provider"], config["cluster_name"])
try:
def remaining_nodes():
workers = provider.non_terminated_nodes({
TAG_RAY_NODE_KIND: NODE_KIND_WORKER
})
if keep_min_workers:
min_workers = config.get("min_workers", 0)
cli_logger.print(
"{} random worker nodes will not be shut down. " +
cf.dimmed("(due to {})"), cf.bold(min_workers),
cf.bold("--keep-min-workers"))
cli_logger.old_info(logger,
"teardown_cluster: Keeping {} nodes...",
min_workers)
workers = random.sample(workers, len(workers) - min_workers)
# todo: it's weird to kill the head node but not all workers
if workers_only:
cli_logger.print(
"The head node will not be shut down. " +
cf.dimmed("(due to {})"), cf.bold("--workers-only"))
return workers
head = provider.non_terminated_nodes({
TAG_RAY_NODE_KIND: NODE_KIND_HEAD
})
return head + workers
def run_docker_stop(node, container_name):
try:
exec_cluster(
config_file,
cmd=f"docker stop {container_name}",
run_env="host",
screen=False,
tmux=False,
stop=False,
start=False,
override_cluster_name=override_cluster_name,
port_forward=None,
with_output=False)
except Exception:
cli_logger.warning(f"Docker stop failed on {node}")
cli_logger.old_warning(logger, f"Docker stop failed on {node}")
# Loop here to check that both the head and worker nodes are actually
# really gone
A = remaining_nodes()
container_name = config.get("docker", {}).get("container_name")
if container_name:
for node in A:
run_docker_stop(node, container_name)
with LogTimer("teardown_cluster: done."):
while A:
cli_logger.old_info(
logger, "teardown_cluster: "
"Shutting down {} nodes...", len(A))
provider.terminate_nodes(A)
cli_logger.print(
"Requested {} nodes to shut down.",
cf.bold(len(A)),
_tags=dict(interval="1s"))
time.sleep(
POLL_INTERVAL) # todo: interval should be a variable
A = remaining_nodes()
cli_logger.print("{} nodes remaining after {} second(s).",
cf.bold(len(A)), POLL_INTERVAL)
cli_logger.success("No nodes remaining.")
finally:
provider.cleanup()
def kill_node(config_file: str, yes: bool, hard: bool,
override_cluster_name: Optional[str]) -> str:
"""Kills a random Raylet worker."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
cli_logger.confirm(yes, "A random node will be killed.")
cli_logger.old_confirm("This will kill a node in your cluster", yes)
provider = _get_node_provider(config["provider"], config["cluster_name"])
try:
nodes = provider.non_terminated_nodes({
TAG_RAY_NODE_KIND: NODE_KIND_WORKER
})
node = random.choice(nodes)
cli_logger.print("Shutdown " + cf.bold("{}"), node)
cli_logger.old_info(logger, "kill_node: Shutdown worker {}", node)
if hard:
provider.terminate_node(node)
else:
updater = NodeUpdaterThread(
node_id=node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
file_mounts_contents_hash="",
is_head_node=False,
docker_config=config.get("docker"))
_exec(updater, "ray stop", False, False)
time.sleep(POLL_INTERVAL)
if config.get("provider", {}).get("use_internal_ips", False) is True:
node_ip = provider.internal_ip(node)
else:
node_ip = provider.external_ip(node)
finally:
provider.cleanup()
return node_ip
def monitor_cluster(cluster_config_file: str, num_lines: int,
override_cluster_name: Optional[str]) -> None:
"""Tails the autoscaler logs of a Ray cluster."""
cmd = f"tail -n {num_lines} -f /tmp/ray/session_latest/logs/monitor*"
exec_cluster(
cluster_config_file,
cmd=cmd,
run_env="auto",
screen=False,
tmux=False,
stop=False,
start=False,
override_cluster_name=override_cluster_name,
port_forward=None)
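# Illustrative note, not part of the original module: the next function warns
# when `ray start` or `--autoscaling-config` is missing from the head start
# commands. A `head_start_ray_commands` section that passes both checks
# typically looks like the sketch below (flags other than
# --autoscaling-config are indicative only):
#
#   head_start_ray_commands:
#       - ray stop
#       - ray start --head --port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml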
def warn_about_bad_start_command(start_commands: List[str]) -> None:
ray_start_cmd = list(filter(lambda x: "ray start" in x, start_commands))
if len(ray_start_cmd) == 0:
cli_logger.warning(
"Ray runtime will not be started because `{}` is not in `{}`.",
cf.bold("ray start"), cf.bold("head_start_ray_commands"))
cli_logger.old_warning(
logger,
"Ray start is not included in the head_start_ray_commands section."
)
if not any("autoscaling-config" in x for x in ray_start_cmd):
cli_logger.warning(
"The head node will not launch any workers because "
"`{}` does not have `{}` set.\n"
"Potential fix: add `{}` to the `{}` command under `{}`.",
cf.bold("ray start"), cf.bold("--autoscaling-config"),
cf.bold("--autoscaling-config=~/ray_bootstrap_config.yaml"),
cf.bold("ray start"), cf.bold("head_start_ray_commands"))
        cli_logger.old_warning(
            logger, "Ray start on the head node does not have the flag "
            "--autoscaling-config set. The head node will not launch "
            "workers. Add --autoscaling-config=~/ray_bootstrap_config.yaml "
            "to ray start in the head_start_ray_commands section.")
def get_or_create_head_node(config: Dict[str, Any],
config_file: str,
no_restart: bool,
restart_only: bool,
yes: bool,
override_cluster_name: Optional[str],
_provider: Optional[NodeProvider] = None,
_runner: ModuleType = subprocess) -> None:
"""Create the cluster head node, which in turn creates the workers."""
provider = (_provider or _get_node_provider(config["provider"],
config["cluster_name"]))
config = copy.deepcopy(config)
config_file = os.path.abspath(config_file)
try:
head_node_tags = {
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
}
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) > 0:
head_node = nodes[0]
else:
head_node = None
if not head_node:
cli_logger.confirm(
yes,
"No head node found. "
"Launching a new cluster.",
_abort=True)
cli_logger.old_confirm("This will create a new cluster", yes)
elif not no_restart:
cli_logger.old_confirm("This will restart cluster services", yes)
if head_node:
if restart_only:
cli_logger.confirm(
yes,
"Updating cluster configuration and "
"restarting the cluster Ray runtime. "
"Setup commands will not be run due to `{}`.\n",
cf.bold("--restart-only"),
_abort=True)
elif no_restart:
cli_logger.print(
"Cluster Ray runtime will not be restarted due "
"to `{}`.", cf.bold("--no-restart"))
cli_logger.confirm(
yes,
"Updating cluster configuration and "
"running setup commands.",
_abort=True)
else:
cli_logger.print(
"Updating cluster configuration and running full setup.")
cli_logger.confirm(
yes,
cf.bold("Cluster Ray runtime will be restarted."),
_abort=True)
cli_logger.newline()
# TODO(ekl) this logic is duplicated in node_launcher.py (keep in sync)
head_node_config = copy.deepcopy(config["head_node"])
if "head_node_type" in config:
head_node_tags[TAG_RAY_USER_NODE_TYPE] = config["head_node_type"]
head_node_config.update(config["available_node_types"][config[
"head_node_type"]]["node_config"])
launch_hash = hash_launch_conf(head_node_config, config["auth"])
if head_node is None or provider.node_tags(head_node).get(
TAG_RAY_LAUNCH_CONFIG) != launch_hash:
with cli_logger.group("Acquiring an up-to-date head node"):
if head_node is not None:
cli_logger.print(
"Currently running head node is out-of-date with "
"cluster configuration")
cli_logger.print(
"hash is {}, expected {}",
cf.bold(
provider.node_tags(head_node)
.get(TAG_RAY_LAUNCH_CONFIG)), cf.bold(launch_hash))
cli_logger.confirm(yes, "Relaunching it.", _abort=True)
cli_logger.old_confirm(
"Head node config out-of-date. It will be terminated",
yes)
cli_logger.old_info(
logger, "get_or_create_head_node: "
"Shutting down outdated head node {}", head_node)
provider.terminate_node(head_node)
cli_logger.print("Terminated head node {}", head_node)
cli_logger.old_info(
logger,
"get_or_create_head_node: Launching new head node...")
head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
config["cluster_name"])
provider.create_node(head_node_config, head_node_tags, 1)
cli_logger.print("Launched a new head node")
start = time.time()
head_node = None
with cli_logger.group("Fetching the new head node"):
while True:
if time.time() - start > 50:
cli_logger.abort(
"Head node fetch timed out.") # todo: msg
raise RuntimeError("Failed to create head node.")
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) == 1:
head_node = nodes[0]
break
time.sleep(POLL_INTERVAL)
cli_logger.newline()
with cli_logger.group(
"Setting up head node",
_numbered=("<>", 1, 1),
# cf.bold(provider.node_tags(head_node)[TAG_RAY_NODE_NAME]),
_tags=dict()): # add id, ARN to tags?
# TODO(ekl) right now we always update the head node even if the
# hash matches.
# We could prompt the user for what they want to do here.
# No need to pass in cluster_sync_files because we use this
# hash to set up the head node
(runtime_hash, file_mounts_contents_hash) = hash_runtime_conf(
config["file_mounts"], None, config)
cli_logger.old_info(
logger,
"get_or_create_head_node: Updating files on head node...")
# Rewrite the auth config so that the head
# node can update the workers
remote_config = copy.deepcopy(config)
# drop proxy options if they exist, otherwise
# head node won't be able to connect to workers
remote_config["auth"].pop("ssh_proxy_command", None)
if "ssh_private_key" in config["auth"]:
remote_key_path = "~/ray_bootstrap_key.pem"
remote_config["auth"]["ssh_private_key"] = remote_key_path
# Adjust for new file locations
new_mounts = {}
for remote_path in config["file_mounts"]:
new_mounts[remote_path] = remote_path
remote_config["file_mounts"] = new_mounts
remote_config["no_restart"] = no_restart
remote_config = provider.prepare_for_head_node(remote_config)
# Now inject the rewritten config and SSH key into the head node
remote_config_file = tempfile.NamedTemporaryFile(
"w", prefix="ray-bootstrap-")
remote_config_file.write(json.dumps(remote_config))
remote_config_file.flush()
config["file_mounts"].update({
"~/ray_bootstrap_config.yaml": remote_config_file.name
})
if "ssh_private_key" in config["auth"]:
config["file_mounts"].update({
remote_key_path: config["auth"]["ssh_private_key"],
})
cli_logger.print("Prepared bootstrap config")
if restart_only:
setup_commands = []
ray_start_commands = config["head_start_ray_commands"]
elif no_restart:
setup_commands = config["head_setup_commands"]
ray_start_commands = []
else:
setup_commands = config["head_setup_commands"]
ray_start_commands = config["head_start_ray_commands"]
if not no_restart:
warn_about_bad_start_command(ray_start_commands)
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=config["initialization_commands"],
setup_commands=setup_commands,
ray_start_commands=ray_start_commands,
process_runner=_runner,
runtime_hash=runtime_hash,
file_mounts_contents_hash=file_mounts_contents_hash,
is_head_node=True,
rsync_options={
"rsync_exclude": config.get("rsync_exclude"),
"rsync_filter": config.get("rsync_filter")
},
docker_config=config.get("docker"))
updater.start()
updater.join()
# Refresh the node cache so we see the external ip if available
provider.non_terminated_nodes(head_node_tags)
if config.get("provider", {}).get("use_internal_ips",
False) is True:
head_node_ip = provider.internal_ip(head_node)
else:
head_node_ip = provider.external_ip(head_node)
if updater.exitcode != 0:
# todo: this does not follow the mockup and is not good enough
cli_logger.abort("Failed to setup head node.")
cli_logger.old_error(
logger, "get_or_create_head_node: "
"Updating {} failed", head_node_ip)
sys.exit(1)
cli_logger.old_info(
logger, "get_or_create_head_node: "
"Head node up-to-date, IP address is: {}", head_node_ip)
monitor_str = "tail -n 100 -f /tmp/ray/session_latest/logs/monitor*"
if override_cluster_name:
modifiers = " --cluster-name={}".format(
quote(override_cluster_name))
else:
modifiers = ""
if cli_logger.old_style:
print("To monitor autoscaling activity, you can run:\n\n"
" ray exec {} {}{}\n".format(config_file,
quote(monitor_str), modifiers))
print("To open a console on the cluster:\n\n"
" ray attach {}{}\n".format(config_file, modifiers))
print("To get a remote shell to the cluster manually, run:\n\n"
" {}\n".format(
updater.cmd_runner.remote_shell_command_str()))
cli_logger.newline()
with cli_logger.group("Useful commands"):
cli_logger.print("Monitor autoscaling with")
cli_logger.print(
cf.bold(" ray exec {}{} {}"), config_file, modifiers,
quote(monitor_str))
cli_logger.print("Connect to a terminal on the cluster head:")
cli_logger.print(
cf.bold(" ray attach {}{}"), config_file, modifiers)
remote_shell_str = updater.cmd_runner.remote_shell_command_str()
cli_logger.print("Get a remote shell to the cluster manually:")
cli_logger.print(" {}", remote_shell_str.strip())
finally:
provider.cleanup()
def attach_cluster(config_file: str,
start: bool,
use_screen: bool,
use_tmux: bool,
override_cluster_name: Optional[str],
no_config_cache: bool = False,
new: bool = False,
port_forward: Optional[Port_forward] = None) -> None:
"""Attaches to a screen for the specified cluster.
Arguments:
config_file: path to the cluster yaml
start: whether to start the cluster if it isn't up
use_screen: whether to use screen as multiplexer
use_tmux: whether to use tmux as multiplexer
override_cluster_name: set the name of the cluster
new: whether to force a new screen
port_forward ( (int,int) or list[(int,int)] ): port(s) to forward
"""
if use_tmux:
if new:
cmd = "tmux new"
else:
cmd = "tmux attach || tmux new"
elif use_screen:
if new:
cmd = "screen -L"
else:
cmd = "screen -L -xRR"
else:
if new:
raise ValueError(
"--new only makes sense if passing --screen or --tmux")
cmd = "$SHELL"
exec_cluster(
config_file,
cmd=cmd,
run_env="auto",
screen=False,
tmux=False,
stop=False,
start=start,
override_cluster_name=override_cluster_name,
no_config_cache=no_config_cache,
port_forward=port_forward,
)
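# Illustrative sketch, not part of the original module: attach to an existing
# cluster inside a fresh tmux session while forwarding a port. The YAML path
# and the port pair are hypothetical.
def _example_attach_cluster():
    attach_cluster(
        "cluster.yaml",
        start=False,
        use_screen=False,
        use_tmux=True,
        override_cluster_name=None,
        new=True,  # force a new tmux session instead of attaching
        port_forward=(8265, 8265))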
def exec_cluster(config_file: str,
*,
cmd: str = None,
run_env: str = "auto",
screen: bool = False,
tmux: bool = False,
stop: bool = False,
start: bool = False,
override_cluster_name: Optional[str] = None,
no_config_cache: bool = False,
port_forward: Optional[Port_forward] = None,
with_output: bool = False) -> str:
"""Runs a command on the specified cluster.
Arguments:
config_file: path to the cluster yaml
cmd: command to run
run_env: whether to run the command on the host or in a container.
Select between "auto", "host" and "docker"
screen: whether to run in a screen
tmux: whether to run in a tmux session
stop: whether to stop the cluster after command run
start: whether to start the cluster if it isn't up
override_cluster_name: set the name of the cluster
port_forward ( (int, int) or list[(int, int)] ): port(s) to forward
"""
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
assert run_env in RUN_ENV_TYPES, "--run_env must be in {}".format(
RUN_ENV_TYPES)
# TODO(rliaw): We default this to True to maintain backwards-compat.
# In the future we would want to support disabling login-shells
# and interactivity.
cmd_output_util.set_allow_interactive(True)
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config, no_config_cache=no_config_cache)
head_node = _get_head_node(
config, config_file, override_cluster_name, create_if_needed=start)
provider = _get_node_provider(config["provider"], config["cluster_name"])
try:
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
file_mounts_contents_hash="",
is_head_node=True,
rsync_options={
"rsync_exclude": config.get("rsync_exclude"),
"rsync_filter": config.get("rsync_filter")
},
docker_config=config.get("docker"))
shutdown_after_run = False
if cmd and stop:
            cmd = "; ".join([
                cmd, "ray stop",
                "ray teardown ~/ray_bootstrap_config.yaml --yes --workers-only"
            ])
shutdown_after_run = True
result = _exec(
updater,
cmd,
screen,
tmux,
port_forward=port_forward,
with_output=with_output,
run_env=run_env,
shutdown_after_run=shutdown_after_run)
if tmux or screen:
attach_command_parts = ["ray attach", config_file]
if override_cluster_name is not None:
attach_command_parts.append(
"--cluster-name={}".format(override_cluster_name))
if tmux:
attach_command_parts.append("--tmux")
elif screen:
attach_command_parts.append("--screen")
attach_command = " ".join(attach_command_parts)
cli_logger.print("Run `{}` to check command status.",
cf.bold(attach_command))
attach_info = "Use `{}` to check on command status.".format(
attach_command)
cli_logger.old_info(logger, attach_info)
return result
finally:
provider.cleanup()
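# Illustrative sketch, not part of the original module: run a one-off command
# on the head node, mirroring `ray exec`. The YAML path and the command are
# hypothetical.
def _example_exec_cluster():
    return exec_cluster(
        "cluster.yaml",
        cmd="python -c 'import ray; print(ray.__version__)'",
        run_env="auto",
        screen=False,
        tmux=False,
        stop=False,
        start=False,
        override_cluster_name=None,
        port_forward=None,
        with_output=True)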
def _exec(updater: NodeUpdaterThread,
cmd: Optional[str] = None,
screen: bool = False,
tmux: bool = False,
port_forward: Optional[Port_forward] = None,
with_output: bool = False,
run_env: str = "auto",
shutdown_after_run: bool = False) -> str:
if cmd:
if screen:
wrapped_cmd = [
"screen", "-L", "-dm", "bash", "-c",
quote(cmd + "; exec bash")
]
cmd = " ".join(wrapped_cmd)
elif tmux:
# TODO: Consider providing named session functionality
wrapped_cmd = [
"tmux", "new", "-d", "bash", "-c",
quote(cmd + "; exec bash")
]
cmd = " ".join(wrapped_cmd)
return updater.cmd_runner.run(
cmd,
exit_on_fail=True,
port_forward=port_forward,
with_output=with_output,
run_env=run_env,
shutdown_after_run=shutdown_after_run)
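# Illustrative note, not part of the original module: with screen=True a
# command such as 'ray status' is wrapped above roughly as
#   screen -L -dm bash -c 'ray status; exec bash'
# and with tmux=True as
#   tmux new -d bash -c 'ray status; exec bash'
# so the remote shell stays alive after the command finishes and can be
# re-attached with `ray attach`.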
def rsync(config_file: str,
source: Optional[str],
target: Optional[str],
override_cluster_name: Optional[str],
down: bool,
ip_address: Optional[str] = None,
use_internal_ip: bool = False,
no_config_cache: bool = False,
all_nodes: bool = False,
_runner: ModuleType = subprocess) -> None:
"""Rsyncs files.
Arguments:
config_file: path to the cluster yaml
source: source dir
target: target dir
override_cluster_name: set the name of the cluster
down: whether we're syncing remote -> local
        ip_address (str): Address of node. An exception is raised
            if both ip_address and 'all_nodes' are provided.
use_internal_ip (bool): Whether the provided ip_address is
public or private.
all_nodes: whether to sync worker nodes in addition to the head node
"""
if bool(source) != bool(target):
cli_logger.abort(
"Expected either both a source and a target, or neither.")
    assert bool(source) == bool(target), (
        "Must provide either both source and target, or neither.")
if ip_address and all_nodes:
cli_logger.abort("Cannot provide both ip_address and 'all_nodes'.")
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config, no_config_cache=no_config_cache)
is_file_mount = False
if source and target:
for remote_mount in config.get("file_mounts", {}).keys():
if (source if down else target).startswith(remote_mount):
is_file_mount = True
break
provider = _get_node_provider(config["provider"], config["cluster_name"])
def rsync_to_node(node_id, is_head_node):
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
use_internal_ip=use_internal_ip,
process_runner=_runner,
file_mounts_contents_hash="",
is_head_node=is_head_node,
rsync_options={
"rsync_exclude": config.get("rsync_exclude"),
"rsync_filter": config.get("rsync_filter")
},
docker_config=config.get("docker"))
if down:
rsync = updater.rsync_down
else:
rsync = updater.rsync_up
if source and target:
# print rsync progress for single file rsync
cmd_output_util.set_output_redirected(False)
set_rsync_silent(False)
rsync(source, target, is_file_mount)
else:
updater.sync_file_mounts(rsync)
try:
nodes = []
head_node = _get_head_node(
config, config_file, override_cluster_name, create_if_needed=False)
if ip_address:
nodes = [
provider.get_node_id(
ip_address, use_internal_ip=use_internal_ip)
]
else:
if all_nodes:
nodes = _get_worker_nodes(config, override_cluster_name)
nodes += [head_node]
for node_id in nodes:
rsync_to_node(node_id, is_head_node=(node_id == head_node))
finally:
provider.cleanup()
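# Illustrative sketch, not part of the original module: pull the head node's
# session logs to the local machine, as `ray rsync-down` would. Both paths
# are hypothetical.
def _example_rsync_logs_down():
    rsync(
        "cluster.yaml",
        source="/tmp/ray/session_latest/logs/",
        target="./head-node-logs/",
        override_cluster_name=None,
        down=True)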
def get_head_node_ip(config_file: str,
override_cluster_name: Optional[str] = None) -> str:
"""Returns head node IP for given configuration file if exists."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
provider = _get_node_provider(config["provider"], config["cluster_name"])
try:
head_node = _get_head_node(config, config_file, override_cluster_name)
if config.get("provider", {}).get("use_internal_ips", False):
head_node_ip = provider.internal_ip(head_node)
else:
head_node_ip = provider.external_ip(head_node)
finally:
provider.cleanup()
return head_node_ip
def get_worker_node_ips(config_file: str,
override_cluster_name: Optional[str] = None
) -> List[str]:
"""Returns worker node IPs for given configuration file."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
provider = _get_node_provider(config["provider"], config["cluster_name"])
try:
nodes = provider.non_terminated_nodes({
TAG_RAY_NODE_KIND: NODE_KIND_WORKER
})
if config.get("provider", {}).get("use_internal_ips", False) is True:
return [provider.internal_ip(node) for node in nodes]
else:
return [provider.external_ip(node) for node in nodes]
finally:
provider.cleanup()
def _get_worker_nodes(config: Dict[str, Any],
override_cluster_name: Optional[str]) -> List[str]:
"""Returns worker node ids for given configuration."""
# todo: technically could be reused in get_worker_node_ips
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
provider = _get_node_provider(config["provider"], config["cluster_name"])
try:
return provider.non_terminated_nodes({
TAG_RAY_NODE_KIND: NODE_KIND_WORKER
})
finally:
provider.cleanup()
def _get_head_node(config: Dict[str, Any],
config_file: str,
override_cluster_name: Optional[str],
create_if_needed: bool = False) -> str:
provider = _get_node_provider(config["provider"], config["cluster_name"])
try:
head_node_tags = {
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
}
nodes = provider.non_terminated_nodes(head_node_tags)
finally:
provider.cleanup()
if len(nodes) > 0:
head_node = nodes[0]
return head_node
elif create_if_needed:
get_or_create_head_node(
config,
config_file,
restart_only=False,
no_restart=False,
yes=True,
override_cluster_name=override_cluster_name)
return _get_head_node(
config, config_file, override_cluster_name, create_if_needed=False)
else:
raise RuntimeError("Head node of cluster ({}) not found!".format(
config["cluster_name"]))
def confirm(msg: str, yes: bool) -> Optional[bool]:
return None if yes else click.confirm(msg, abort=True)
| 38.286463 | 79 | 0.579748 |
4a23545b08cfaa954390eb0ed19d51c28ee9f033 | 193,592 | py | Python | pysnmp-with-texts/OPENWAVE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/OPENWAVE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/OPENWAVE-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module OPENWAVE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/OPENWAVE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:35:08 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Counter32, ModuleIdentity, Unsigned32, IpAddress, iso, NotificationType, ObjectIdentity, Bits, MibIdentifier, Gauge32, NotificationType, Integer32, enterprises, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Counter32", "ModuleIdentity", "Unsigned32", "IpAddress", "iso", "NotificationType", "ObjectIdentity", "Bits", "MibIdentifier", "Gauge32", "NotificationType", "Integer32", "enterprises", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
class DisplayString(OctetString):
pass
openwave = MibIdentifier((1, 3, 6, 1, 4, 1, 1900))
systems = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4))
upiInit = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 1))
upiInitDescriptionTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 1, 1), )
if mibBuilder.loadTexts: upiInitDescriptionTable.setStatus('mandatory')
if mibBuilder.loadTexts: upiInitDescriptionTable.setDescription('the table holding UPInit identifying information')
upiInitDescriptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 1, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "upidInitIpAddr"), (0, "OPENWAVE-MIB", "upidInitProcessId"))
if mibBuilder.loadTexts: upiInitDescriptionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: upiInitDescriptionEntry.setDescription('the entry associated with each UPInit process')
upidInitIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upidInitIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: upidInitIpAddr.setDescription('the ip address of the host running UPInit')
upidInitProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upidInitProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: upidInitProcessId.setDescription('the process id of UPInit')
upidInitVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upidInitVersion.setStatus('mandatory')
if mibBuilder.loadTexts: upidInitVersion.setDescription('the version of the UP system')
upidInitProcessType = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upidInitProcessType.setStatus('mandatory')
if mibBuilder.loadTexts: upidInitProcessType.setDescription('one of these : UPLInit, UPAdmInit, UPAppsInit')
upidInitHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upidInitHostName.setStatus('mandatory')
if mibBuilder.loadTexts: upidInitHostName.setDescription('the host name where the UPInit process runs')
upidInitStartupTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upidInitStartupTime.setStatus('mandatory')
if mibBuilder.loadTexts: upidInitStartupTime.setDescription('the UPInit start up time')
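# Illustrative sketch only, not part of the generated MIB module: walking the
# upiInitDescriptionTable above from a manager with the pysnmp high-level API.
# The agent address and community string are hypothetical, and the compiled
# OPENWAVE-MIB must be resolvable on the local MIB search path.
def _example_walk_upi_init_description_table():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    iterator = nextCmd(
        SnmpEngine(), CommunityData('public'),
        UdpTransportTarget(('192.0.2.10', 161)), ContextData(),
        ObjectType(ObjectIdentity('OPENWAVE-MIB', 'upidInitHostName')),
        ObjectType(ObjectIdentity('OPENWAVE-MIB', 'upidInitVersion')),
        lexicographicMode=False)
    for error_indication, error_status, _, var_binds in iterator:
        if error_indication or error_status:
            break
        for var_bind in var_binds:
            print(' = '.join([x.prettyPrint() for x in var_bind]))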
upiInitStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2))
upiInitChildProcessTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1), )
if mibBuilder.loadTexts: upiInitChildProcessTable.setStatus('mandatory')
if mibBuilder.loadTexts: upiInitChildProcessTable.setDescription('the table holding child process identifying information')
upiInitChildProcessEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "upipInitIpAddr"), (0, "OPENWAVE-MIB", "upipInitProcessId"), (0, "OPENWAVE-MIB", "upipChildProcessId"))
if mibBuilder.loadTexts: upiInitChildProcessEntry.setStatus('mandatory')
if mibBuilder.loadTexts: upiInitChildProcessEntry.setDescription('the entry associated with each child process')
upipInitIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipInitIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: upipInitIpAddr.setDescription('the ip address of the host where UPInit runs')
upipInitProcessType = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipInitProcessType.setStatus('mandatory')
if mibBuilder.loadTexts: upipInitProcessType.setDescription('the UPInit process type: UPAdmInit, UPLInit or UPApsInit')
upipInitProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipInitProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: upipInitProcessId.setDescription('the UPInit process pid')
upipChildProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessId.setDescription('the child process pid')
upipChildProcessType = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessType.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessType.setDescription('the child process type, e.g. Dispatcher, Agent')
upipChildProcessIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 6), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessIpAddr.setDescription('the ip address of the host where the child process runs')
upipChildProcessHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessHostName.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessHostName.setDescription('the name of the host where the child process runs')
upipChildProcessExePath = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessExePath.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessExePath.setDescription('the execution path of the child process')
upipChildProcessExeArgs = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessExeArgs.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessExeArgs.setDescription('the arguments used when executing the child process')
upipChildProcessState = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessState.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessState.setDescription('the state of the child process')
upipChildProcessStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessStatus.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessStatus.setDescription('how the child process is started')
upipChildProcessStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessStartTime.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessStartTime.setDescription('the child process start time')
upipChildProcessStopTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 1, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipChildProcessStopTime.setStatus('mandatory')
if mibBuilder.loadTexts: upipChildProcessStopTime.setDescription('the child process stop time')
upiInitChildProcessStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 2), )
if mibBuilder.loadTexts: upiInitChildProcessStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: upiInitChildProcessStatsTable.setDescription('the table holding child process statistics')
upiInitChildProcessStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 2, 1), ).setIndexNames((0, "OPENWAVE-MIB", "upipsInitIpAddr"), (0, "OPENWAVE-MIB", "upipsInitProcessId"), (0, "OPENWAVE-MIB", "upipsChildProcessType"))
if mibBuilder.loadTexts: upiInitChildProcessStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: upiInitChildProcessStatsEntry.setDescription('the statistics associated with each child process type')
upipsInitIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 2, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipsInitIpAddr.setStatus('mandatory')
if mibBuilder.loadTexts: upipsInitIpAddr.setDescription(' the ip address of the host where UPInit runs')
upipsInitProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipsInitProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: upipsInitProcessId.setDescription(' the UPInit process pid')
upipsChildProcessType = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipsChildProcessType.setStatus('mandatory')
if mibBuilder.loadTexts: upipsChildProcessType.setDescription('the child process name, e.g. Dispatcher,Agent')
upipsInitProcessType = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipsInitProcessType.setStatus('mandatory')
if mibBuilder.loadTexts: upipsInitProcessType.setDescription(' the UPInit process name, e.g. UPLInit UPAdmInit')
upipsChildProcessesStarted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipsChildProcessesStarted.setStatus('mandatory')
if mibBuilder.loadTexts: upipsChildProcessesStarted.setDescription('the number of times processes of this type started')
upipsChildProcessesDied = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipsChildProcessesDied.setStatus('mandatory')
if mibBuilder.loadTexts: upipsChildProcessesDied.setDescription('the number of times processes of this type died')
upipsChildProcessesRunning = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 1, 2, 2, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upipsChildProcessesRunning.setStatus('mandatory')
if mibBuilder.loadTexts: upipsChildProcessesRunning.setDescription('the number of processes of this type currently running')
upiInitTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 1, 20))
upitTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 1, 20, 1), DisplayString())
if mibBuilder.loadTexts: upitTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: upitTrapInfo.setDescription('Text string which provides additional information about the trap.')
upitChildProcessHostName = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 1, 20, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upitChildProcessHostName.setStatus('mandatory')
if mibBuilder.loadTexts: upitChildProcessHostName.setDescription('the hostname of the host where child process runs')
upitChildProcessType = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 1, 20, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upitChildProcessType.setStatus('mandatory')
if mibBuilder.loadTexts: upitChildProcessType.setDescription('the child process type e.g. Dispatcher, Agent')
upitChildProcessId = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 1, 20, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upitChildProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: upitChildProcessId.setDescription('the child process id')
upiChildProcessStart = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,300)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitChildProcessHostName"), ("OPENWAVE-MIB", "upitChildProcessType"), ("OPENWAVE-MIB", "upitChildProcessId"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiChildProcessStart.setDescription('An event that is generated when a child process is started/restarted.')
upiChildProcessShutdown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,301)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitChildProcessHostName"), ("OPENWAVE-MIB", "upitChildProcessType"), ("OPENWAVE-MIB", "upitChildProcessId"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiChildProcessShutdown.setDescription('An event that is generated when a child process is shut down. upitTrapInfo gives the condition.')
upiInitFailToStart = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,302)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiInitFailToStart.setDescription('An event that is generated when UPInit fails to start itself.')
upiInitShutdown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,303)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiInitShutdown.setDescription('An event that is generated when UPInit is about to shutdown itself.')
upiAllChildProcessesStop = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,304)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiAllChildProcessesStop.setDescription('An event that is generated when UPInit is about to stop all child processes.')
upiAllChildProcessesRestart = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,305)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiAllChildProcessesRestart.setDescription('An event that is generated when UPInit is about to restart all child process.')
upiDatabaseConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,306)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiDatabaseConnectionDown.setDescription('An event that is generated when UPInit detects that the database is down.')
upiDatabaseConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,307)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiDatabaseConnectionUp.setDescription('An event that is generated when UPInit detects that the database is back up.')
upiChildProcessFailToStart = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,308)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitChildProcessType"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiChildProcessFailToStart.setDescription('An event that is generated when a child process fails to start.')
upiNoChildProcess = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,309)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiNoChildProcess.setDescription('An event that is generated when no child process is started by this upiInit instance.')
upiChildProcessesBelowMinimum = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 1) + (0,310)).setObjects(("OPENWAVE-MIB", "upidInitHostName"), ("OPENWAVE-MIB", "upidInitProcessType"), ("OPENWAVE-MIB", "upidInitProcessId"), ("OPENWAVE-MIB", "upitChildProcessType"), ("OPENWAVE-MIB", "upitTrapInfo"))
if mibBuilder.loadTexts: upiChildProcessesBelowMinimum.setDescription('An event that is generated when the number of child processes started by means of the TCP-Connect mechanism, as defined in the cfg_init_tab, becomes less than 2. In most cases this really applies to the number of agents.')
upLink = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2))
upLinkProcesses = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1))
uplDispatcher = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1))
uplDispatcherDescription = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 1))
upldHostName = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 1, 1), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldHostName.setStatus('mandatory')
if mibBuilder.loadTexts: upldHostName.setDescription('Name of the host on which dispatcher resides.')
upldProcessId = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: upldProcessId.setDescription('Process ID for the dispatcher.')
upldPortNumber = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: upldPortNumber.setDescription('Port number the dispatcher uses to listen for incomming airlink requests.')
upldStartUpTime = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldStartUpTime.setStatus('mandatory')
if mibBuilder.loadTexts: upldStartUpTime.setDescription('Time the dispatcher started.')
upldState = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("regular", 1), ("redirect", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldState.setStatus('mandatory')
if mibBuilder.loadTexts: upldState.setDescription('The state of the dispatcher: regular: routes airlink requests to UPLAgents; redirect: routes airlink requests to other UPLDispatchers permanently.')
uplDispatcherStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 2))
upldRequestsReceived = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldRequestsReceived.setStatus('mandatory')
if mibBuilder.loadTexts: upldRequestsReceived.setDescription('Requests that have been received since the dispatcher started.')
upldRequestsDropped = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldRequestsDropped.setStatus('mandatory')
if mibBuilder.loadTexts: upldRequestsDropped.setDescription('Requests that have been dropped due to lack of resources since the dispatcher started.')
upldUplAgentsLoaded = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 2, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldUplAgentsLoaded.setStatus('mandatory')
if mibBuilder.loadTexts: upldUplAgentsLoaded.setDescription('Total number of agents currently loaded in the dispatcher.')
upldUplAgentsDisconnected = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 2, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldUplAgentsDisconnected.setStatus('mandatory')
if mibBuilder.loadTexts: upldUplAgentsDisconnected.setDescription('Total number of agents currently disconnected among all the agents loaded in the dispatcher.')
upldSubscribersLoaded = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 2, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldSubscribersLoaded.setStatus('mandatory')
if mibBuilder.loadTexts: upldSubscribersLoaded.setDescription('Total number of subscribers currently loaded in the dispatcher.')
upldKeyExchanges = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 2, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldKeyExchanges.setStatus('mandatory')
if mibBuilder.loadTexts: upldKeyExchanges.setDescription('Number of key exchanges since the dispatcher started.')
uplDispRadiusClientStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 3))
upldTotalMappingTableHits = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 3, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldTotalMappingTableHits.setStatus('mandatory')
if mibBuilder.loadTexts: upldTotalMappingTableHits.setDescription('The total number of CLID mapping table database hits made since the uplDispatcher started.')
upldSuccessfulMappingHits = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 3, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldSuccessfulMappingHits.setStatus('mandatory')
if mibBuilder.loadTexts: upldSuccessfulMappingHits.setDescription('The number of successful CLID mapping hits made on the database since the uplDispatcher started.')
upldFailedMappingHits = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 3, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: upldFailedMappingHits.setStatus('mandatory')
if mibBuilder.loadTexts: upldFailedMappingHits.setDescription('The number of failed CLID mapping hits made on the database since the uplDispatcher started.')
uplDispatcherTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 20))
upldTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 20, 1), DisplayString())
if mibBuilder.loadTexts: upldTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: upldTrapInfo.setDescription('Text string which provides additional information about the trap.')
upldUplAgentId = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 1, 20, 2), Integer32())
if mibBuilder.loadTexts: upldUplAgentId.setStatus('optional')
if mibBuilder.loadTexts: upldUplAgentId.setDescription('The UPLAgent identifier related to the trap.')
upldStartup = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,100)).setObjects(("OPENWAVE-MIB", "upldHostName"), ("OPENWAVE-MIB", "upldProcessId"), ("OPENWAVE-MIB", "upldTrapInfo"))
if mibBuilder.loadTexts: upldStartup.setDescription('An event that is generated when the dispatcher is started.')
upldShutdown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,101)).setObjects(("OPENWAVE-MIB", "upldHostName"), ("OPENWAVE-MIB", "upldProcessId"), ("OPENWAVE-MIB", "upldTrapInfo"))
if mibBuilder.loadTexts: upldShutdown.setDescription('An event that is generated when the dispatcher is shut down. upldTrapInfo gives the condition.')
upldInvalidConfig = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,102)).setObjects(("OPENWAVE-MIB", "upldHostName"), ("OPENWAVE-MIB", "upldProcessId"), ("OPENWAVE-MIB", "upldTrapInfo"))
if mibBuilder.loadTexts: upldInvalidConfig.setDescription('An event that is generated when the dispatcher detects an invalid configuration. upldTrapInfo gives the configuration name.')
upldUplAgentConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,103)).setObjects(("OPENWAVE-MIB", "upldHostName"), ("OPENWAVE-MIB", "upldProcessId"), ("OPENWAVE-MIB", "upldUplAgentId"), ("OPENWAVE-MIB", "upldTrapInfo"))
if mibBuilder.loadTexts: upldUplAgentConnectionDown.setDescription('An event that is generated when the dispatcher detects that the connection to the UPLAgent is down. The UPLAgent is identified by upldUplAgentId. upldTrapInfo gives the host name where the UPLAgent resides.')
upldDatabaseConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,104)).setObjects(("OPENWAVE-MIB", "upldHostName"), ("OPENWAVE-MIB", "upldProcessId"), ("OPENWAVE-MIB", "upldTrapInfo"))
if mibBuilder.loadTexts: upldDatabaseConnectionDown.setDescription('An event that is generated when the dispatcher detects that the database is down.')
upldOutOfResouce = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,105)).setObjects(("OPENWAVE-MIB", "upldHostName"), ("OPENWAVE-MIB", "upldProcessId"), ("OPENWAVE-MIB", "upldTrapInfo"))
if mibBuilder.loadTexts: upldOutOfResouce.setDescription('An event that is generated when the dispatcher detects an out-of-resource condition. upldTrapInfo gives the resource name.')
upldUplAgentConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,106)).setObjects(("OPENWAVE-MIB", "upldHostName"), ("OPENWAVE-MIB", "upldProcessId"), ("OPENWAVE-MIB", "upldUplAgentId"), ("OPENWAVE-MIB", "upldTrapInfo"))
if mibBuilder.loadTexts: upldUplAgentConnectionUp.setDescription('An event that is generated when the dispatcher detects that the connection to the UPLAgent has come up. The UPLAgent is identified by upldUplAgentId. upldTrapInfo gives the host name where the UPLAgent resides.')
upldDatabaseConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,107)).setObjects(("OPENWAVE-MIB", "upldHostName"), ("OPENWAVE-MIB", "upldProcessId"), ("OPENWAVE-MIB", "upldTrapInfo"))
if mibBuilder.loadTexts: upldDatabaseConnectionUp.setDescription('An event that is generated when the dispatcher detects that the database has come into service.')
upldUplRadiusConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,108)).setObjects(("OPENWAVE-MIB", "upldHostName"), ("OPENWAVE-MIB", "upldProcessId"), ("OPENWAVE-MIB", "upldTrapInfo"))
if mibBuilder.loadTexts: upldUplRadiusConnectionDown.setDescription('An event that is generated when the dispatcher detects that the connection to the UPLRadius is down. upldTrapInfo gives the host name where the UPLRadius resides.')
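# --- Usage sketch (not part of the generated MIB module) --------------------
# A minimal, hedged example of how a manager might read the uplDispatcher
# counters defined above with the pysnmp high-level API. The host name, port
# and community string are placeholders, and the example assumes this compiled
# module is on the pysnmp MIB search path. It is left commented out so that
# loading the module through MibBuilder is unaffected.
#
# from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                           ContextData, ObjectType, ObjectIdentity, getCmd)
#
# errorIndication, errorStatus, errorIndex, varBinds = next(getCmd(
#     SnmpEngine(),
#     CommunityData('public'),                                  # placeholder community
#     UdpTransportTarget(('updispatcher.example.com', 161)),    # placeholder host
#     ContextData(),
#     ObjectType(ObjectIdentity('OPENWAVE-MIB', 'upldRequestsReceived', 0)),
#     ObjectType(ObjectIdentity('OPENWAVE-MIB', 'upldRequestsDropped', 0))))
# if errorIndication is None and not errorStatus:
#     for name, value in varBinds:
#         print(name.prettyPrint(), '=', value.prettyPrint())
# -----------------------------------------------------------------------------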
uplAgent = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2))
uplAgentDescriptionTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 1), )
if mibBuilder.loadTexts: uplAgentDescriptionTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentDescriptionTable.setDescription('The table holding uplAgent identifying information.')
uplAgentDescriptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplaAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentDescriptionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentDescriptionEntry.setDescription('The entry associated with each uplAgent.')
uplaAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplaAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaHostName.setStatus('mandatory')
if mibBuilder.loadTexts: uplaHostName.setDescription('The host name of the uplAgent host.')
uplaProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplaProcessId.setDescription('The process id for the uplAgent.')
uplaStartUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaStartUpTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplaStartUpTime.setDescription('The date and time the uplAgent started.')
uplAgentProxyStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2))
uplAgentWebAccessStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1), )
if mibBuilder.loadTexts: uplAgentWebAccessStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWebAccessStatsTable.setDescription('The table holding Web access statistics.')
uplAgentWebAccessStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplawsAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentWebAccessStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWebAccessStatsEntry.setDescription('The entry associated with each uplAgent.')
uplawsAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplawsAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplawsAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaHttpRequestsStarted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaHttpRequestsStarted.setStatus('mandatory')
if mibBuilder.loadTexts: uplaHttpRequestsStarted.setDescription('Number of http requests initiated since the uplAgent started.')
uplaHttpRequestsSucceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaHttpRequestsSucceeded.setStatus('mandatory')
if mibBuilder.loadTexts: uplaHttpRequestsSucceeded.setDescription('Number of http requests that succeeded among all http requests initiated since the uplAgent started.')
uplaHttpMeanResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaHttpMeanResponseTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplaHttpMeanResponseTime.setDescription('Mean response time in milliseconds for http requests.')
uplaHttpDeviationOfResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaHttpDeviationOfResponseTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplaHttpDeviationOfResponseTime.setDescription('Standard Deviation of uplaHttpMeanResponseTime.')
uplaHttpsRequestsStarted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaHttpsRequestsStarted.setStatus('mandatory')
if mibBuilder.loadTexts: uplaHttpsRequestsStarted.setDescription('Number of https requests initiated since the uplAgent started.')
uplaHttpsRequestsSucceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaHttpsRequestsSucceeded.setStatus('mandatory')
if mibBuilder.loadTexts: uplaHttpsRequestsSucceeded.setDescription('Number of https requests that succeeded among all https requests initiated since the uplAgent started.')
uplaHttpsMeanResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaHttpsMeanResponseTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplaHttpsMeanResponseTime.setDescription('Mean response time in milliseconds for https requests.')
uplaHttpsDeviationOfResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaHttpsDeviationOfResponseTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplaHttpsDeviationOfResponseTime.setDescription('Standard deviation of uplaHttpsMeanResponseTime.')
uplAgentErrorStatsSummaryTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2), )
if mibBuilder.loadTexts: uplAgentErrorStatsSummaryTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentErrorStatsSummaryTable.setDescription('The table holding error-summary statistics.')
uplAgentErrorStatsSummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplaesAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentErrorStatsSummaryEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentErrorStatsSummaryEntry.setDescription('The entry associated with each uplAgent.')
uplaesAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaesAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplaesAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaTotalErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaTotalErrors.setDescription('Total number of errors that have occurred since the uplAgent started.')
uplaSilentErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaSilentErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaSilentErrors.setDescription('Number of error occurrences for the silent class of errors since the uplAgent started. The silent class of errors refers to errors caused by client requests, but for which the uplAgent does not send the errors back to the client.')
uplaDeviceErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaDeviceErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaDeviceErrors.setDescription('Number of error occurrences for the device class of errors since the uplAgent started. The device class of errors refers to unregistered and unsupported devices.')
uplaKeyErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaKeyErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaKeyErrors.setDescription('Number of error occurrences for the key class of errors since the uplAgent started. The key class of errors refers to encryption key mismatch and unsupported encryption algorithms/configurations.')
uplaSessionErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaSessionErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaSessionErrors.setDescription('Number of error occurrences for the session class of errors since the uplAgent started. The session class of errors refers to invalid sessions.')
uplaTransactionErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaTransactionErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaTransactionErrors.setDescription('Number of error occurrences for the transaction class of errors since the uplAgent started. The transaction class of errors refers to all errors occurring while the transaction is conducted.')
uplaOtherErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaOtherErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaOtherErrors.setDescription('Number of error occurrences for all other errors since the uplAgent started. It is simply TotalErrors minus all the named class errors.')
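# --- Derived-metric sketch (not part of the generated MIB module) -----------
# As the descriptions above state, uplaOtherErrors is simply uplaTotalErrors
# minus the named error classes. A hedged manager-side consistency check,
# assuming the six counter values have already been fetched:
#
# def other_errors(total, silent, device, key, session, transaction):
#     """Recompute the 'other' error count from the named error classes."""
#     return total - (silent + device + key + session + transaction)
# -----------------------------------------------------------------------------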
uplAgentErrorStatsDetailTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 3), )
if mibBuilder.loadTexts: uplAgentErrorStatsDetailTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentErrorStatsDetailTable.setDescription('The table holding detailed error statistics.')
uplAgentErrorStatsDetailEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 3, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplaedAgentIdentifier"), (0, "OPENWAVE-MIB", "uplaErrorCode"))
if mibBuilder.loadTexts: uplAgentErrorStatsDetailEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentErrorStatsDetailEntry.setDescription('The entry associated with each error code in an uplAgent.')
uplaedAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaedAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplaedAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaErrorCode = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaErrorCode.setStatus('mandatory')
if mibBuilder.loadTexts: uplaErrorCode.setDescription('The error code as defined in uplAgent.')
uplaErrorName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 3, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaErrorName.setStatus('mandatory')
if mibBuilder.loadTexts: uplaErrorName.setDescription('A text string describing the error name.')
uplaErrorSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 3, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaErrorSeverity.setStatus('optional')
if mibBuilder.loadTexts: uplaErrorSeverity.setDescription('Error severity. 1 is the highest.')
uplaErrorClass = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("silent", 1), ("device", 2), ("key", 3), ("session", 4), ("transaction", 5), ("other", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaErrorClass.setStatus('mandatory')
if mibBuilder.loadTexts: uplaErrorClass.setDescription('Error class identifier.')
uplaErrorCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 2, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaErrorCount.setStatus('mandatory')
if mibBuilder.loadTexts: uplaErrorCount.setDescription('Number of times the error has occurred since the uplAgent started.')
uplHdtpStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3))
uplAgentSessionStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 1), )
if mibBuilder.loadTexts: uplAgentSessionStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentSessionStatsTable.setDescription('The table holding uplAgent session statistics.')
uplAgentSessionStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplassAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentSessionStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentSessionStatsEntry.setDescription('The entry associated with each uplAgent.')
uplassAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplassAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplassAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaActiveSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaActiveSessions.setStatus('mandatory')
if mibBuilder.loadTexts: uplaActiveSessions.setDescription('Number of active sessions currently in the uplAgent.')
uplaEncryptedSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaEncryptedSessions.setStatus('mandatory')
if mibBuilder.loadTexts: uplaEncryptedSessions.setDescription('Number of sessions that use encryption among all active sessions in the uplAgent.')
uplaProtoSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaProtoSessions.setStatus('mandatory')
if mibBuilder.loadTexts: uplaProtoSessions.setDescription('Number of proto sessions currently in the uplAgent that have not yet completed the session-creation procedure.')
uplaSessionsStarted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaSessionsStarted.setStatus('mandatory')
if mibBuilder.loadTexts: uplaSessionsStarted.setDescription('Number of sessions started since the uplAgent started.')
uplaSessionsSucceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaSessionsSucceeded.setStatus('mandatory')
if mibBuilder.loadTexts: uplaSessionsSucceeded.setDescription('Number of sessions that have completed the session-creation procedure among all sessions started since the uplAgent started.')
uplaKeyExchanges = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaKeyExchanges.setStatus('mandatory')
if mibBuilder.loadTexts: uplaKeyExchanges.setDescription('Number of key exchanges that have completed since the uplAgent started.')
uplAgentAirLinkStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 2), )
if mibBuilder.loadTexts: uplAgentAirLinkStatsTable.setStatus('deprecated')
if mibBuilder.loadTexts: uplAgentAirLinkStatsTable.setDescription('The table holding uplAgent airlink traffic statistics.')
uplAgentAirLinkStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 2, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplaasAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentAirLinkStatsEntry.setStatus('deprecated')
if mibBuilder.loadTexts: uplAgentAirLinkStatsEntry.setDescription('The entry associated with each uplAgent.')
uplaasAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaasAgentIdentifier.setStatus('deprecated')
if mibBuilder.loadTexts: uplaasAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaRequestsReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaRequestsReceived.setStatus('deprecated')
if mibBuilder.loadTexts: uplaRequestsReceived.setDescription("Number of requests that have been received by the uplAgent since it started. Ack, Cancel, and SessComplete PDUs are not counted.")
uplaRequestsDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaRequestsDropped.setStatus('deprecated')
if mibBuilder.loadTexts: uplaRequestsDropped.setDescription("Number of requests that have been dropped since the uplAgent started. Dropped requests include duplicated, invalid, and out-of-sequence PDUs.")
uplaRequestsDuplicated = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaRequestsDuplicated.setStatus('deprecated')
if mibBuilder.loadTexts: uplaRequestsDuplicated.setDescription('Number of duplicated requests that have been received since the uplAgent started.')
uplaRequestsNotValid = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaRequestsNotValid.setStatus('deprecated')
if mibBuilder.loadTexts: uplaRequestsNotValid.setDescription('Number of invalid requests that have been received since the uplAgent started.')
uplaRepliesDelivered = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaRepliesDelivered.setStatus('deprecated')
if mibBuilder.loadTexts: uplaRepliesDelivered.setDescription('Number of replies that have been successfully delivered to the browser client since the uplAgent started.')
uplaRepliesTimedOut = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaRepliesTimedOut.setStatus('deprecated')
if mibBuilder.loadTexts: uplaRepliesTimedOut.setDescription('Number of replies not delivered due to timeout since the uplAgent started.')
uplAgentTransactionStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3), )
if mibBuilder.loadTexts: uplAgentTransactionStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentTransactionStatsTable.setDescription('The table holding uplAgent transaction statistics.')
uplAgentTransactionStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplatsAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentTransactionStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentTransactionStatsEntry.setDescription('The entry associated with each uplAgent.')
uplatsAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplatsAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplatsAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaTransactionsActive = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaTransactionsActive.setStatus('mandatory')
if mibBuilder.loadTexts: uplaTransactionsActive.setDescription('Number of active transactions currently in the uplAgent.')
uplaTransactionsStarted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaTransactionsStarted.setStatus('mandatory')
if mibBuilder.loadTexts: uplaTransactionsStarted.setDescription('Number of transactions that have started since the uplAgent started.')
uplaTransactionsSucceeded = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaTransactionsSucceeded.setStatus('mandatory')
if mibBuilder.loadTexts: uplaTransactionsSucceeded.setDescription('Number of transactions that have succeeded among all transactions started since the uplAgent started.')
uplaMeanTransactionLife = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaMeanTransactionLife.setStatus('mandatory')
if mibBuilder.loadTexts: uplaMeanTransactionLife.setDescription('Mean transaction lifetime measured in milliseconds.')
uplaDeviationOfTransactionLife = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaDeviationOfTransactionLife.setStatus('mandatory')
if mibBuilder.loadTexts: uplaDeviationOfTransactionLife.setDescription('Standard deviation of uplaMeanTransactionLife.')
uplaMeanResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaMeanResponseTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplaMeanResponseTime.setDescription("Mean transaction response time in milliseconds, as measured from the browser's point of view.")
uplaDeviationOfResponseTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaDeviationOfResponseTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplaDeviationOfResponseTime.setDescription('Standard deviation of uplaMeanResponseTime.')
uplaMeanRetriesPerThousandTxn = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaMeanRetriesPerThousandTxn.setStatus('mandatory')
if mibBuilder.loadTexts: uplaMeanRetriesPerThousandTxn.setDescription('Number of retransmitted requests for every one thousand completed transactions.')
uplaDeviationOfRetriesPTTxn = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 3, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaDeviationOfRetriesPTTxn.setStatus('mandatory')
if mibBuilder.loadTexts: uplaDeviationOfRetriesPTTxn.setDescription('Standard deviation of uplaMeanRetriesPerThousandTxn.')
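# --- Usage sketch (not part of the generated MIB module) --------------------
# A hedged example of walking the per-agent transaction statistics table
# defined above (uplAgentTransactionStatsTable, indexed by
# uplatsAgentIdentifier). Host and community are placeholders; the sketch
# assumes the pysnmp high-level API and is left commented out so the module
# still loads cleanly through MibBuilder.
#
# from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                           ContextData, ObjectType, ObjectIdentity, nextCmd)
#
# for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(
#         SnmpEngine(),
#         CommunityData('public'),                              # placeholder community
#         UdpTransportTarget(('uplink-host.example.com', 161)), # placeholder host
#         ContextData(),
#         ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplaTransactionsStarted')),
#         ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplaTransactionsSucceeded')),
#         lexicographicMode=False):                             # stop at end of table
#     if errorIndication or errorStatus:
#         break
#     for name, value in varBinds:
#         print(name.prettyPrint(), '=', value.prettyPrint())
# -----------------------------------------------------------------------------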
uplAgentLimitedResourceTable = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 3, 6), Integer32())
if mibBuilder.loadTexts: uplAgentLimitedResourceTable.setStatus('deprecated')
if mibBuilder.loadTexts: uplAgentLimitedResourceTable.setDescription('The table holding information about limited resource usage.')
uplaWapStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4))
uplAgentWapWSPSessionStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 1), )
if mibBuilder.loadTexts: uplAgentWapWSPSessionStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWapWSPSessionStatsTable.setDescription('The table holding uplAgent WAP session statistics.')
uplAgentWapSessionStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplawssAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentWapSessionStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWapSessionStatsEntry.setDescription('The entry associated with each uplAgent.')
uplawssAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplawssAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplawssAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaActiveWapSessions = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaActiveWapSessions.setStatus('mandatory')
if mibBuilder.loadTexts: uplaActiveWapSessions.setDescription('Number of active WAP sessions currently in the uplAgent.')
uplaWapSessionsStarted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaWapSessionsStarted.setStatus('mandatory')
if mibBuilder.loadTexts: uplaWapSessionsStarted.setDescription('Number of WAP sessions started since the uplAgent started.')
uplAgentWapWTPTransactionStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 2), )
if mibBuilder.loadTexts: uplAgentWapWTPTransactionStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWapWTPTransactionStatsTable.setDescription('The table holding uplAgent WAP Transaction layer statistics.')
uplAgentWapTransactionStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 2, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplawtsAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentWapTransactionStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWapTransactionStatsEntry.setDescription('The entry associated with each uplAgent.')
uplawtsAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplawtsAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplawtsAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaWapInvokeTpdus = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 2, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaWapInvokeTpdus.setStatus('mandatory')
if mibBuilder.loadTexts: uplaWapInvokeTpdus.setDescription('Number of WTP Invoke TPDUs handled since the uplAgent started.')
uplaWapResultTpdus = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaWapResultTpdus.setStatus('mandatory')
if mibBuilder.loadTexts: uplaWapResultTpdus.setDescription('Number of WTP Result TPDUs handled since the uplAgent started.')
uplaWapAbortTransaction = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaWapAbortTransaction.setStatus('mandatory')
if mibBuilder.loadTexts: uplaWapAbortTransaction.setDescription('Number of WTP transactions aborted since the uplAgent started.')
uplAgentWapErrorStatsSummaryTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 3), )
if mibBuilder.loadTexts: uplAgentWapErrorStatsSummaryTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWapErrorStatsSummaryTable.setDescription('The table holding WAP stack related error-summary statistics.')
uplAgentWapErrorStatsSummaryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 3, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplawesAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentWapErrorStatsSummaryEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWapErrorStatsSummaryEntry.setDescription('The entry associated with each uplAgent.')
uplawesAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplawesAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplawesAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaTotalWapErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaTotalWapErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaTotalWapErrors.setDescription('Total number of WAP stack errors that have occurred since the uplAgent started.')
uplaOtherWapErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaOtherWapErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaOtherWapErrors.setDescription('Number of error occurrences for all other errors since the uplAgent started. It is simply TotalErrors minus all the named class errors.')
uplaSessionWapErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaSessionWapErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaSessionWapErrors.setDescription('Number of error occurrences for the WAP session class of errors since the uplAgent started. The session class of errors refers to invalid sessions.')
uplaTransactionWapErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaTransactionWapErrors.setStatus('mandatory')
if mibBuilder.loadTexts: uplaTransactionWapErrors.setDescription('Number of error occurrences for the WAP transaction class of errors since the uplAgent started. The transaction class of errors refers to all errors occurring while the transaction is conducted.')
uplAgentWapErrorStatsDetailTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 4), )
if mibBuilder.loadTexts: uplAgentWapErrorStatsDetailTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWapErrorStatsDetailTable.setDescription('The table holding detailed error statistics.')
uplAgentWapErrorStatsDetailEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 4, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplaweAgentIdentifier"), (0, "OPENWAVE-MIB", "uplaWapErrorCode"))
if mibBuilder.loadTexts: uplAgentWapErrorStatsDetailEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentWapErrorStatsDetailEntry.setDescription('The entry associated with each error code in an uplAgent.')
uplaweAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 4, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaweAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplaweAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaWapErrorCode = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 4, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaWapErrorCode.setStatus('mandatory')
if mibBuilder.loadTexts: uplaWapErrorCode.setDescription('The error code as defined in uplAgent.')
uplaWapErrorName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 4, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaWapErrorName.setStatus('mandatory')
if mibBuilder.loadTexts: uplaWapErrorName.setDescription('A text string describing the error name.')
uplaWapErrorSeverity = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaWapErrorSeverity.setStatus('optional')
if mibBuilder.loadTexts: uplaWapErrorSeverity.setDescription('Error severity. 1 is the highest.')
uplaWapErrorClass = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("session", 2), ("txn", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaWapErrorClass.setStatus('mandatory')
if mibBuilder.loadTexts: uplaWapErrorClass.setDescription('Error class identifier.')
uplaWapErrorCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 4, 4, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaWapErrorCount.setStatus('mandatory')
if mibBuilder.loadTexts: uplaWapErrorCount.setDescription('Number of times the error has occurred since the uplAgent started.')
uplaStackServiceStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5))
uplAgentStackServiceTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1), )
if mibBuilder.loadTexts: uplAgentStackServiceTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceTable.setDescription('The table holding UP.Link Stack Service data for each Agent. The static information is obtained from the Oracle database which is configured via the Admin GUI. The dynamic information is kept up to date by the process.')
uplAgentStackServiceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplasstAgentIdentifier"), (0, "OPENWAVE-MIB", "uplAgentStackServiceIdentifier"))
if mibBuilder.loadTexts: uplAgentStackServiceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceEntry.setDescription('The entry associated with Stack Services for each Agent.')
uplasstAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplasstAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplasstAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplAgentStackServiceIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceIdentifier.setDescription('The unique identifier for each Stack Service.')
uplAgentStackServiceAppProtoName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceAppProtoName.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceAppProtoName.setDescription('AppProtName.')
uplAgentStackServiceName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceName.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceName.setDescription('AppProtName + Bearer + Tunnel Proto.')
uplAgentStackServiceLoaded = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notloaded", 1), ("loaded", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceLoaded.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceLoaded.setDescription('Has the Stack Service been loaded: (1) No (2) Yes.')
uplAgentStackServiceAdaptorThreads = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceAdaptorThreads.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceAdaptorThreads.setDescription('The number of Adaptor threads for this Stack Service.')
uplAgentStackServiceWDPPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceWDPPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceWDPPortNumber.setDescription('The WDP Port number for this Stack Service.')
uplAgentStackServiceTableSize = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceTableSize.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceTableSize.setDescription('The value of the StackSvcNumBuckets configuration parameter.')
uplAgentStackServiceMeanTableItems = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceMeanTableItems.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceMeanTableItems.setDescription('Mean number of total items in a stack table.')
uplAgentStackServiceMeanTableItemsDeviation = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceMeanTableItemsDeviation.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceMeanTableItemsDeviation.setDescription('Std. deviation from mean number of total items in a stack table.')
uplAgentStackServiceMeanBucketChainLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceMeanBucketChainLength.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceMeanBucketChainLength.setDescription('Mean length of bucket chain in a stack table.')
uplAgentStackServiceMeanBucketChainLengthDeviation = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceMeanBucketChainLengthDeviation.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceMeanBucketChainLengthDeviation.setDescription('Std. deviation from mean length of bucket chain in a stack table.')
uplAgentStackServiceTableMeanNumberItemsGarbageCollected = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceTableMeanNumberItemsGarbageCollected.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceTableMeanNumberItemsGarbageCollected.setDescription('Mean number of items removed per garbage collection run.')
uplAgentStackServiceTableMeanNumberItemsGarbageCollectedDeviatn = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceTableMeanNumberItemsGarbageCollectedDeviatn.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceTableMeanNumberItemsGarbageCollectedDeviatn.setDescription('Std. deviation from mean number of items removed per garbage collection run.')
uplAgentStackServiceMeanGarbageCollectTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceMeanGarbageCollectTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceMeanGarbageCollectTime.setDescription('Mean time, in milliseconds, to garbage collect for a stack service.')
uplAgentStackServiceMeanGarbageCollectTimeDeviation = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 5, 1, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplAgentStackServiceMeanGarbageCollectTimeDeviation.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentStackServiceMeanGarbageCollectTimeDeviation.setDescription('Std. deviation from mean time to garbage collect for a stack service.')
uplaRadiusClientStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 6))
uplAgentRadiusClientStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 6, 1), )
if mibBuilder.loadTexts: uplAgentRadiusClientStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentRadiusClientStatsTable.setDescription('The table holding uplAgent Radius Client statistics.')
uplAgentRadiusClientStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 6, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplarcsAgentIdentifier"))
if mibBuilder.loadTexts: uplAgentRadiusClientStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplAgentRadiusClientStatsEntry.setDescription('The entry associated with each uplAgent.')
uplarcsAgentIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 6, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplarcsAgentIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uplarcsAgentIdentifier.setDescription('The unique identifier for each uplAgent.')
uplaTotalMappingTableHits = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 6, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaTotalMappingTableHits.setStatus('mandatory')
if mibBuilder.loadTexts: uplaTotalMappingTableHits.setDescription('The total number of CLID mapping table database hits made since the uplAgent started.')
uplaSuccessfulMappingHits = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 6, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaSuccessfulMappingHits.setStatus('mandatory')
if mibBuilder.loadTexts: uplaSuccessfulMappingHits.setDescription('The number of successful CLID mapping hits made on the database since the uplAgent started.')
uplaFailedMappingHits = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 6, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplaFailedMappingHits.setStatus('mandatory')
if mibBuilder.loadTexts: uplaFailedMappingHits.setDescription('The number of failed CLID mapping hits made on the database since the uplAgent started.')
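# --- Derived-metric sketch (not part of the generated MIB module) -----------
# The RADIUS-client CLID mapping counters above allow a simple health metric
# to be derived on the manager side. A hedged sketch, assuming the successful
# and failed hit counts have already been fetched (e.g. with getCmd as in the
# earlier example):
#
# def clid_mapping_success_rate(successful_hits, failed_hits):
#     """Return the fraction of CLID mapping lookups that succeeded."""
#     total = successful_hits + failed_hits
#     return successful_hits / total if total else None
#
# # e.g. clid_mapping_success_rate(9800, 200) -> 0.98
# -----------------------------------------------------------------------------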
uplAgentTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 20))
uplaTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 2, 20, 1), DisplayString())
if mibBuilder.loadTexts: uplaTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: uplaTrapInfo.setDescription('Text string that provides additional information about the trap.')
uplaStartup = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,200)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaStartup.setDescription('An event that is generated when the uplAgent is started.')
uplaShutdown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,201)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaShutdown.setDescription('An event that is generated when the uplAgent is shut down. uplaTrapInfo gives the reason for the shutdown.')
uplaDatabaseConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,202)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaDatabaseConnectionDown.setDescription('An event that is generated when the uplAgent detects that the database connection is down.')
uplaFaxMgrConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,203)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaFaxMgrConnectionDown.setDescription('An event that is generated when the uplAgent detects that the fax manager is down.')
uplaMessengerConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,204)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaMessengerConnectionDown.setDescription('An event that is generated when the uplAgent detects that the messenger is down.')
uplaInvalidConfig = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,205)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaInvalidConfig.setDescription('An event that is generated when the uplAgent detects an invalid configuration. uplaTrapInfo gives the configuration name.')
uplaInternalFatalErrors = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,206)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaInternalFatalErrors.setDescription('An event that is generated when the uplAgent detects an internal fatal error. uplaTrapInfo gives error name.')
uplaOutOfResource = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,207)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaOutOfResource.setDescription('An event that is generated when the uplAgent detects reportable out-of-resource conditions. uplaTrapInfo gives the resource name.')
uplaDatabaseConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,208)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaDatabaseConnectionUp.setDescription('An event that is generated when the Agent detects that the database is up.')
uplaBillingInitError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,209)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaBillingInitError.setDescription('An event that is generated when the Agent client cannot initialise communication with the Billing Manager.')
uplaBillingLogError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,210)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaBillingLogError.setDescription('An event that is generated when a billing event from the Agent client cannot be logged with the Billing Manager.')
uplaDynamicUpdateStarted = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,211)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaDynamicUpdateStarted.setDescription('An event that is generated when the Agent Dynamic Update has started.')
uplaDynamicUpdateStopped = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,212)).setObjects(("OPENWAVE-MIB", "uplaAgentIdentifier"), ("OPENWAVE-MIB", "uplaHostName"), ("OPENWAVE-MIB", "uplaProcessId"), ("OPENWAVE-MIB", "uplaTrapInfo"))
if mibBuilder.loadTexts: uplaDynamicUpdateStopped.setDescription('An event that is generated when the Agent Dynamic Update has terminated.')
uplNbRouter = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3))
uplNbRouterDescriptionTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 1), )
if mibBuilder.loadTexts: uplNbRouterDescriptionTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplNbRouterDescriptionTable.setDescription('The table holding UPLNbRouter identifying information.')
uplNbRouterDescriptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplrdIpAddress"), (0, "OPENWAVE-MIB", "uplrdProcessId"))
if mibBuilder.loadTexts: uplNbRouterDescriptionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplNbRouterDescriptionEntry.setDescription('The entry associated with each UPLNbRouter.')
uplrdIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrdIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplrdIpAddress.setDescription('The ip address of the host running UPLNbRouter.')
uplrdProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrdProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplrdProcessId.setDescription('The process id of the UPLNbRouter.')
uplrdHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrdHostName.setStatus('mandatory')
if mibBuilder.loadTexts: uplrdHostName.setDescription('The host name of the node running UPLNbRouter.')
uplrdPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrdPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uplrdPortNumber.setDescription('The port number on which the UPLNbRouter listens for incoming requests.')
uplrdStartUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrdStartUpTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplrdStartUpTime.setDescription('The date and time the UPLNbRouter started.')
uplrHdtpStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 2))
uplNbRouterAirlinkTable = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 2, 1), Integer32())
if mibBuilder.loadTexts: uplNbRouterAirlinkTable.setStatus('deprecated')
if mibBuilder.loadTexts: uplNbRouterAirlinkTable.setDescription('The table holding airlinks loaded successfully in UPLNbRouter.')
uplNbRouterAirlinkStatsTable = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 2, 2), Integer32())
if mibBuilder.loadTexts: uplNbRouterAirlinkStatsTable.setStatus('deprecated')
if mibBuilder.loadTexts: uplNbRouterAirlinkStatsTable.setDescription('The table holding statistics of mobile-terminated traffic through airlinks loaded in UPLNbRouter.')
uplrStackServiceStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 3))
uplNbRouterTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 20))
uplrTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 20, 1), DisplayString())
if mibBuilder.loadTexts: uplrTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: uplrTrapInfo.setDescription('Text string that provides additional information about the trap.')
uplrClientIpAddress = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 20, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrClientIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplrClientIpAddress.setDescription('The ip address of the node running the client that is communicating to the UPLNbRouter.')
uplrClientHostName = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 20, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrClientHostName.setStatus('mandatory')
if mibBuilder.loadTexts: uplrClientHostName.setDescription('The host name of the node running the client that is communicating to the UPLNbRouter.')
uplrClientProcessId = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 3, 20, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrClientProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplrClientProcessId.setDescription('The process id of the client that is communicating to the UPLNbRouter.')
uplrStartup = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,500)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrStartup.setDescription('An event that is generated when the UPLNbRouter is started.')
uplrShutdown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,501)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrShutdown.setDescription('An event that is generated when the UPLNbRouter is shut down. uplrTrapInfo gives the reason for the shutdown.')
uplrDatabaseConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,502)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrDatabaseConnectionDown.setDescription('An event that is generated when the UPLNbRouter detects that the database connection is down.')
uplrDatabaseConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,503)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrDatabaseConnectionUp.setDescription('An event that is generated when the UPLNbRouter detects that the database connection is back up.')
uplrInternalError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,505)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrInternalError.setDescription('An event that is generated when the UPLNbRouter detects an internal error. uplrTrapInfo gives error name.')
uplrSMSCConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,506)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrSMSCConnectionDown.setDescription('An event that is generated when the UPLNbRouter detects that the connection to the external SMSC is down.')
uplrSMSCConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,507)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrSMSCConnectionUp.setDescription('An event that is generated when the UPLNbRouter detects that the connection to the external SMSC is back up.')
uplrClientConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,508)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrClientConnectionDown.setDescription('An event that is generated when the UPLNbRouter detects the connection to a client is down. The client can be uplMessenger, uplAgent or uplDispatcher.')
uplrClientConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,509)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrClientConnectionUp.setDescription('An event that is generated when the UPLNbRouter detects the connection to a client is back up. The client can be uplMessenger, uplAgent or uplDispatcher.')
uplrNbRouterConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,510)).setObjects(("OPENWAVE-MIB", "uplrClientIpAddress"), ("OPENWAVE-MIB", "uplrClientHostName"), ("OPENWAVE-MIB", "uplrClientProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrNbRouterConnectionDown.setDescription('An event that is generated by a client when the client detects the connection to an UPLNbRouter is down.')
uplrNbRouterConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,511)).setObjects(("OPENWAVE-MIB", "uplrClientIpAddress"), ("OPENWAVE-MIB", "uplrClientHostName"), ("OPENWAVE-MIB", "uplrClientProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrNbRouterConnectionUp.setDescription('An event that is generated by a client when the client detects the connection to an UPLNbRouter is back up.')
uplrProtocolError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,512)).setObjects(("OPENWAVE-MIB", "uplrClientIpAddress"), ("OPENWAVE-MIB", "uplrClientHostName"), ("OPENWAVE-MIB", "uplrClientProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrProtocolError.setDescription('An event that is generated by a client process running the narrowband airlink adaptor when it detects a protocol error in the message exchange with the UPLNbRouter.')
uplrBillingInitError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,513)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrBillingInitError.setDescription('An event that is generated when the NBRouter client cannot initialise communication with the Billing Manager.')
uplrBillingLogError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,514)).setObjects(("OPENWAVE-MIB", "uplrdHostName"), ("OPENWAVE-MIB", "uplrdProcessId"), ("OPENWAVE-MIB", "uplrTrapInfo"))
if mibBuilder.loadTexts: uplrBillingLogError.setDescription('An event that is generated when an NBRouter client billing event cannot be logged with the Billing Manager.')
uplMessenger = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4))
uplMessengerDescriptionTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1), )
if mibBuilder.loadTexts: uplMessengerDescriptionTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplMessengerDescriptionTable.setDescription('The table holding UPLMessenger identifying information.')
uplMessengerDescriptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplmdIpAddress"), (0, "OPENWAVE-MIB", "uplmdProcessId"))
if mibBuilder.loadTexts: uplMessengerDescriptionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplMessengerDescriptionEntry.setDescription('The entry associated with each UPLMessenger.')
uplmdIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdIpAddress.setDescription('The ip address of the host running UPLMessenger.')
uplmdProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdProcessId.setDescription('The process id of the UPLMessenger.')
uplmdHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdHostName.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdHostName.setDescription('The host name of the node running UPLMessenger.')
uplmdMsgServerPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdMsgServerPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdMsgServerPortNumber.setDescription('The port number on which the UPLMessenger connects to the proxy to deliver notifications.')
uplmdPublicHTTPPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdPublicHTTPPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdPublicHTTPPortNumber.setDescription('The port number on which the UPLMessenger listens for nonsecure public notification services.')
uplmdPublicHTTPSPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdPublicHTTPSPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdPublicHTTPSPortNumber.setDescription('The port number on which the UPLMessenger listens for secure public notification services.')
uplmdPrivateHTTPPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdPrivateHTTPPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdPrivateHTTPPortNumber.setDescription('The port number on which the UPLMessenger listens for nonsecure private notification services.')
uplmdStartupTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdStartupTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdStartupTime.setDescription('The date and time the UPLMessenger started.')
uplmDBMaxConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmDBMaxConnections.setStatus('mandatory')
if mibBuilder.loadTexts: uplmDBMaxConnections.setDescription('Maximum number of database connections.')
uplmDBMinConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmDBMinConnections.setStatus('mandatory')
if mibBuilder.loadTexts: uplmDBMinConnections.setDescription('Minimum number of database connections.')
uplmDBConnectionCacheThreadWaits = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmDBConnectionCacheThreadWaits.setStatus('mandatory')
if mibBuilder.loadTexts: uplmDBConnectionCacheThreadWaits.setDescription('Number of times threads had to wait for a database connection.')
uplmDBConnectionCacheMeanWaitTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmDBConnectionCacheMeanWaitTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplmDBConnectionCacheMeanWaitTime.setDescription('Mean wait time in milliseconds for threads waiting on a database connection.')
uplmDBConnectionCacheDeviationOfWaitTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmDBConnectionCacheDeviationOfWaitTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplmDBConnectionCacheDeviationOfWaitTime.setDescription('Standard deviation of uplmDBConnectionCacheMeanWaitTime.')
uplmdMaxMsgClientStreams = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdMaxMsgClientStreams.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdMaxMsgClientStreams.setDescription('Configured maximum number of busy streams.')
uplmdOpenAgentStreams = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdOpenAgentStreams.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdOpenAgentStreams.setDescription('Number of streams currently in use. These streams sometimes become blocked, so a value that stays at the maximum while the next counter (uplmdNumTxnProcessed) does not increase indicates a problem.')
uplmdNumTxnProcessed = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 1, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmdNumTxnProcessed.setStatus('mandatory')
if mibBuilder.loadTexts: uplmdNumTxnProcessed.setDescription('Total number of transactions processed by the streams.')
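
# --- Illustrative sketch, not part of the generated OPENWAVE-MIB module ------
# The columns above expose one row per UPLMessenger process, indexed by
# (uplmdIpAddress, uplmdProcessId).  The helper below is a minimal, hedged
# example of how a management station might walk two of those columns with
# pysnmp's synchronous high-level API.  The agent address, port and community
# string are placeholders, and this compiled module is assumed to be on the
# pysnmp MIB search path so that the symbolic names resolve.
def _example_walk_messenger_description(agent='agent.example.com', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((agent, 161)), ContextData(),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplmdHostName')),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplmdNumTxnProcessed')),
            lexicographicMode=False):  # stop once the table has been walked
        if errorIndication or errorStatus:
            break
        # each iteration yields one row: host name and transactions processed
        print(', '.join(varBind.prettyPrint() for varBind in varBinds))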
uplmHdtpStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 2))
uplMessengerAirlinkTable = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 2, 1), Integer32())
if mibBuilder.loadTexts: uplMessengerAirlinkTable.setStatus('deprecated')
if mibBuilder.loadTexts: uplMessengerAirlinkTable.setDescription('The table holding airlinks loaded successfully in UPLMessenger.')
uplMessengerAirlinkStatsTable = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 2, 2), Integer32())
if mibBuilder.loadTexts: uplMessengerAirlinkStatsTable.setStatus('deprecated')
if mibBuilder.loadTexts: uplMessengerAirlinkStatsTable.setDescription('The table holding statistics of notifications sent by the messenger.')
uplmStackServiceStats = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 3))
uplMessengerNtfnStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4), )
if mibBuilder.loadTexts: uplMessengerNtfnStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplMessengerNtfnStatsTable.setDescription('The table holding statistics on notification services provided by UPLMessenger.')
uplMessengerNtfnStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplmnsIpAddress"), (0, "OPENWAVE-MIB", "uplmnsProcessId"))
if mibBuilder.loadTexts: uplMessengerNtfnStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplMessengerNtfnStatsEntry.setDescription('The entry associated with notifications statistics for each UPLMessenger.')
uplmnsIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsIpAddress.setDescription('The ip address of the host running UPLMessenger.')
uplmnsProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsProcessId.setDescription('The process id of the UPLMessenger.')
uplmnsPublicHTTPReqReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsPublicHTTPReqReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsPublicHTTPReqReceived.setDescription('The number of public HTTP requests the UPLMessenger received since it came up.')
uplmnsPrivateHTTPReqReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsPrivateHTTPReqReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsPrivateHTTPReqReceived.setDescription('The number of private HTTP requests the UPLMessenger received since it came up.')
uplmnsPublicHTTPSReqReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsPublicHTTPSReqReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsPublicHTTPSReqReceived.setDescription('The number of public HTTPS requests the UPLMessenger received since it came up.')
uplmnsPublicHTTPReqProcessed = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsPublicHTTPReqProcessed.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsPublicHTTPReqProcessed.setDescription('The number of public HTTP requests the UPLMessenger processed successfully since it came up.')
uplmnsPrivateHTTPReqProcessed = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsPrivateHTTPReqProcessed.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsPrivateHTTPReqProcessed.setDescription('The number of private HTTP requests the UPLMessenger processed successfully since it came up.')
uplmnsPublicHTTPSReqProcessed = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsPublicHTTPSReqProcessed.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsPublicHTTPSReqProcessed.setDescription('The number of public HTTPS requests the UPLMessenger processed successfully since it came up.')
uplmnsAvgNtfnsAddedPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsAvgNtfnsAddedPerSec.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsAvgNtfnsAddedPerSec.setDescription('The average number of notifications added per second by the UPLMessenger.')
uplmnsAvgNtfnsDeliveredPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsAvgNtfnsDeliveredPerSec.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsAvgNtfnsDeliveredPerSec.setDescription('The average number of notifications delivered per second by the UPLMessenger.')
uplmnsAvgNtfnsExpiredPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsAvgNtfnsExpiredPerSec.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsAvgNtfnsExpiredPerSec.setDescription('The average number of notifications expired per second by the UPLMessenger.')
uplmnsAvgNtfnsMarkedUnDelvrPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsAvgNtfnsMarkedUnDelvrPerSec.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsAvgNtfnsMarkedUnDelvrPerSec.setDescription('The average number of notifications marked undeliverable per second by the UPLMessenger.')
uplmnsNumAddRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsNumAddRequests.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsNumAddRequests.setDescription('Total number of notifications submitted for delivery.')
uplmnsNumStatusRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsNumStatusRequests.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsNumStatusRequests.setDescription('Total number of requests for notification status submitted.')
uplmnsNumDeleteRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsNumDeleteRequests.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsNumDeleteRequests.setDescription('Total number of delete requests submitted.')
uplmnsNumAdded = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsNumAdded.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsNumAdded.setDescription('Total number of notifications that were actually added.')
uplmnsNumStatusFound = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsNumStatusFound.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsNumStatusFound.setDescription('Total number of status requests that were processed successfully.')
uplmnsNumDeleted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsNumDeleted.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsNumDeleted.setDescription('Total number of delete requests that were processed successfully.')
uplmnsNumExpired = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsNumExpired.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsNumExpired.setDescription('Total number of notification requests that expired.')
uplmnsCompletedNotifications = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsCompletedNotifications.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsCompletedNotifications.setDescription('Total number of notifications that were completed.')
uplmnsSignalsSent = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 4, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmnsSignalsSent.setStatus('mandatory')
if mibBuilder.loadTexts: uplmnsSignalsSent.setDescription('Number of signal PDUs sent.')
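
# --- Illustrative sketch, not part of the generated OPENWAVE-MIB module ------
# The notification counters above are cumulative since UPLMessenger startup.
# As a hedged example of how they might be consumed, the helper below walks
# uplmnsNumAddRequests and uplmnsNumAdded together and prints, per messenger
# row, the fraction of submitted notifications that were actually added.
# The agent address and community string are placeholders.
def _example_notification_add_ratio(agent='agent.example.com', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((agent, 161)), ContextData(),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplmnsNumAddRequests')),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplmnsNumAdded')),
            lexicographicMode=False):
        if errorIndication or errorStatus:
            break
        (_, requested), (_, added) = varBinds
        if int(requested):
            # cumulative acceptance ratio for this UPLMessenger instance
            print('added %d of %d requests (%.1f%%)'
                  % (int(added), int(requested), 100.0 * int(added) / int(requested)))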
uplMessengerNtfnCacheTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 5), )
if mibBuilder.loadTexts: uplMessengerNtfnCacheTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplMessengerNtfnCacheTable.setDescription('The table holding statistics on the notification cache maintained by UPLMessenger.')
uplMessengerNtfnCacheEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 5, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplmncIpAddress"), (0, "OPENWAVE-MIB", "uplmncProcessId"))
if mibBuilder.loadTexts: uplMessengerNtfnCacheEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplMessengerNtfnCacheEntry.setDescription('The entry associated with the notifications cache statistics for each UPLMessenger.')
uplmncIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 5, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmncIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplmncIpAddress.setDescription('The ip address of the host running UPLMessenger.')
uplmncProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 5, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmncProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplmncProcessId.setDescription('The process id of the UPLMessenger.')
uplmncTotalNumOfPendingNtfns = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 5, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmncTotalNumOfPendingNtfns.setStatus('mandatory')
if mibBuilder.loadTexts: uplmncTotalNumOfPendingNtfns.setDescription('The number of pending notifications in the cache as maintained by the UPLMessenger.')
uplmncAvgNumOfPendingNtfnsPerSub = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 5, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmncAvgNumOfPendingNtfnsPerSub.setStatus('mandatory')
if mibBuilder.loadTexts: uplmncAvgNumOfPendingNtfnsPerSub.setDescription('The average number of pending notifications per subscriber as maintained by the UPLMessenger.')
uplMessengerHTTPStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6), )
if mibBuilder.loadTexts: uplMessengerHTTPStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplMessengerHTTPStatsTable.setDescription('The table holding statistics on HTTP services provided by UPLMessenger.')
uplMessengerHTTPStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplmhIpAddress"), (0, "OPENWAVE-MIB", "uplmhProcessId"))
if mibBuilder.loadTexts: uplMessengerHTTPStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplMessengerHTTPStatsEntry.setDescription('The entry associated with HTTP statistics for each UPLMessenger.')
uplmhIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhIpAddress.setDescription('The ip address of the host running UPLMessenger.')
uplmhProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhProcessId.setDescription('The process id of the UPLMessenger.')
uplmhPublicHTTPMaxConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPMaxConnections.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPMaxConnections.setDescription('The maximum number of HTTP connections allowed on the public non-secure interface. Reflects the config variable NumPublicHTTPThreads.')
uplmhPublicHTTPOpenConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPOpenConnections.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPOpenConnections.setDescription('The number of public HTTP connections that are currently open.')
uplmhPublicHTTPMaxThreads = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPMaxThreads.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPMaxThreads.setDescription('The maximum number of public HTTP threads that are started (NumPublicHTTPThreads).')
uplmhPublicHTTPBusyThreads = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPBusyThreads.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPBusyThreads.setDescription('The number of public HTTP threads that are currently busy. A thread is busy from the time it picks up an entry from the dispatcher queue until it goes back to sleep on the dispatcher queue.')
uplmhPublicHTTPTimesAllThreadsBusy = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPTimesAllThreadsBusy.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPTimesAllThreadsBusy.setDescription('The number of times a message was received while all the public HTTP threads were busy.')
uplmhPublicHTTPMaxDispQueueLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPMaxDispQueueLength.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPMaxDispQueueLength.setDescription('The maximum length of the public HTTP dispatcher queue (MaxPublicHTTPDispatchQLen).')
uplmhPublicHTTPCurrentDispQueueLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPCurrentDispQueueLen.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPCurrentDispQueueLen.setDescription('The current length of the public HTTP dispatcher queue.')
uplmhPublicHTTPTimesDispQueueFull = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPTimesDispQueueFull.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPTimesDispQueueFull.setDescription('The number of times a connection was accepted while the public HTTP dispatcher queue was full.')
uplmhPrivateHTTPMaxConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPrivateHTTPMaxConnections.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPrivateHTTPMaxConnections.setDescription('The maximum number of HTTP connections allowed on the private non-secure interface.')
uplmhPrivateHTTPOpenConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPrivateHTTPOpenConnections.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPrivateHTTPOpenConnections.setDescription('The number of private HTTP connections that are currently open.')
uplmhPrivateHTTPMaxThreads = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPrivateHTTPMaxThreads.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPrivateHTTPMaxThreads.setDescription('The maximum number of private HTTP threads that are started.')
uplmhPrivateHTTPBusyThreads = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPrivateHTTPBusyThreads.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPrivateHTTPBusyThreads.setDescription('The number of private HTTP threads that are currently busy.')
uplmhPrivateHTTPTimesAllThreadsBusy = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPrivateHTTPTimesAllThreadsBusy.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPrivateHTTPTimesAllThreadsBusy.setDescription('The number of times a message was received while all the private HTTP threads were busy.')
uplmhPrivateHTTPMaxDispQueueLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPrivateHTTPMaxDispQueueLength.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPrivateHTTPMaxDispQueueLength.setDescription('The maximum length of the private HTTP dispatcher queue.')
uplmhPrivateHTTPCurrentDispQueueLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPrivateHTTPCurrentDispQueueLen.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPrivateHTTPCurrentDispQueueLen.setDescription('The current length of the private HTTP dispatcher queue.')
uplmhPrivateHTTPTimesDispQueueFull = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPrivateHTTPTimesDispQueueFull.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPrivateHTTPTimesDispQueueFull.setDescription('The number of times a connection was accepted while the private HTTP dispatcher queue was full.')
uplmhPublicHTTPSMaxConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 19), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPSMaxConnections.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPSMaxConnections.setDescription('The maximum number of HTTP connections allowed on the public secure interface.')
uplmhPublicHTTPSOpenConnections = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 20), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPSOpenConnections.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPSOpenConnections.setDescription('The number of public secure HTTP connections that are currently open.')
uplmhPublicHTTPSMaxThreads = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPSMaxThreads.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPSMaxThreads.setDescription('The maximum number of public secure HTTP threads that are started.')
uplmhPublicHTTPSBusyThreads = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPSBusyThreads.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPSBusyThreads.setDescription('The number of public secure HTTP threads that are currently busy.')
uplmhPublicHTTPSTimesAllThreadsBusy = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 23), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPSTimesAllThreadsBusy.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPSTimesAllThreadsBusy.setDescription('The number of times a message was received while all the public secure HTTP threads were busy.')
uplmhPublicHTTPSMaxDispQueueLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPSMaxDispQueueLength.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPSMaxDispQueueLength.setDescription('The maximum length of the public secure HTTP dispatcher queue.')
uplmhPublicHTTPSCurrentDispQueueLen = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 25), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPSCurrentDispQueueLen.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPSCurrentDispQueueLen.setDescription('The current length of the public secure HTTP dispatcher queue.')
uplmhPublicHTTPSTimesDispQueueFull = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 6, 1, 26), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplmhPublicHTTPSTimesDispQueueFull.setStatus('mandatory')
if mibBuilder.loadTexts: uplmhPublicHTTPSTimesDispQueueFull.setDescription('The number of times a connection was accepted while the public secure HTTP dispatcher queue was full.')
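
# --- Illustrative sketch, not part of the generated OPENWAVE-MIB module ------
# The HTTP statistics above pair a "busy threads" gauge with a configured
# maximum for each interface, plus dispatcher-queue lengths.  Below is a
# hedged sketch of turning the public non-secure figures into a utilisation
# report; the 90% warning threshold is an arbitrary illustration rather than
# anything mandated by this MIB, and the agent address and community string
# are placeholders.
def _example_public_http_utilisation(agent='agent.example.com', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((agent, 161)), ContextData(),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplmhPublicHTTPBusyThreads')),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplmhPublicHTTPMaxThreads')),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplmhPublicHTTPCurrentDispQueueLen')),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplmhPublicHTTPMaxDispQueueLength')),
            lexicographicMode=False):
        if errorIndication or errorStatus:
            break
        busy, max_threads, queue_len, queue_max = (int(vb[1]) for vb in varBinds)
        if max_threads and 100.0 * busy / max_threads > 90.0:
            # nearly all worker threads busy; the dispatcher queue shows the backlog
            print('public HTTP pool under pressure: %d/%d threads busy, queue %d/%d'
                  % (busy, max_threads, queue_len, queue_max))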
uplMessengerTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 20))
uplmTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 4, 20, 1), DisplayString())
if mibBuilder.loadTexts: uplmTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: uplmTrapInfo.setDescription('Text string that provides additional information about the trap.')
uplmStartup = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,600)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmStartup.setDescription('An event that is generated when the UPLMessenger is started.')
uplmShutdown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,601)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmShutdown.setDescription('An event that is generated when the UPLMessenger is shut down. uplmTrapInfo gives the reason for the shutdown.')
uplmDatabaseConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,602)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmDatabaseConnectionDown.setDescription('An event that is generated when the UPLMessenger detects that the database connection is down.')
uplmDatabaseConnectionUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,603)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmDatabaseConnectionUp.setDescription('An event that is generated when the UPLMessenger detects that the database connection is back up.')
uplmInvalidConfig = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,604)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmInvalidConfig.setDescription('An event that is generated when the UPLMessenger detects an invalid configuration. uplmTrapInfo gives the configuration name.')
uplmInternalErrors = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,605)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmInternalErrors.setDescription('An event that is generated when the UPLMessenger detects an internal error. uplmTrapInfo gives error name.')
uplmAgentConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,606)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmAgentConnectionDown.setDescription('An event that is generated when the UPLMessenger detects the message server serving UPLAgents at a configured port is down.')
uplmPublicHTTPServiceStarted = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,607)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmPublicHTTPServiceStarted.setDescription('An event that is generated when the UPLMessenger is starting the Public HTTP service. This is sent as a warning because the service lowers the overall security of the system.')
uplmPublicHTTPServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,608)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmPublicHTTPServiceDown.setDescription('An event that is generated when the UPLMessenger detects the Public HTTP interface is down.')
uplmPrivateHTTPServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,609)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmPrivateHTTPServiceDown.setDescription('An event that is generated when the UPLMessenger detects the Private HTTP interface is down.')
uplmPublicHTTPSServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,610)).setObjects(("OPENWAVE-MIB", "uplmdIpAddress"), ("OPENWAVE-MIB", "uplmdHostName"), ("OPENWAVE-MIB", "uplmdProcessId"), ("OPENWAVE-MIB", "uplmTrapInfo"))
if mibBuilder.loadTexts: uplmPublicHTTPSServiceDown.setDescription('An event that is generated when the UPLMessenger detects the Public secure HTTP interface is down.')
uplWap = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 5))
uplWapTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 5, 20))
uplwTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 5, 20, 1), DisplayString())
if mibBuilder.loadTexts: uplwTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: uplwTrapInfo.setDescription('Text string that provides additional information about the trap.')
uplwHostName = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 5, 20, 2), DisplayString())
if mibBuilder.loadTexts: uplwHostName.setStatus('optional')
if mibBuilder.loadTexts: uplwHostName.setDescription('The host name of the node running the process which is executing the wap subsystem.')
uplwProcessId = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 5, 20, 3), Integer32())
if mibBuilder.loadTexts: uplwProcessId.setStatus('optional')
if mibBuilder.loadTexts: uplwProcessId.setDescription('The id of the process that is executing the wap subsystem.')
uplwCLIDMappingError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,701)).setObjects(("OPENWAVE-MIB", "uplwHostName"), ("OPENWAVE-MIB", "uplwProcessId"), ("OPENWAVE-MIB", "uplwTrapInfo"))
if mibBuilder.loadTexts: uplwCLIDMappingError.setDescription('An event that is generated when a match is not found in the CLID mapping table by the wap subsystem. uplwProcessId indicates which client is executing the wap subsystem and uplwTrapInfo describes the error.')
uplBillingMgr = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6))
uplBillingMgrDescriptionTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 1), )
if mibBuilder.loadTexts: uplBillingMgrDescriptionTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplBillingMgrDescriptionTable.setDescription('The table holding UPLBillMan identifying information.')
uplBillingMgrDescriptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplbdIpAddress"), (0, "OPENWAVE-MIB", "uplbdProcessId"))
if mibBuilder.loadTexts: uplBillingMgrDescriptionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplBillingMgrDescriptionEntry.setDescription('The entry associated with each UPLBillMan.')
uplbdIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbdIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplbdIpAddress.setDescription('The ip address of the host running UPLBillMan.')
uplbdProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbdProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplbdProcessId.setDescription('The process id of the UPLBillMan.')
uplbdHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbdHostName.setStatus('mandatory')
if mibBuilder.loadTexts: uplbdHostName.setDescription('The host name of the node running UPLBillMan.')
uplbdPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbdPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uplbdPortNumber.setDescription('The port number on which the UPLBillMan listens for incoming billing events to log.')
uplbdStartupTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbdStartupTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplbdStartupTime.setDescription('The date and time the UPLBillMan started.')
uplBillingMgrEventStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 2), )
if mibBuilder.loadTexts: uplBillingMgrEventStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplBillingMgrEventStatsTable.setDescription('The table holding statistics on events logged by UPLBillMan.')
uplBillingMgrEventStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 2, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplbesIpAddress"), (0, "OPENWAVE-MIB", "uplbesProcessId"))
if mibBuilder.loadTexts: uplBillingMgrEventStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplBillingMgrEventStatsEntry.setDescription('The entry associated with event statistics for each UPLBillMan.')
uplbesIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 2, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbesIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplbesIpAddress.setDescription('The ip address of the host running UPLBillMan.')
uplbesProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbesProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplbesProcessId.setDescription('The process id of the UPLBillMan.')
uplbesEventsReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbesEventsReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uplbesEventsReceived.setDescription('The number of events received by UPLBillMan since it came up.')
uplbesEventLogFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbesEventLogFailures.setStatus('mandatory')
if mibBuilder.loadTexts: uplbesEventLogFailures.setDescription('The number of events that failed to be logged by UPLBillMan since it came up.')
uplbesDirectTransferFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbesDirectTransferFailures.setStatus('mandatory')
if mibBuilder.loadTexts: uplbesDirectTransferFailures.setDescription('The number of events that failed to be directed to a real-time transfer interface by UPLBillMan since it came up.')
uplBillingMgrFileStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3), )
if mibBuilder.loadTexts: uplBillingMgrFileStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplBillingMgrFileStatsTable.setDescription('The table holding statistics on file and disk space used by UPLBillMan.')
uplBillingMgrFileStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplbfsIpAddress"), (0, "OPENWAVE-MIB", "uplbfsProcessId"))
if mibBuilder.loadTexts: uplBillingMgrFileStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplBillingMgrFileStatsEntry.setDescription('The entry associated with file statistics for each UPLBillMan.')
uplbfsIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbfsIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplbfsIpAddress.setDescription('The ip address of the host running UPLBillMan.')
uplbfsProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbfsProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplbfsProcessId.setDescription('The process id of the UPLBillMan.')
uplbfsMaxBillingFileSize = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbfsMaxBillingFileSize.setStatus('mandatory')
if mibBuilder.loadTexts: uplbfsMaxBillingFileSize.setDescription('Maximum billing file size in bytes for UPLBillMan.')
uplbfsCompressorName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbfsCompressorName.setStatus('mandatory')
if mibBuilder.loadTexts: uplbfsCompressorName.setDescription('The name of the compression tool used by UPLBillMan if one is used.')
uplbfsBillingFilePath = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbfsBillingFilePath.setStatus('mandatory')
if mibBuilder.loadTexts: uplbfsBillingFilePath.setDescription('Path to the billing file volume for the UPLBillMan process.')
uplbfsFileDiskSpaceUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbfsFileDiskSpaceUsed.setStatus('mandatory')
if mibBuilder.loadTexts: uplbfsFileDiskSpaceUsed.setDescription('Percentage disk space used at the volume uplbfsBillingFilePath.')
uplbfsArchiveFilePath = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbfsArchiveFilePath.setStatus('mandatory')
if mibBuilder.loadTexts: uplbfsArchiveFilePath.setDescription('Path to the archive file volume for the UPLBillMan process.')
uplbfsArchiveDiskSpaceUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 3, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplbfsArchiveDiskSpaceUsed.setStatus('mandatory')
if mibBuilder.loadTexts: uplbfsArchiveDiskSpaceUsed.setDescription('Percentage disk space used at the volume uplbfsArchiveFilePath.')
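
# --- Illustrative sketch, not part of the generated OPENWAVE-MIB module ------
# uplbfsFileDiskSpaceUsed and uplbfsArchiveDiskSpaceUsed report percentage
# disk usage for the billing and archive volumes; the uplbDiskSpaceLow and
# uplbDiskSpaceCriticallyLow traps defined below fire on thresholds configured
# in UPLBillMan itself.  The helper below is a hedged polling-side sketch of
# the same idea; the 80%/95% thresholds are arbitrary illustrations and the
# agent address and community string are placeholders.
def _example_billing_disk_check(agent='agent.example.com', community='public',
                                warn=80, crit=95):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((agent, 161)), ContextData(),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplbfsBillingFilePath')),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplbfsFileDiskSpaceUsed')),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplbfsArchiveFilePath')),
            ObjectType(ObjectIdentity('OPENWAVE-MIB', 'uplbfsArchiveDiskSpaceUsed')),
            lexicographicMode=False):
        if errorIndication or errorStatus:
            break
        (_, bill_path), (_, bill_used), (_, arch_path), (_, arch_used) = varBinds
        for path, used in ((bill_path, bill_used), (arch_path, arch_used)):
            level = 'CRITICAL' if int(used) >= crit else 'WARNING' if int(used) >= warn else None
            if level:
                print('%s: %s is %d%% full' % (level, path, int(used)))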
uplBillingMgrTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 20))
uplbTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 6, 20, 1), DisplayString())
if mibBuilder.loadTexts: uplbTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: uplbTrapInfo.setDescription('Text string that provides additional information about the trap.')
uplbStartup = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,800)).setObjects(("OPENWAVE-MIB", "uplbdIpAddress"), ("OPENWAVE-MIB", "uplbdHostName"), ("OPENWAVE-MIB", "uplbdProcessId"), ("OPENWAVE-MIB", "uplbTrapInfo"))
if mibBuilder.loadTexts: uplbStartup.setDescription('An event that is generated when the UPLBillMan is started.')
uplbShutdown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,801)).setObjects(("OPENWAVE-MIB", "uplbdIpAddress"), ("OPENWAVE-MIB", "uplbdHostName"), ("OPENWAVE-MIB", "uplbdProcessId"), ("OPENWAVE-MIB", "uplbTrapInfo"))
if mibBuilder.loadTexts: uplbShutdown.setDescription('An event that is generated when the UPLBillMan is shut down. uplbTrapInfo gives the reason for the shutdown.')
uplbDatabaseConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,802)).setObjects(("OPENWAVE-MIB", "uplbdIpAddress"), ("OPENWAVE-MIB", "uplbdHostName"), ("OPENWAVE-MIB", "uplbdProcessId"), ("OPENWAVE-MIB", "uplbTrapInfo"))
if mibBuilder.loadTexts: uplbDatabaseConnectionDown.setDescription('An event that is generated when the UPLBillMan detects that the database connection is down.')
uplbBillingLogFileError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,803)).setObjects(("OPENWAVE-MIB", "uplbdIpAddress"), ("OPENWAVE-MIB", "uplbdHostName"), ("OPENWAVE-MIB", "uplbdProcessId"), ("OPENWAVE-MIB", "uplbTrapInfo"))
if mibBuilder.loadTexts: uplbBillingLogFileError.setDescription('An event that is generated when the UPLBillMan fails to open, move or write to the Billing Log. uplbTrapInfo describes the problem with the Billing Log.')
uplbBillingDirectTransferError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,804)).setObjects(("OPENWAVE-MIB", "uplbdIpAddress"), ("OPENWAVE-MIB", "uplbdHostName"), ("OPENWAVE-MIB", "uplbdProcessId"), ("OPENWAVE-MIB", "uplbTrapInfo"))
if mibBuilder.loadTexts: uplbBillingDirectTransferError.setDescription('An event that is generated when the UPLBillMan fails to direct an event to a Billing transfer interface. uplbTrapInfo describes the problem with the Billing interface.')
uplbDiskSpaceError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,805)).setObjects(("OPENWAVE-MIB", "uplbdIpAddress"), ("OPENWAVE-MIB", "uplbdHostName"), ("OPENWAVE-MIB", "uplbdProcessId"), ("OPENWAVE-MIB", "uplbTrapInfo"))
if mibBuilder.loadTexts: uplbDiskSpaceError.setDescription('An event that is generated when the UPLBillMan detects a problem with disk space. uplbTrapInfo gives the reason for the problem.')
uplbDiskSpaceLow = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,806)).setObjects(("OPENWAVE-MIB", "uplbdIpAddress"), ("OPENWAVE-MIB", "uplbdHostName"), ("OPENWAVE-MIB", "uplbdProcessId"), ("OPENWAVE-MIB", "uplbTrapInfo"))
if mibBuilder.loadTexts: uplbDiskSpaceLow.setDescription('An event that is generated when the UPLBillMan detects that the disk volume has reached a lower threshold. uplbTrapInfo gives the reason for the problem.')
uplbDiskSpaceCriticallyLow = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,807)).setObjects(("OPENWAVE-MIB", "uplbdIpAddress"), ("OPENWAVE-MIB", "uplbdHostName"), ("OPENWAVE-MIB", "uplbdProcessId"), ("OPENWAVE-MIB", "uplbTrapInfo"))
if mibBuilder.loadTexts: uplbDiskSpaceCriticallyLow.setDescription('An event that is generated when the UPLBillMan detects that the disk volume has reached a critical threshold. uplbTrapInfo gives the reason for the problem.')
uplRadiusServer = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7))
uplRadiusServerDescriptionTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 1), )
if mibBuilder.loadTexts: uplRadiusServerDescriptionTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplRadiusServerDescriptionTable.setDescription('The table holding UPLRadius identifying information.')
uplRadiusServerDescriptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplrsdIpAddress"), (0, "OPENWAVE-MIB", "uplrsdProcessId"))
if mibBuilder.loadTexts: uplRadiusServerDescriptionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplRadiusServerDescriptionEntry.setDescription('The entry associated with each UPLRadius.')
uplrsdIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrsdIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplrsdIpAddress.setDescription('The ip address of the host running UPLRadius.')
uplrsdProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrsdProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplrsdProcessId.setDescription('The process id of the UPLRadius.')
uplrsdHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrsdHostName.setStatus('mandatory')
if mibBuilder.loadTexts: uplrsdHostName.setDescription('The host name of the node running UPLRadius.')
uplrsdPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrsdPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uplrsdPortNumber.setDescription('The port number on which the UPLRadius listens for incoming accounting messages.')
uplrsdStartupTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrsdStartupTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplrsdStartupTime.setDescription('The date and time the UPLRadius started.')
uplRadiusServerStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2), )
if mibBuilder.loadTexts: uplRadiusServerStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplRadiusServerStatsTable.setDescription('The table holding statistics on events logged by UPLRadius.')
uplRadiusServerStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplrssIpAddress"), (0, "OPENWAVE-MIB", "uplrssProcessId"))
if mibBuilder.loadTexts: uplRadiusServerStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplRadiusServerStatsEntry.setDescription('The entry associated with event statistics for each UPLRadius.')
uplrssIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssIpAddress.setDescription('The ip address of the host running UPLRadius.')
uplrssProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssProcessId.setDescription('The process id of the UPLRadius process.')
uplrssRasServiceAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssRasServiceAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssRasServiceAddress.setDescription('The RAS Service Address of the UPLRadius process.')
uplrssAuthenticationStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssAuthenticationStatus.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssAuthenticationStatus.setDescription('The Authentication Status of the UPLRadius process. The status can be ON or OFF.')
uplrssStartAccMsgReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssStartAccMsgReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssStartAccMsgReceived.setDescription('The number of START accounting messages received by UPLRadius since it came up.')
uplrssInterimAccMsgReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssInterimAccMsgReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssInterimAccMsgReceived.setDescription('The number of Interim accounting messages received by UPLRadius since it came up.')
uplrssStopAccMsgReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssStopAccMsgReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssStopAccMsgReceived.setDescription('The number of STOP accounting messages received by UPLRadius since it came up.')
uplrssIpMsisdnPairsInserted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssIpMsisdnPairsInserted.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssIpMsisdnPairsInserted.setDescription('The number of IP/MSISDN pairs that have been inserted into the UP.Link database by UPLRadius since it came up.')
uplrssIpMsisdnPairsUpdated = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssIpMsisdnPairsUpdated.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssIpMsisdnPairsUpdated.setDescription('The number of IP/MSISDN pairs that have been updated in the UP.Link database by UPLRadius since it came up.')
uplrssIpMsisdnPairsDeleted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplrssIpMsisdnPairsDeleted.setStatus('mandatory')
if mibBuilder.loadTexts: uplrssIpMsisdnPairsDeleted.setDescription('The number of IP/MSISDN pairs that have been deleted from the UP.Link database by UPLRadius since it came up.')
uplRadiusServerTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 20))
uplrsTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 7, 20, 1), DisplayString())
if mibBuilder.loadTexts: uplrsTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: uplrsTrapInfo.setDescription('Text string that provides additional information about the trap.')
uplrsStartup = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,900)).setObjects(("OPENWAVE-MIB", "uplrsdIpAddress"), ("OPENWAVE-MIB", "uplrsdHostName"), ("OPENWAVE-MIB", "uplrsdProcessId"), ("OPENWAVE-MIB", "uplrsTrapInfo"))
if mibBuilder.loadTexts: uplrsStartup.setDescription('An event that is generated when the UPLRadius is started.')
uplrsFailedToStart = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,901)).setObjects(("OPENWAVE-MIB", "uplrsdIpAddress"), ("OPENWAVE-MIB", "uplrsdHostName"), ("OPENWAVE-MIB", "uplrsdProcessId"), ("OPENWAVE-MIB", "uplrsTrapInfo"))
if mibBuilder.loadTexts: uplrsFailedToStart.setDescription('An event that is generated when the UPLRadius fails to start.')
uplrsDatabaseConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,902)).setObjects(("OPENWAVE-MIB", "uplrsdIpAddress"), ("OPENWAVE-MIB", "uplrsdHostName"), ("OPENWAVE-MIB", "uplrsdProcessId"), ("OPENWAVE-MIB", "uplrsTrapInfo"))
if mibBuilder.loadTexts: uplrsDatabaseConnectionDown.setDescription('An event that is generated when the UPLRadius detects that the database connection is down.')
uplrsBillingInitError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,903)).setObjects(("OPENWAVE-MIB", "uplrsdIpAddress"), ("OPENWAVE-MIB", "uplrsdHostName"), ("OPENWAVE-MIB", "uplrsdProcessId"), ("OPENWAVE-MIB", "uplrsTrapInfo"))
if mibBuilder.loadTexts: uplrsBillingInitError.setDescription('An event that is generated when the Radius client cannot initialise communication with the Billing Manager.')
uplrsBillingLogError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,904)).setObjects(("OPENWAVE-MIB", "uplrsdIpAddress"), ("OPENWAVE-MIB", "uplrsdHostName"), ("OPENWAVE-MIB", "uplrsdProcessId"), ("OPENWAVE-MIB", "uplrsTrapInfo"))
if mibBuilder.loadTexts: uplrsBillingLogError.setDescription('An event that is generated when a Radius client billing event cannot be logged with the Billing Manager.')
uplCertRequester = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8))
uplCertRequesterDescriptionTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 1), )
if mibBuilder.loadTexts: uplCertRequesterDescriptionTable.setStatus('mandatory')
if mibBuilder.loadTexts: uplCertRequesterDescriptionTable.setDescription('The table holding UPLCertRequester identifying information.')
uplCertRequesterDescriptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uplcrdIpAddress"), (0, "OPENWAVE-MIB", "uplcrdProcessId"))
if mibBuilder.loadTexts: uplCertRequesterDescriptionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uplCertRequesterDescriptionEntry.setDescription('The entry associated with each UPLCertRequester.')
uplcrdIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 1, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplcrdIpAddress.setStatus('mandatory')
if mibBuilder.loadTexts: uplcrdIpAddress.setDescription('The ip address of the host running UPLCertRequester.')
uplcrdProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplcrdProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uplcrdProcessId.setDescription('The process id of the UPLCertRequester.')
uplcrdHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplcrdHostName.setStatus('mandatory')
if mibBuilder.loadTexts: uplcrdHostName.setDescription('The host name of the node running UPLCertRequester.')
uplcrdUpdateInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplcrdUpdateInterval.setStatus('mandatory')
if mibBuilder.loadTexts: uplcrdUpdateInterval.setDescription('The interval between database updates of certificates by the UPLCertRequester.')
uplcrdRequestAllowance = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplcrdRequestAllowance.setStatus('mandatory')
if mibBuilder.loadTexts: uplcrdRequestAllowance.setDescription('The certificate request allowance time. This is the time, before the expiration date of an existing certificate, that a new certificate is requested by UPLCertRequester.')
uplcrdStartupTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uplcrdStartupTime.setStatus('mandatory')
if mibBuilder.loadTexts: uplcrdStartupTime.setDescription('The date and time the UPLCertRequester started.')
uplCertRequesterTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 20))
uplcrTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 2, 1, 8, 20, 1), DisplayString())
if mibBuilder.loadTexts: uplcrTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: uplcrTrapInfo.setDescription('Text string that provides additional information about the trap.')
uplcrStartup = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,1200)).setObjects(("OPENWAVE-MIB", "uplcrdIpAddress"), ("OPENWAVE-MIB", "uplcrdHostName"), ("OPENWAVE-MIB", "uplcrdProcessId"), ("OPENWAVE-MIB", "uplcrTrapInfo"))
if mibBuilder.loadTexts: uplcrStartup.setDescription('An event that is generated when the UPLCertRequester is started.')
uplcrFatalError = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,1201)).setObjects(("OPENWAVE-MIB", "uplcrdIpAddress"), ("OPENWAVE-MIB", "uplcrdHostName"), ("OPENWAVE-MIB", "uplcrdProcessId"), ("OPENWAVE-MIB", "uplcrTrapInfo"))
if mibBuilder.loadTexts: uplcrFatalError.setDescription('An event that is generated when an internal exception is caught in UPLCertRequester that may cause the process to fail.')
uplcrCertificateUpdateFailed = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,1202)).setObjects(("OPENWAVE-MIB", "uplcrdIpAddress"), ("OPENWAVE-MIB", "uplcrdHostName"), ("OPENWAVE-MIB", "uplcrdProcessId"), ("OPENWAVE-MIB", "uplcrTrapInfo"))
if mibBuilder.loadTexts: uplcrCertificateUpdateFailed.setDescription('An event that is generated when the UPLCertRequester fails in an attempt to refresh a WTLS certificate.')
uplcrInvalidCertResponse = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 2) + (0,1203)).setObjects(("OPENWAVE-MIB", "uplcrdIpAddress"), ("OPENWAVE-MIB", "uplcrdHostName"), ("OPENWAVE-MIB", "uplcrdProcessId"), ("OPENWAVE-MIB", "uplcrTrapInfo"))
if mibBuilder.loadTexts: uplcrInvalidCertResponse.setDescription('An event that is generated when the UPLCertRequester receives a HTTP response with a content-type other than the WTLS certificate mime type.')
upLinkConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 2))
upLinkStaticInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 2, 2, 1))
upAdmin = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 3))
upAdminTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 3, 20))
upsTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 3, 20, 1), DisplayString())
if mibBuilder.loadTexts: upsTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: upsTrapInfo.setDescription('Text string which provides additional information about the trap.')
upsProxyServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,400)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsProxyServiceDown.setDescription('An event that is generated when the proxy service is down ')
upsProxyServiceSlow = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,401)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsProxyServiceSlow.setDescription('An event that is generated when the proxy service is slow')
upsPushServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,402)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsPushServiceDown.setDescription('An event that is generated when the push service is down ')
upsBookmarksServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,403)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsBookmarksServiceDown.setDescription('An event that is generated when the bookmarks service is down ')
upsBookmarksServiceSlow = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,404)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsBookmarksServiceSlow.setDescription('An event that is generated when the bookmarks service is slow ')
upsHomePageServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,405)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsHomePageServiceDown.setDescription('An event that is generated when the homepage service is down ')
upsUPWebServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,406)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPWebServiceDown.setDescription('An event that is generated when the UP Web service is down ')
upsUPWebServiceSlow = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,407)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPWebServiceSlow.setDescription('An event that is generated when the UP Web service is slow ')
upsUPAdminServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,408)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPAdminServiceDown.setDescription('An event that is generated when the UP Admin service is down ')
upsUPMailServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,409)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPMailServiceDown.setDescription('An event that is generated when the UP Mail service is down ')
upsUPMailServiceSlow = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,410)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPMailServiceSlow.setDescription('An event that is generated when the UP Mail service is slow ')
upsUPPimServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,411)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPPimServiceDown.setDescription('An event that is generated when the UP Pim service is down ')
upsUPPimServiceSlow = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,412)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPPimServiceSlow.setDescription('An event that is generated when the UP Pim service is slow')
upsHomePageServiceSlow = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,413)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsHomePageServiceSlow.setDescription('An event that is generated when the UP Home Page service is slow')
upsProxyServiceUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,414)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsProxyServiceUp.setDescription('An event that is generated when the proxy service comes back online.')
upsProxyServiceNormal = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,415)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsProxyServiceNormal.setDescription('An event that is generated when the proxy service returns to normal response time.')
upsPushServiceUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,416)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsPushServiceUp.setDescription('An event that is generated when the push service comes back online.')
upsBookmarksServiceUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,417)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsBookmarksServiceUp.setDescription('An event that is generated when the bookmarks service comes back online')
upsBookmarksServiceNormal = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,418)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsBookmarksServiceNormal.setDescription('An event that is generated when the bookmarks service returns to normal response time.')
upsHomePageServiceUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,419)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsHomePageServiceUp.setDescription('An event that is generated when the homepage service comes back online.')
upsUPWebServiceUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,420)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPWebServiceUp.setDescription('An event that is generated when the UP Web service comes back online.')
upsUPWebServiceNormal = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,421)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPWebServiceNormal.setDescription('An event that is generated when the UP Web service returns to normal response time.')
upsUPAdminServiceUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,422)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPAdminServiceUp.setDescription('An event that is generated when the UP Admin service comes back online.')
upsUPMailServiceUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,423)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPMailServiceUp.setDescription('An event that is generated when the UP Mail service comes back online.')
upsUPMailServiceNormal = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,424)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPMailServiceNormal.setDescription('An event that is generated when the UP Mail service returns to normal response time.')
upsUPPimServiceUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,425)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPPimServiceUp.setDescription('An event that is generated when the UP Pim service comes back on line.')
upsUPPimServiceNormal = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 3) + (0,426)).setObjects(("OPENWAVE-MIB", "upsTrapInfo"))
if mibBuilder.loadTexts: upsUPPimServiceNormal.setDescription('An event that is generated when the UP Pim service returns to normal response time.')
upPushProxy = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 5))
upPushPap = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1))
upPushPapDescriptionTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1), )
if mibBuilder.loadTexts: upPushPapDescriptionTable.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPapDescriptionTable.setDescription('The table holding UPPushPAP identifying information.')
upPushPapDescriptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uppdPAPIndex"))
if mibBuilder.loadTexts: upPushPapDescriptionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPapDescriptionEntry.setDescription('The entry associated with each UPPushPAP.')
uppdPAPIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppdPAPIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uppdPAPIndex.setDescription('The index for each PAP process in the SNMP table.')
uppdPAPIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppdPAPIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uppdPAPIdentifier.setDescription('The unique identifier for each PAP process.')
uppdProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppdProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uppdProcessId.setDescription('The process id of the UPPushPAP.')
uppdHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppdHostName.setStatus('mandatory')
if mibBuilder.loadTexts: uppdHostName.setDescription('The host name of the node running UPPushPAP.')
uppdPublicHTTPPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppdPublicHTTPPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uppdPublicHTTPPortNumber.setDescription('The port number on which the UPPushPAP listens for nonsecure public notification services.')
uppdPublicHTTPSPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppdPublicHTTPSPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uppdPublicHTTPSPortNumber.setDescription('The port number on which the UPPushPAP listens for secure public notification services.')
uppdPrivateHTTPPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppdPrivateHTTPPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uppdPrivateHTTPPortNumber.setDescription('The port number on which the UPPushPAP listens for nonsecure private notification services.')
uppdStartupTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppdStartupTime.setStatus('mandatory')
if mibBuilder.loadTexts: uppdStartupTime.setDescription('The date and time the UPPushPAP started.')
upPushPapNtfnStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2), )
if mibBuilder.loadTexts: upPushPapNtfnStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPapNtfnStatsTable.setDescription('The table holding statistics on notification services provided by UPPushPAP.')
upPushPapNtfnStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uppnsPAPIndex"))
if mibBuilder.loadTexts: upPushPapNtfnStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPapNtfnStatsEntry.setDescription('The entry associated with notifications statistics for each UPPushPAP.')
uppnsPAPIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsPAPIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsPAPIndex.setDescription('The index for each PAP process in the SNMP table.')
uppnsPAPIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsPAPIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsPAPIdentifier.setDescription('The unique identifier for each PAP process.')
uppnsPublicHTTPReqReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsPublicHTTPReqReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsPublicHTTPReqReceived.setDescription('The number of public HTTP requests the UPPushPAP received since it comes up.')
uppnsPrivateHTTPReqReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsPrivateHTTPReqReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsPrivateHTTPReqReceived.setDescription('The number of private HTTP requests the UPPushPAP received since it comes up.')
uppnsPublicHTTPSReqReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsPublicHTTPSReqReceived.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsPublicHTTPSReqReceived.setDescription('The number of public HTTPS requests the UPPushPAP received since it comes up.')
uppnsPublicHTTPReqAccepted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsPublicHTTPReqAccepted.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsPublicHTTPReqAccepted.setDescription('The number of public HTTP requests the UPPushPAP successfully accepted since it comes up.')
uppnsPrivateHTTPReqAccepted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsPrivateHTTPReqAccepted.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsPrivateHTTPReqAccepted.setDescription('The number of private HTTP requests the UPPushPAP successfully accepted since it comes up.')
uppnsPublicHTTPSReqAccepted = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsPublicHTTPSReqAccepted.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsPublicHTTPSReqAccepted.setDescription('The number of public HTTPS requests the UPPushPAP successfully accepted since it comes up.')
uppnsAvgNtfnsReceivedPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsAvgNtfnsReceivedPerSec.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsAvgNtfnsReceivedPerSec.setDescription('The average number of notifications received per second by the UPPushPAP.')
uppnsAvgNtfnsAcceptedPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppnsAvgNtfnsAcceptedPerSec.setStatus('mandatory')
if mibBuilder.loadTexts: uppnsAvgNtfnsAcceptedPerSec.setDescription('The average number of notifications accepted per second by the UPPushPAP.')
upPushPapForwardedNtfnsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 3), )
if mibBuilder.loadTexts: upPushPapForwardedNtfnsTable.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPapForwardedNtfnsTable.setDescription('The table holding statistics on the notification forwarded by UPPushPAP.')
upPushPapForwardedNtfnsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 3, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uppfnPAPIndex"))
if mibBuilder.loadTexts: upPushPapForwardedNtfnsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPapForwardedNtfnsEntry.setDescription('The entry associated with the forwarded notifications statistics for each UPPushPAP.')
uppfnPAPIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppfnPAPIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uppfnPAPIndex.setDescription('The index for each PAP process in the SNMP table.')
uppfnPAPIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppfnPAPIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uppfnPAPIdentifier.setDescription('The unique identifier for each PAP process.')
uppfnPPGForwardedNtfns = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 3, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppfnPPGForwardedNtfns.setStatus('mandatory')
if mibBuilder.loadTexts: uppfnPPGForwardedNtfns.setDescription('The number of notifications forwarded to the UPPushPPG process by the UPPushPAP.')
uppfnPPGFailedNtfns = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 3, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppfnPPGFailedNtfns.setStatus('mandatory')
if mibBuilder.loadTexts: uppfnPPGFailedNtfns.setDescription('The number of notifications that failed to be forwarded to the UPPushPPG process by the UPPushPAP.')
uppfnMessengerForwardedNtfns = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 3, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppfnMessengerForwardedNtfns.setStatus('mandatory')
if mibBuilder.loadTexts: uppfnMessengerForwardedNtfns.setDescription('The number of notifications forwarded to the UPLMessenger process by the UPPushPAP.')
uppfnMessengerFailedNtfns = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 3, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppfnMessengerFailedNtfns.setStatus('mandatory')
if mibBuilder.loadTexts: uppfnMessengerFailedNtfns.setDescription('The number of notifications that failed to be forwarded to the UPLMessenger process by the UPPushPAP.')
upPushPapTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 20))
uppTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 5, 1, 20, 1), DisplayString())
if mibBuilder.loadTexts: uppTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: uppTrapInfo.setDescription('Text string that provides additional information about the trap.')
uppStartup = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1000)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppStartup.setDescription('An event that is generated when the UPPushPAP is started.')
uppShutDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1001)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppShutDown.setDescription('An event that is generated when the UPPushPAP is shutting down. uppTrapInfo gives the reason for the shutdown.')
uppFailToStart = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1002)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppFailToStart.setDescription('An event that is generated when the UPPushPAP has failed to start-up. uppTrapInfo gives the reason for the shutdown.')
uppDatabaseConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1003)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppDatabaseConnectionDown.setDescription('An event that is generated when the UPPushPAP detects that the database connection is down.')
uppInternalErrors = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1006)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppInternalErrors.setDescription('An event that is generated when the UPPushPAP detects an internal error. uppTrapInfo gives error name.')
uppPublicHTTPServiceStarted = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1007)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppPublicHTTPServiceStarted.setDescription('An event that is generated when the UPPushPAP is starting the Public HTTP service. This is sent as a warning because the service lowers the overall security of the system.')
uppPublicHTTPServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1008)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppPublicHTTPServiceDown.setDescription('An event that is generated when the UPPushPAP detects the Public HTTP interface is down.')
uppPrivateHTTPServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1009)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppPrivateHTTPServiceDown.setDescription('An event that is generated when the UPPushPAP detects the Private HTTP interface is down.')
uppPublicHTTPSServiceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1010)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppPublicHTTPSServiceDown.setDescription('An event that is generated when the UPPushPAP detects the Public secure HTTP interface is down.')
uppPPGInterfaceDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1011)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppPPGInterfaceDown.setDescription('An event that is generated when the UPPushPAP detects a PPG interface is down. uppTrapInfo gives the PPG identifier')
uppPPGInterfaceUp = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1012)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppPPGInterfaceUp.setDescription('An event that is generated when the UPPushPAP detects a PPG interface is up. uppTrapInfo gives the PPG identifier')
uppPAPSuspended = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1013)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppPAPSuspended.setDescription('An event that is generated when the UPPushPAP process has been suspended.')
uppPAPResumed = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1014)).setObjects(("OPENWAVE-MIB", "uppdPAPIdentifier"), ("OPENWAVE-MIB", "uppdHostName"), ("OPENWAVE-MIB", "uppdProcessId"), ("OPENWAVE-MIB", "uppTrapInfo"))
if mibBuilder.loadTexts: uppPAPResumed.setDescription('An event that is generated when the UPPushPAP has resumed.')
upPushPpg = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2))
upPushPpgDescriptionTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 1), )
if mibBuilder.loadTexts: upPushPpgDescriptionTable.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPpgDescriptionTable.setDescription('The table holding UPPushPPG identifying information.')
upPushPpgDescriptionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 1, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uppgdPPGIndex"))
if mibBuilder.loadTexts: upPushPpgDescriptionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPpgDescriptionEntry.setDescription('The entry associated with each UPPushPPG.')
uppgdPPGIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgdPPGIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uppgdPPGIndex.setDescription('The index for each PPG process in the SNMP table.')
uppgdPPGIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgdPPGIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uppgdPPGIdentifier.setDescription('The unique identifier for each PPG process.')
uppgdProcessId = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgdProcessId.setStatus('mandatory')
if mibBuilder.loadTexts: uppgdProcessId.setDescription('The process id of the UPPushPPG.')
uppgdHostName = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgdHostName.setStatus('mandatory')
if mibBuilder.loadTexts: uppgdHostName.setDescription('The host name of the node running UPPushPPG.')
uppgdMsgServerPortNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgdMsgServerPortNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uppgdMsgServerPortNumber.setDescription('The port number on which the UPPushPPG connects to the proxy to deliver notifications.')
uppgdStartupTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgdStartupTime.setStatus('mandatory')
if mibBuilder.loadTexts: uppgdStartupTime.setDescription('The date and time the UPPushPPG started.')
upPushPpgNtfnStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 2), )
if mibBuilder.loadTexts: upPushPpgNtfnStatsTable.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPpgNtfnStatsTable.setDescription('The table holding statistics on notification services provided by UPPushPPG.')
upPushPpgNtfnStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 2, 1), ).setIndexNames((0, "OPENWAVE-MIB", "uppgnsPPGIndex"))
if mibBuilder.loadTexts: upPushPpgNtfnStatsEntry.setStatus('mandatory')
if mibBuilder.loadTexts: upPushPpgNtfnStatsEntry.setDescription('The entry associated with notifications statistics for each UPPushPPG.')
uppgnsPPGIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgnsPPGIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uppgnsPPGIndex.setDescription('The index for each PPG process in the SNMP table.')
uppgnsPPGIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgnsPPGIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uppgnsPPGIdentifier.setDescription('The unique identifier for each PPG process.')
uppgnsTotalNumOfPendingNtfns = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgnsTotalNumOfPendingNtfns.setStatus('mandatory')
if mibBuilder.loadTexts: uppgnsTotalNumOfPendingNtfns.setDescription('The total number of pending notifications in the cache as maintained by the UPPushPPG.')
uppgnsAvgNtfnsDeliveredPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgnsAvgNtfnsDeliveredPerSec.setStatus('mandatory')
if mibBuilder.loadTexts: uppgnsAvgNtfnsDeliveredPerSec.setDescription('The average number of notifications delivered per second by the UPPushPPG.')
uppgnsAvgNtfnsMarkedUnDelvrPerSec = MibTableColumn((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uppgnsAvgNtfnsMarkedUnDelvrPerSec.setStatus('mandatory')
if mibBuilder.loadTexts: uppgnsAvgNtfnsMarkedUnDelvrPerSec.setDescription('The average number of notifications marked undeliverable per second by the UPPushPPG.')
upPushPpgTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 20))
uppgTrapInfo = MibScalar((1, 3, 6, 1, 4, 1, 1900, 4, 5, 2, 20, 1), DisplayString())
if mibBuilder.loadTexts: uppgTrapInfo.setStatus('optional')
if mibBuilder.loadTexts: uppgTrapInfo.setDescription('Text string that provides additional information about the trap.')
uppgStartup = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1100)).setObjects(("OPENWAVE-MIB", "uppgdPPGIdentifier"), ("OPENWAVE-MIB", "uppgdHostName"), ("OPENWAVE-MIB", "uppgdProcessId"), ("OPENWAVE-MIB", "uppgTrapInfo"))
if mibBuilder.loadTexts: uppgStartup.setDescription('An event that is generated when the UPPushPPG is started.')
upgFailToStart = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1101)).setObjects(("OPENWAVE-MIB", "uppgdPPGIdentifier"), ("OPENWAVE-MIB", "uppgdHostName"), ("OPENWAVE-MIB", "uppgdProcessId"), ("OPENWAVE-MIB", "uppgTrapInfo"))
if mibBuilder.loadTexts: upgFailToStart.setDescription('An event that is generated when the UPPushPPG has failed to start-up. uppTrapInfo gives the reason for the shutdown.')
uppgDatabaseConnectionDown = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1102)).setObjects(("OPENWAVE-MIB", "uppgdPPGIdentifier"), ("OPENWAVE-MIB", "uppgdHostName"), ("OPENWAVE-MIB", "uppgdProcessId"), ("OPENWAVE-MIB", "uppgTrapInfo"))
if mibBuilder.loadTexts: uppgDatabaseConnectionDown.setDescription('An event that is generated when the UPPushPPG detects that the database connection is down.')
uppgInternalErrors = NotificationType((1, 3, 6, 1, 4, 1, 1900, 4, 5) + (0,1105)).setObjects(("OPENWAVE-MIB", "uppgdPPGIdentifier"), ("OPENWAVE-MIB", "uppgdHostName"), ("OPENWAVE-MIB", "uppgdProcessId"), ("OPENWAVE-MIB", "uppgTrapInfo"))
if mibBuilder.loadTexts: uppgInternalErrors.setDescription('An event that is generated when the UPPushPPG detects an internal error. uppgTrapInfo gives error name.')
services = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 5))
upMail = MibIdentifier((1, 3, 6, 1, 4, 1, 1900, 5, 1))
mibBuilder.exportSymbols("OPENWAVE-MIB", upiInitFailToStart=upiInitFailToStart, uplAgentLimitedResourceTable=uplAgentLimitedResourceTable, uplMessengerNtfnStatsTable=uplMessengerNtfnStatsTable, uplrNbRouterConnectionUp=uplrNbRouterConnectionUp, upsUPWebServiceUp=upsUPWebServiceUp, uplMessengerNtfnCacheEntry=uplMessengerNtfnCacheEntry, uplrsdHostName=uplrsdHostName, uplcrdIpAddress=uplcrdIpAddress, uplrdStartUpTime=uplrdStartUpTime, uplmdIpAddress=uplmdIpAddress, upldUplAgentsDisconnected=upldUplAgentsDisconnected, uplmDBMinConnections=uplmDBMinConnections, uplmdOpenAgentStreams=uplmdOpenAgentStreams, systems=systems, uplMessenger=uplMessenger, uppnsPAPIdentifier=uppnsPAPIdentifier, uplaRequestsDropped=uplaRequestsDropped, uplAgentTrapInfo=uplAgentTrapInfo, uppfnPPGFailedNtfns=uppfnPPGFailedNtfns, uplmhProcessId=uplmhProcessId, uppTrapInfo=uppTrapInfo, upiInitChildProcessEntry=upiInitChildProcessEntry, uplmnsAvgNtfnsAddedPerSec=uplmnsAvgNtfnsAddedPerSec, uplaSilentErrors=uplaSilentErrors, uplaErrorClass=uplaErrorClass, uplmhPrivateHTTPCurrentDispQueueLen=uplmhPrivateHTTPCurrentDispQueueLen, uplrssAuthenticationStatus=uplrssAuthenticationStatus, upipChildProcessExePath=upipChildProcessExePath, uplrssInterimAccMsgReceived=uplrssInterimAccMsgReceived, uppgdStartupTime=uppgdStartupTime, uplDispatcher=uplDispatcher, uplrsFailedToStart=uplrsFailedToStart, uplaKeyErrors=uplaKeyErrors, uplaRadiusClientStats=uplaRadiusClientStats, uplmhPublicHTTPSMaxDispQueueLength=uplmhPublicHTTPSMaxDispQueueLength, uplwProcessId=uplwProcessId, uplmDBConnectionCacheMeanWaitTime=uplmDBConnectionCacheMeanWaitTime, uplaOtherErrors=uplaOtherErrors, upipChildProcessType=upipChildProcessType, uplAgentWapErrorStatsSummaryTable=uplAgentWapErrorStatsSummaryTable, uplmnsIpAddress=uplmnsIpAddress, uplRadiusServerDescriptionEntry=uplRadiusServerDescriptionEntry, upiInitChildProcessTable=upiInitChildProcessTable, upPushPapDescriptionEntry=upPushPapDescriptionEntry, uplBillingMgrDescriptionEntry=uplBillingMgrDescriptionEntry, uplbfsFileDiskSpaceUsed=uplbfsFileDiskSpaceUsed, uplmAgentConnectionDown=uplmAgentConnectionDown, uplmDatabaseConnectionDown=uplmDatabaseConnectionDown, upiChildProcessesBelowMinimum=upiChildProcessesBelowMinimum, uplAgentDescriptionTable=uplAgentDescriptionTable, upsUPPimServiceNormal=upsUPPimServiceNormal, uplMessengerDescriptionTable=uplMessengerDescriptionTable, uplmhPublicHTTPSBusyThreads=uplmhPublicHTTPSBusyThreads, uplaHostName=uplaHostName, uplrProtocolError=uplrProtocolError, upPushPpgNtfnStatsTable=upPushPpgNtfnStatsTable, uplBillingMgrEventStatsEntry=uplBillingMgrEventStatsEntry, uplmhPublicHTTPOpenConnections=uplmhPublicHTTPOpenConnections, uplmhPrivateHTTPMaxConnections=uplmhPrivateHTTPMaxConnections, upitChildProcessHostName=upitChildProcessHostName, uplAgentAirLinkStatsTable=uplAgentAirLinkStatsTable, uppdPublicHTTPSPortNumber=uppdPublicHTTPSPortNumber, upldDatabaseConnectionDown=upldDatabaseConnectionDown, uplaEncryptedSessions=uplaEncryptedSessions, uppnsPublicHTTPReqReceived=uppnsPublicHTTPReqReceived, uplbesEventsReceived=uplbesEventsReceived, upPushPap=upPushPap, uplHdtpStats=uplHdtpStats, uplAgentWapWTPTransactionStatsTable=uplAgentWapWTPTransactionStatsTable, upipInitProcessId=upipInitProcessId, upiInitStats=upiInitStats, uppPPGInterfaceUp=uppPPGInterfaceUp, uplrssRasServiceAddress=uplrssRasServiceAddress, upsBookmarksServiceDown=upsBookmarksServiceDown, uppfnMessengerForwardedNtfns=uppfnMessengerForwardedNtfns, uplBillingMgrEventStatsTable=uplBillingMgrEventStatsTable, 
uplAgentTransactionStatsEntry=uplAgentTransactionStatsEntry, uppdPublicHTTPPortNumber=uppdPublicHTTPPortNumber, uplmnsNumStatusFound=uplmnsNumStatusFound, upldSubscribersLoaded=upldSubscribersLoaded, uplAgentStackServiceMeanTableItemsDeviation=uplAgentStackServiceMeanTableItemsDeviation, uplbfsArchiveDiskSpaceUsed=uplbfsArchiveDiskSpaceUsed, uplassAgentIdentifier=uplassAgentIdentifier, uplRadiusServerStatsTable=uplRadiusServerStatsTable, uplAgentWebAccessStatsEntry=uplAgentWebAccessStatsEntry, uplaWapAbortTransaction=uplaWapAbortTransaction, upAdminTrapInfo=upAdminTrapInfo, uplAgentWapTransactionStatsEntry=uplAgentWapTransactionStatsEntry, uplbdPortNumber=uplbdPortNumber, uplAgentWapErrorStatsDetailTable=uplAgentWapErrorStatsDetailTable, uplmhPublicHTTPSMaxThreads=uplmhPublicHTTPSMaxThreads, uppgTrapInfo=uppgTrapInfo, uplmnsNumDeleted=uplmnsNumDeleted, uplbfsArchiveFilePath=uplbfsArchiveFilePath, uppPublicHTTPSServiceDown=uppPublicHTTPSServiceDown, uplDispatcherTrapInfo=uplDispatcherTrapInfo, uplaSessionErrors=uplaSessionErrors, upPushPpgNtfnStatsEntry=upPushPpgNtfnStatsEntry, upipsChildProcessesStarted=upipsChildProcessesStarted, uppnsPrivateHTTPReqReceived=uppnsPrivateHTTPReqReceived, upidInitProcessType=upidInitProcessType, uplaStartUpTime=uplaStartUpTime, uplBillingMgr=uplBillingMgr, upsUPMailServiceDown=upsUPMailServiceDown, uplaasAgentIdentifier=uplaasAgentIdentifier, uplawssAgentIdentifier=uplawssAgentIdentifier, upipsInitProcessId=upipsInitProcessId, uplaDeviationOfTransactionLife=uplaDeviationOfTransactionLife, uplAgentErrorStatsSummaryTable=uplAgentErrorStatsSummaryTable, uplAgent=uplAgent, uplaWapResultTpdus=uplaWapResultTpdus, uppnsPublicHTTPSReqAccepted=uppnsPublicHTTPSReqAccepted, upipChildProcessState=upipChildProcessState, upsBookmarksServiceUp=upsBookmarksServiceUp, uplMessengerTrapInfo=uplMessengerTrapInfo, upldTrapInfo=upldTrapInfo, upipChildProcessStatus=upipChildProcessStatus, uplmnsPublicHTTPSReqProcessed=uplmnsPublicHTTPSReqProcessed, uplRadiusServerDescriptionTable=uplRadiusServerDescriptionTable, uppPAPResumed=uppPAPResumed, uppfnMessengerFailedNtfns=uppfnMessengerFailedNtfns, uplmdStartupTime=uplmdStartupTime, upipInitProcessType=upipInitProcessType, uplrssProcessId=uplrssProcessId, uplmhIpAddress=uplmhIpAddress, uplbfsCompressorName=uplbfsCompressorName, uppgdPPGIndex=uppgdPPGIndex, uplbesIpAddress=uplbesIpAddress, uplaOutOfResource=uplaOutOfResource, upsUPPimServiceDown=upsUPPimServiceDown, upPushPpg=upPushPpg, uplmnsNumAddRequests=uplmnsNumAddRequests, uplAgentRadiusClientStatsTable=uplAgentRadiusClientStatsTable, upldOutOfResouce=upldOutOfResouce, uplaWapErrorSeverity=uplaWapErrorSeverity, uplmdNumTxnProcessed=uplmdNumTxnProcessed, uplmInvalidConfig=uplmInvalidConfig, uppgdProcessId=uppgdProcessId, uplmnsNumStatusRequests=uplmnsNumStatusRequests, uplaHttpRequestsSucceeded=uplaHttpRequestsSucceeded, uplmnsPrivateHTTPReqProcessed=uplmnsPrivateHTTPReqProcessed, uplaesAgentIdentifier=uplaesAgentIdentifier, uplmdMaxMsgClientStreams=uplmdMaxMsgClientStreams, uplrHdtpStats=uplrHdtpStats, uplaActiveWapSessions=uplaActiveWapSessions, uplawtsAgentIdentifier=uplawtsAgentIdentifier, uplmdMsgServerPortNumber=uplmdMsgServerPortNumber, upldSuccessfulMappingHits=upldSuccessfulMappingHits, upsHomePageServiceUp=upsHomePageServiceUp, uplmDatabaseConnectionUp=uplmDatabaseConnectionUp, uplaStartup=uplaStartup, upipsInitIpAddr=upipsInitIpAddr, upldUplRadiusConnectionDown=upldUplRadiusConnectionDown, uplmhPublicHTTPMaxDispQueueLength=uplmhPublicHTTPMaxDispQueueLength, 
uplrsDatabaseConnectionDown=uplrsDatabaseConnectionDown, uppdPAPIndex=uppdPAPIndex, uplMessengerDescriptionEntry=uplMessengerDescriptionEntry, uplMessengerNtfnStatsEntry=uplMessengerNtfnStatsEntry, uplcrdUpdateInterval=uplcrdUpdateInterval, upidInitProcessId=upidInitProcessId, uplaOtherWapErrors=uplaOtherWapErrors, uplmncAvgNumOfPendingNtfnsPerSub=uplmncAvgNumOfPendingNtfnsPerSub, uplmhPublicHTTPSMaxConnections=uplmhPublicHTTPSMaxConnections, uplmhPrivateHTTPBusyThreads=uplmhPrivateHTTPBusyThreads, uppgdMsgServerPortNumber=uppgdMsgServerPortNumber, uplmnsProcessId=uplmnsProcessId, uplrdPortNumber=uplrdPortNumber, upLinkConfig=upLinkConfig, upLinkProcesses=upLinkProcesses, uppgnsAvgNtfnsMarkedUnDelvrPerSec=uppgnsAvgNtfnsMarkedUnDelvrPerSec, uppnsPublicHTTPReqAccepted=uppnsPublicHTTPReqAccepted, uplaSuccessfulMappingHits=uplaSuccessfulMappingHits, upldHostName=upldHostName, uplcrdRequestAllowance=uplcrdRequestAllowance, uplaHttpsRequestsSucceeded=uplaHttpsRequestsSucceeded, uppdProcessId=uppdProcessId, uplbfsBillingFilePath=uplbfsBillingFilePath, uplmShutdown=uplmShutdown, uppgnsPPGIndex=uppgnsPPGIndex, upPushPapNtfnStatsEntry=upPushPapNtfnStatsEntry, uplBillingMgrTrapInfo=uplBillingMgrTrapInfo, uppShutDown=uppShutDown, upiDatabaseConnectionDown=upiDatabaseConnectionDown, uplmhPublicHTTPMaxConnections=uplmhPublicHTTPMaxConnections, uplbBillingLogFileError=uplbBillingLogFileError, upipsChildProcessesRunning=upipsChildProcessesRunning, uplaRepliesTimedOut=uplaRepliesTimedOut, uplaHttpRequestsStarted=uplaHttpRequestsStarted, uplaSessionsStarted=uplaSessionsStarted, uplcrTrapInfo=uplcrTrapInfo, uplBillingMgrFileStatsEntry=uplBillingMgrFileStatsEntry, uplNbRouterDescriptionTable=uplNbRouterDescriptionTable, upAdmin=upAdmin, uppgdHostName=uppgdHostName, upldInvalidConfig=upldInvalidConfig, upsHomePageServiceDown=upsHomePageServiceDown, uplaInternalFatalErrors=uplaInternalFatalErrors, uplAgentAirLinkStatsEntry=uplAgentAirLinkStatsEntry, openwave=openwave, uplNbRouter=uplNbRouter, uplmnsNumDeleteRequests=uplmnsNumDeleteRequests, uppnsPAPIndex=uppnsPAPIndex, upldDatabaseConnectionUp=upldDatabaseConnectionUp, uplcrCertificateUpdateFailed=uplcrCertificateUpdateFailed, upiDatabaseConnectionUp=upiDatabaseConnectionUp, uplmnsAvgNtfnsExpiredPerSec=uplmnsAvgNtfnsExpiredPerSec, upsPushServiceDown=upsPushServiceDown, uplAgentStackServiceWDPPortNumber=uplAgentStackServiceWDPPortNumber, uplaTransactionErrors=uplaTransactionErrors, uplWap=uplWap, uplAgentTransactionStatsTable=uplAgentTransactionStatsTable, uplwHostName=uplwHostName, uplmnsNumAdded=uplmnsNumAdded, upipsChildProcessesDied=upipsChildProcessesDied, upsUPMailServiceNormal=upsUPMailServiceNormal, uplRadiusServerTrapInfo=uplRadiusServerTrapInfo, uplaErrorCount=uplaErrorCount, uplaMeanTransactionLife=uplaMeanTransactionLife, uplDispatcherStats=uplDispatcherStats, uplbBillingDirectTransferError=uplbBillingDirectTransferError, uplcrdProcessId=uplcrdProcessId, uplrBillingLogError=uplrBillingLogError, uplaDatabaseConnectionUp=uplaDatabaseConnectionUp, uplRadiusServerStatsEntry=uplRadiusServerStatsEntry, uppgStartup=uppgStartup, uplaFaxMgrConnectionDown=uplaFaxMgrConnectionDown, uplmhPrivateHTTPMaxDispQueueLength=uplmhPrivateHTTPMaxDispQueueLength, uplmStackServiceStats=uplmStackServiceStats, uplrdProcessId=uplrdProcessId, uplrDatabaseConnectionUp=uplrDatabaseConnectionUp, uplAgentStackServiceEntry=uplAgentStackServiceEntry, uplaMeanRetriesPerThousandTxn=uplaMeanRetriesPerThousandTxn, uplbShutdown=uplbShutdown, 
uplMessengerHTTPStatsTable=uplMessengerHTTPStatsTable, upsUPAdminServiceDown=upsUPAdminServiceDown, upiInitDescriptionTable=upiInitDescriptionTable, uplbDiskSpaceLow=uplbDiskSpaceLow, upPushPpgTrapInfo=upPushPpgTrapInfo, uplAgentWebAccessStatsTable=uplAgentWebAccessStatsTable, uplaTransactionWapErrors=uplaTransactionWapErrors, upPushPpgDescriptionEntry=upPushPpgDescriptionEntry, uplaDynamicUpdateStarted=uplaDynamicUpdateStarted, uppPAPSuspended=uppPAPSuspended, uplCertRequesterTrapInfo=uplCertRequesterTrapInfo, uplaHttpsDeviationOfResponseTime=uplaHttpsDeviationOfResponseTime, uppPrivateHTTPServiceDown=uppPrivateHTTPServiceDown, uplbDatabaseConnectionDown=uplbDatabaseConnectionDown, uplmPublicHTTPSServiceDown=uplmPublicHTTPSServiceDown, uppdPAPIdentifier=uppdPAPIdentifier, upldState=upldState, upPushPapTrapInfo=upPushPapTrapInfo, upldUplAgentsLoaded=upldUplAgentsLoaded, uppdStartupTime=uppdStartupTime, uppfnPAPIndex=uppfnPAPIndex)
mibBuilder.exportSymbols("OPENWAVE-MIB", uplmhPublicHTTPTimesDispQueueFull=uplmhPublicHTTPTimesDispQueueFull, uplaTransactionsSucceeded=uplaTransactionsSucceeded, uplAgentWapErrorStatsSummaryEntry=uplAgentWapErrorStatsSummaryEntry, upsProxyServiceUp=upsProxyServiceUp, uplaKeyExchanges=uplaKeyExchanges, uplmhPublicHTTPSOpenConnections=uplmhPublicHTTPSOpenConnections, upiInitShutdown=upiInitShutdown, uplMessengerAirlinkStatsTable=uplMessengerAirlinkStatsTable, uplmdHostName=uplmdHostName, uplrSMSCConnectionUp=uplrSMSCConnectionUp, uplaRepliesDelivered=uplaRepliesDelivered, uplrsdProcessId=uplrsdProcessId, uplrClientIpAddress=uplrClientIpAddress, upldPortNumber=upldPortNumber, uppfnPAPIdentifier=uppfnPAPIdentifier, uplcrInvalidCertResponse=uplcrInvalidCertResponse, uplrdIpAddress=uplrdIpAddress, upPushPapForwardedNtfnsEntry=upPushPapForwardedNtfnsEntry, upipChildProcessIpAddr=upipChildProcessIpAddr, uplrStartup=uplrStartup, uplmnsAvgNtfnsMarkedUnDelvrPerSec=uplmnsAvgNtfnsMarkedUnDelvrPerSec, uplBillingMgrFileStatsTable=uplBillingMgrFileStatsTable, uplmnsNumExpired=uplmnsNumExpired, uplmStartup=uplmStartup, uplAgentStackServiceName=uplAgentStackServiceName, uplwCLIDMappingError=uplwCLIDMappingError, upgFailToStart=upgFailToStart, uplmnsPublicHTTPReqReceived=uplmnsPublicHTTPReqReceived, uplaWapErrorCode=uplaWapErrorCode, upldUplAgentConnectionDown=upldUplAgentConnectionDown, uplDispatcherDescription=uplDispatcherDescription, upipChildProcessStartTime=upipChildProcessStartTime, upsUPPimServiceUp=upsUPPimServiceUp, uplaTotalMappingTableHits=uplaTotalMappingTableHits, uplawsAgentIdentifier=uplawsAgentIdentifier, uplrInternalError=uplrInternalError, uplAgentSessionStatsEntry=uplAgentSessionStatsEntry, uplaTrapInfo=uplaTrapInfo, upldUplAgentId=upldUplAgentId, uplAgentStackServiceMeanTableItems=uplAgentStackServiceMeanTableItems, uplaBillingInitError=uplaBillingInitError, upsUPAdminServiceUp=upsUPAdminServiceUp, uppStartup=uppStartup, uplrssIpAddress=uplrssIpAddress, uplaHttpMeanResponseTime=uplaHttpMeanResponseTime, upsProxyServiceNormal=upsProxyServiceNormal, uplNbRouterAirlinkTable=uplNbRouterAirlinkTable, uppFailToStart=uppFailToStart, uplaRequestsReceived=uplaRequestsReceived, uplmnsPublicHTTPSReqReceived=uplmnsPublicHTTPSReqReceived, uplbStartup=uplbStartup, uplaDynamicUpdateStopped=uplaDynamicUpdateStopped, uplAgentStackServiceMeanGarbageCollectTimeDeviation=uplAgentStackServiceMeanGarbageCollectTimeDeviation, uplaTotalErrors=uplaTotalErrors, upipChildProcessHostName=upipChildProcessHostName, uppnsAvgNtfnsAcceptedPerSec=uppnsAvgNtfnsAcceptedPerSec, uplaProcessId=uplaProcessId, uplrTrapInfo=uplrTrapInfo, uplmHdtpStats=uplmHdtpStats, upiChildProcessFailToStart=upiChildProcessFailToStart, upldStartup=upldStartup, uplwTrapInfo=uplwTrapInfo, upsProxyServiceSlow=upsProxyServiceSlow, uplrsTrapInfo=uplrsTrapInfo, uplaDeviationOfResponseTime=uplaDeviationOfResponseTime, uplrsdPortNumber=uplrsdPortNumber, uplaHttpsRequestsStarted=uplaHttpsRequestsStarted, upiAllChildProcessesRestart=upiAllChildProcessesRestart, uplaProtoSessions=uplaProtoSessions, uplRadiusServer=uplRadiusServer, uplmhPublicHTTPTimesAllThreadsBusy=uplmhPublicHTTPTimesAllThreadsBusy, upldRequestsReceived=upldRequestsReceived, uplaMeanResponseTime=uplaMeanResponseTime, uplaSessionsSucceeded=uplaSessionsSucceeded, uplMessengerNtfnCacheTable=uplMessengerNtfnCacheTable, uplAgentStackServiceLoaded=uplAgentStackServiceLoaded, uppnsPrivateHTTPReqAccepted=uppnsPrivateHTTPReqAccepted, 
uplmhPublicHTTPSTimesAllThreadsBusy=uplmhPublicHTTPSTimesAllThreadsBusy, upidInitStartupTime=upidInitStartupTime, uplrsdIpAddress=uplrsdIpAddress, uplaBillingLogError=uplaBillingLogError, upsProxyServiceDown=upsProxyServiceDown, uplmhPrivateHTTPOpenConnections=uplmhPrivateHTTPOpenConnections, uppDatabaseConnectionDown=uppDatabaseConnectionDown, uplmdPublicHTTPPortNumber=uplmdPublicHTTPPortNumber, uplCertRequesterDescriptionEntry=uplCertRequesterDescriptionEntry, uplbTrapInfo=uplbTrapInfo, uppnsPublicHTTPSReqReceived=uppnsPublicHTTPSReqReceived, upiChildProcessShutdown=upiChildProcessShutdown, uplrdHostName=uplrdHostName, uplmncTotalNumOfPendingNtfns=uplmncTotalNumOfPendingNtfns, uplbDiskSpaceCriticallyLow=uplbDiskSpaceCriticallyLow, upipInitIpAddr=upipInitIpAddr, uplAgentErrorStatsDetailEntry=uplAgentErrorStatsDetailEntry, uplarcsAgentIdentifier=uplarcsAgentIdentifier, uplrClientHostName=uplrClientHostName, uplrClientProcessId=uplrClientProcessId, uplrSMSCConnectionDown=uplrSMSCConnectionDown, upsBookmarksServiceNormal=upsBookmarksServiceNormal, uplAgentStackServiceMeanBucketChainLength=uplAgentStackServiceMeanBucketChainLength, uplmhPrivateHTTPTimesAllThreadsBusy=uplmhPrivateHTTPTimesAllThreadsBusy, uplmhPublicHTTPSTimesDispQueueFull=uplmhPublicHTTPSTimesDispQueueFull, uplbdIpAddress=uplbdIpAddress, uplAgentStackServiceAdaptorThreads=uplAgentStackServiceAdaptorThreads, uplaTransactionsActive=uplaTransactionsActive, uplmdPrivateHTTPPortNumber=uplmdPrivateHTTPPortNumber, upldRequestsDropped=upldRequestsDropped, uplmDBConnectionCacheThreadWaits=uplmDBConnectionCacheThreadWaits, uplrBillingInitError=uplrBillingInitError, uplbfsMaxBillingFileSize=uplbfsMaxBillingFileSize, uplbdHostName=uplbdHostName, upPushPapNtfnStatsTable=upPushPapNtfnStatsTable, upiNoChildProcess=upiNoChildProcess, uplbdProcessId=uplbdProcessId, upsUPPimServiceSlow=upsUPPimServiceSlow, uplNbRouterAirlinkStatsTable=uplNbRouterAirlinkStatsTable, upldUplAgentConnectionUp=upldUplAgentConnectionUp, uplAgentStackServiceMeanBucketChainLengthDeviation=uplAgentStackServiceMeanBucketChainLengthDeviation, uplbdStartupTime=uplbdStartupTime, uplrNbRouterConnectionDown=uplrNbRouterConnectionDown, uplmnsAvgNtfnsDeliveredPerSec=uplmnsAvgNtfnsDeliveredPerSec, uppInternalErrors=uppInternalErrors, uplmnsPrivateHTTPReqReceived=uplmnsPrivateHTTPReqReceived, uplaDeviationOfRetriesPTTxn=uplaDeviationOfRetriesPTTxn, uplaHttpDeviationOfResponseTime=uplaHttpDeviationOfResponseTime, uplaWapErrorName=uplaWapErrorName, upldFailedMappingHits=upldFailedMappingHits, uplrsBillingLogError=uplrsBillingLogError, upsUPWebServiceSlow=upsUPWebServiceSlow, upipChildProcessStopTime=upipChildProcessStopTime, uplWapTrapInfo=uplWapTrapInfo, uppgInternalErrors=uppgInternalErrors, upiInitDescriptionEntry=upiInitDescriptionEntry, uplcrStartup=uplcrStartup, uplmncIpAddress=uplmncIpAddress, upiInitChildProcessStatsEntry=upiInitChildProcessStatsEntry, uplaActiveSessions=uplaActiveSessions, upipChildProcessExeArgs=upipChildProcessExeArgs, uplAgentDescriptionEntry=uplAgentDescriptionEntry, uplaSessionWapErrors=uplaSessionWapErrors, uplrShutdown=uplrShutdown, upitChildProcessId=upitChildProcessId, uplrsBillingInitError=uplrsBillingInitError, uplaDeviceErrors=uplaDeviceErrors, uplAgentStackServiceTableSize=uplAgentStackServiceTableSize, uplmdPublicHTTPSPortNumber=uplmdPublicHTTPSPortNumber, uppPublicHTTPServiceStarted=uppPublicHTTPServiceStarted, uplAgentWapSessionStatsEntry=uplAgentWapSessionStatsEntry, upitChildProcessType=upitChildProcessType, 
uplrssIpMsisdnPairsUpdated=uplrssIpMsisdnPairsUpdated, uplaErrorCode=uplaErrorCode, uppgDatabaseConnectionDown=uppgDatabaseConnectionDown, uplaWapSessionsStarted=uplaWapSessionsStarted, upiChildProcessStart=upiChildProcessStart, upldStartUpTime=upldStartUpTime, uppdPrivateHTTPPortNumber=uppdPrivateHTTPPortNumber, uppPublicHTTPServiceDown=uppPublicHTTPServiceDown, uplbesEventLogFailures=uplbesEventLogFailures, uplNbRouterDescriptionEntry=uplNbRouterDescriptionEntry, uplmPublicHTTPServiceStarted=uplmPublicHTTPServiceStarted, uplAgentStackServiceTableMeanNumberItemsGarbageCollected=uplAgentStackServiceTableMeanNumberItemsGarbageCollected, uplAgentWapWSPSessionStatsTable=uplAgentWapWSPSessionStatsTable, upidInitHostName=upidInitHostName, uplbfsIpAddress=uplbfsIpAddress, uplmdProcessId=uplmdProcessId, uplmncProcessId=uplmncProcessId, upsUPWebServiceNormal=upsUPWebServiceNormal, uplaShutdown=uplaShutdown, uplmhPublicHTTPSCurrentDispQueueLen=uplmhPublicHTTPSCurrentDispQueueLen, upldShutdown=upldShutdown, uppPPGInterfaceDown=uppPPGInterfaceDown, upPushPpgDescriptionTable=upPushPpgDescriptionTable, uplasstAgentIdentifier=uplasstAgentIdentifier, upMail=upMail, uplaHttpsMeanResponseTime=uplaHttpsMeanResponseTime, uplrsdStartupTime=uplrsdStartupTime, uplAgentWapErrorStatsDetailEntry=uplAgentWapErrorStatsDetailEntry, uplmhPublicHTTPMaxThreads=uplmhPublicHTTPMaxThreads, uplAgentStackServiceIdentifier=uplAgentStackServiceIdentifier, uplaDatabaseConnectionDown=uplaDatabaseConnectionDown, upidInitVersion=upidInitVersion, uplmhPublicHTTPBusyThreads=uplmhPublicHTTPBusyThreads, upPushProxy=upPushProxy, uplrStackServiceStats=uplrStackServiceStats, uplAgentStackServiceMeanGarbageCollectTime=uplAgentStackServiceMeanGarbageCollectTime, upLinkStaticInfo=upLinkStaticInfo, uplAgentProxyStats=uplAgentProxyStats, uplNbRouterTrapInfo=uplNbRouterTrapInfo, uplmnsPublicHTTPReqProcessed=uplmnsPublicHTTPReqProcessed, uplrClientConnectionDown=uplrClientConnectionDown, upsBookmarksServiceSlow=upsBookmarksServiceSlow, uplmPrivateHTTPServiceDown=uplmPrivateHTTPServiceDown, uppgnsPPGIdentifier=uppgnsPPGIdentifier, uplaFailedMappingHits=uplaFailedMappingHits, uplaErrorSeverity=uplaErrorSeverity, upipChildProcessId=upipChildProcessId, uplaWapStats=uplaWapStats, uppdHostName=uppdHostName, uplawesAgentIdentifier=uplawesAgentIdentifier, uplAgentStackServiceTable=uplAgentStackServiceTable, uplrClientConnectionUp=uplrClientConnectionUp, uplmnsSignalsSent=uplmnsSignalsSent, uplcrdStartupTime=uplcrdStartupTime, uplbesDirectTransferFailures=uplbesDirectTransferFailures, uplbesProcessId=uplbesProcessId, uplAgentRadiusClientStatsEntry=uplAgentRadiusClientStatsEntry, upipsChildProcessType=upipsChildProcessType, uplaInvalidConfig=uplaInvalidConfig, uppgnsAvgNtfnsDeliveredPerSec=uppgnsAvgNtfnsDeliveredPerSec, uplaErrorName=uplaErrorName, uplMessengerAirlinkTable=uplMessengerAirlinkTable, services=services, uppgdPPGIdentifier=uppgdPPGIdentifier, uplaWapErrorCount=uplaWapErrorCount, uplrssStartAccMsgReceived=uplrssStartAccMsgReceived, uplaMessengerConnectionDown=uplaMessengerConnectionDown, uplaAgentIdentifier=uplaAgentIdentifier, upsUPMailServiceUp=upsUPMailServiceUp, upiInit=upiInit, upiAllChildProcessesStop=upiAllChildProcessesStop, uplbDiskSpaceError=uplbDiskSpaceError, uppgnsTotalNumOfPendingNtfns=uppgnsTotalNumOfPendingNtfns, uplaRequestsNotValid=uplaRequestsNotValid, uplaTotalWapErrors=uplaTotalWapErrors, uplatsAgentIdentifier=uplatsAgentIdentifier, upiInitTrapInfo=upiInitTrapInfo, 
uplAgentErrorStatsDetailTable=uplAgentErrorStatsDetailTable, uplcrFatalError=uplcrFatalError, upldProcessId=upldProcessId, upsUPWebServiceDown=upsUPWebServiceDown, uplaWapInvokeTpdus=uplaWapInvokeTpdus, DisplayString=DisplayString, upLink=upLink, uplAgentStackServiceTableMeanNumberItemsGarbageCollectedDeviatn=uplAgentStackServiceTableMeanNumberItemsGarbageCollectedDeviatn, uplrssStopAccMsgReceived=uplrssStopAccMsgReceived, uppnsAvgNtfnsReceivedPerSec=uppnsAvgNtfnsReceivedPerSec, uppfnPPGForwardedNtfns=uppfnPPGForwardedNtfns, uplrDatabaseConnectionDown=uplrDatabaseConnectionDown, uplmhPrivateHTTPMaxThreads=uplmhPrivateHTTPMaxThreads, upldTotalMappingTableHits=upldTotalMappingTableHits, uplmhPrivateHTTPTimesDispQueueFull=uplmhPrivateHTTPTimesDispQueueFull, uplAgentSessionStatsTable=uplAgentSessionStatsTable, uplrssIpMsisdnPairsDeleted=uplrssIpMsisdnPairsDeleted, upPushPapDescriptionTable=upPushPapDescriptionTable, uplmDBConnectionCacheDeviationOfWaitTime=uplmDBConnectionCacheDeviationOfWaitTime, upPushPapForwardedNtfnsTable=upPushPapForwardedNtfnsTable, uplmTrapInfo=uplmTrapInfo, upipsInitProcessType=upipsInitProcessType, uplbfsProcessId=uplbfsProcessId, uplmnsCompletedNotifications=uplmnsCompletedNotifications, upsHomePageServiceSlow=upsHomePageServiceSlow, uplmDBMaxConnections=uplmDBMaxConnections, upitTrapInfo=upitTrapInfo, uplmPublicHTTPServiceDown=uplmPublicHTTPServiceDown)
mibBuilder.exportSymbols("OPENWAVE-MIB", uplaedAgentIdentifier=uplaedAgentIdentifier, upidInitIpAddr=upidInitIpAddr, uplAgentStackServiceAppProtoName=uplAgentStackServiceAppProtoName, upiInitChildProcessStatsTable=upiInitChildProcessStatsTable, uplaweAgentIdentifier=uplaweAgentIdentifier, uplCertRequester=uplCertRequester, uplcrdHostName=uplcrdHostName, uplmInternalErrors=uplmInternalErrors, upldKeyExchanges=upldKeyExchanges, upsPushServiceUp=upsPushServiceUp, uplmhPublicHTTPCurrentDispQueueLen=uplmhPublicHTTPCurrentDispQueueLen, uplBillingMgrDescriptionTable=uplBillingMgrDescriptionTable, uplaWapErrorClass=uplaWapErrorClass, upsUPMailServiceSlow=upsUPMailServiceSlow, uplDispRadiusClientStats=uplDispRadiusClientStats, uplMessengerHTTPStatsEntry=uplMessengerHTTPStatsEntry, uplrsStartup=uplrsStartup, uplCertRequesterDescriptionTable=uplCertRequesterDescriptionTable, uplAgentErrorStatsSummaryEntry=uplAgentErrorStatsSummaryEntry, upsTrapInfo=upsTrapInfo, uplrssIpMsisdnPairsInserted=uplrssIpMsisdnPairsInserted, uplaTransactionsStarted=uplaTransactionsStarted, uplaRequestsDuplicated=uplaRequestsDuplicated, uplaStackServiceStats=uplaStackServiceStats)
| 138.181299 | 11,912 | 0.787729 |
4a23555eff788fa6188ab6355ad62a5e2404a9ab | 23,524 | py | Python | appengine/swarming/swarming_bot/api/platforms/win.py | hixio-mh/luci-py | 5e2606b0db8f5dac25e9f84fdf71d103da393894 | [
"Apache-2.0"
] | null | null | null | appengine/swarming/swarming_bot/api/platforms/win.py | hixio-mh/luci-py | 5e2606b0db8f5dac25e9f84fdf71d103da393894 | [
"Apache-2.0"
] | 1 | 2022-03-02T11:45:18.000Z | 2022-03-02T11:45:18.000Z | appengine/swarming/swarming_bot/api/platforms/win.py | hixio-mh/luci-py | 5e2606b0db8f5dac25e9f84fdf71d103da393894 | [
"Apache-2.0"
] | 1 | 2021-07-23T15:32:55.000Z | 2021-07-23T15:32:55.000Z | # Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Windows specific utility functions."""
import ctypes
import logging
import os
import platform
import re
import string
import subprocess
import sys
import six
from utils import tools
from api.platforms import common
from api.platforms import gpu
## Private stuff.
_WIN32_CLIENT_NAMES = {
u'5.0': u'2000',
u'5.1': u'XP',
u'5.2': u'XP',
u'6.0': u'Vista',
u'6.1': u'7',
u'6.2': u'8',
u'6.3': u'8.1',
u'10.0': u'10',
}
_WIN32_SERVER_NAMES = {
u'5.2': u'2003Server',
u'6.0': u'2008Server',
u'6.1': u'2008ServerR2',
u'6.2': u'2012Server',
u'6.3': u'2012ServerR2',
u'10.0': u'Server',
}
@tools.cached
def _get_mount_points():
"""Returns the list of 'fixed' drives in format 'X:\\'."""
ctypes.windll.kernel32.GetDriveTypeW.argtypes = (ctypes.c_wchar_p,)
ctypes.windll.kernel32.GetDriveTypeW.restype = ctypes.c_ulong
DRIVE_FIXED = 3
# https://msdn.microsoft.com/library/windows/desktop/aa364939.aspx
return [
u'%s:\\' % letter
for letter in string.ascii_lowercase
if ctypes.windll.kernel32.GetDriveTypeW(letter + ':\\') == DRIVE_FIXED
]
def _get_disk_info(mount_point):
"""Returns total and free space on a mount point in Mb."""
total_bytes = ctypes.c_ulonglong(0)
free_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(
ctypes.c_wchar_p(mount_point), None, ctypes.pointer(total_bytes),
ctypes.pointer(free_bytes))
return {
u'free_mb': round(free_bytes.value / 1024. / 1024., 1),
u'size_mb': round(total_bytes.value / 1024. / 1024., 1),
}
@tools.cached
def _get_win32com():
"""Returns an uninitialized WMI client."""
try:
import pythoncom
from win32com import client # pylint: disable=F0401
return client, pythoncom
except ImportError:
# win32com is included in pywin32, which is an optional package that is
# installed by Swarming devs. If you find yourself needing it to run without
# pywin32, for example in cygwin, please send us a CL with the
# implementation that doesn't use pywin32.
return None, None
@tools.cached
def _get_wmi_wbem():
"""Returns a WMI client connected to localhost ready to do queries."""
client, _ = _get_win32com()
if not client:
return None
wmi_service = client.Dispatch('WbemScripting.SWbemLocator')
return wmi_service.ConnectServer('.', 'root\\cimv2')
@tools.cached
def _get_wmi_wbem_for_storage():
"""
Returns a WMI client connected to localhost ready to do queries for storage.
"""
client, pythoncom = _get_win32com()
if not client:
return None
wmi_service = client.Dispatch('WbemScripting.SWbemLocator')
try:
return wmi_service.ConnectServer('.', 'Root\\Microsoft\\Windows\\Storage')
except pythoncom.com_error:
return None
# Regexp for _get_os_numbers()
_CMD_RE = r'\[version (\d+\.\d+)\.(\d+(?:\.\d+|))\]'
@tools.cached
def _get_os_numbers():
"""Returns the normalized OS version and build numbers as strings.
Actively work around AppCompat version lie shim.
Returns:
- 5.1, 6.1, etc. There is no way to distinguish between Windows 7
and Windows Server 2008R2 since they both report 6.1.
- build number, like '10240'. Mostly relevant on Windows 10.
"""
# Windows is lying to us until python adds to its manifest:
# <supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
# and it doesn't.
# So ask nicely to cmd.exe instead, which will always happily report the right
# version. Here's some sample output:
# - XP: Microsoft Windows XP [Version 5.1.2600]
# - Win10: Microsoft Windows [Version 10.0.10240]
# - Win7 or Win2K8R2: Microsoft Windows [Version 6.1.7601]
# - Win1709: Microsoft Windows [Version 10.0.16299.19]
#
# Some locale (like fr_CA) use a lower case 'version'.
out = subprocess.check_output(['cmd.exe', '/c', 'ver']).strip().decode()
match = re.search(_CMD_RE, out, re.IGNORECASE)
if not match:
# Failed to start cmd.exe, that's really bad. Return a dummy value to not
# crash.
logging.error('Failed to run cmd.exe /c ver:\n%s', out)
return '0.0', '0'
return match.group(1), match.group(2)
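# Illustrative sketch (not part of the original module) of how _CMD_RE splits a
# sample `ver` banner; the banner string below is hypothetical:
#   re.search(_CMD_RE, 'Microsoft Windows [Version 10.0.16299.19]', re.IGNORECASE).groups()
#   -> ('10.0', '16299.19')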
def _is_topmost_window(hwnd):
"""Returns True if |hwnd| is a topmost window."""
ctypes.windll.user32.GetWindowLongW.restype = ctypes.c_long # LONG
ctypes.windll.user32.GetWindowLongW.argtypes = [
ctypes.c_void_p, # HWND
ctypes.c_int
]
# -20 is GWL_EXSTYLE
ex_styles = ctypes.windll.user32.GetWindowLongW(hwnd, -20)
# 8 is WS_EX_TOPMOST
return bool(ex_styles & 8)
def _get_window_class(hwnd):
"""Returns the class name of |hwnd|."""
ctypes.windll.user32.GetClassNameW.restype = ctypes.c_int
ctypes.windll.user32.GetClassNameW.argtypes = [
ctypes.c_void_p, # HWND
ctypes.c_wchar_p,
ctypes.c_int
]
name = ctypes.create_unicode_buffer(257)
name_len = ctypes.windll.user32.GetClassNameW(hwnd, name, len(name))
if name_len <= 0 or name_len >= len(name):
raise ctypes.WinError(descr='GetClassNameW failed; %s' %
ctypes.FormatError())
return name.value
## Public API.
def from_cygwin_path(path):
"""Converts an absolute cygwin path to a standard Windows path."""
if not path.startswith('/cygdrive/'):
logging.error('%s is not a cygwin path', path)
return None
# Remove the cygwin path identifier.
path = path[len('/cygdrive/'):]
# Add : after the drive letter.
path = path[:1] + ':' + path[1:]
return path.replace('/', '\\')
def to_cygwin_path(path):
"""Converts an absolute standard Windows path to a cygwin path."""
if len(path) < 2 or path[1] != ':':
# TODO(maruel): Accept \\?\ and \??\ if necessary.
logging.error('%s is not a win32 path', path)
return None
return '/cygdrive/%s/%s' % (path[0].lower(), path[3:].replace('\\', '/'))
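# A quick worked example of the two path helpers above (the paths are illustrative):
#   to_cygwin_path('c:\\Users\\foo')          -> '/cygdrive/c/Users/foo'
#   from_cygwin_path('/cygdrive/c/Users/foo') -> 'c:\\Users\\foo'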
@tools.cached
def get_os_version_number():
"""Returns the normalized OS version number as a string.
Returns:
- '5.1', '6.1', '10.0', etc. There is no way to distinguish between Windows
7 and Windows Server 2008R2 since they both report 6.1.
"""
return _get_os_numbers()[0]
@tools.cached
def get_client_versions():
"""Gets the client versions (or client equivalent for server).
Returns:
A list of client versions (or client equivalent for server).
E.g. '10' for Windows 10 and Windows Server 2016.
"""
  version_number = get_os_version_number()
  if version_number in _WIN32_CLIENT_NAMES:
    return [_WIN32_CLIENT_NAMES[version_number]]
return []
@tools.cached
def get_os_version_names():
"""Returns the marketing/user-friendly names of the OS.
The return value contains the base marketing name, e.g. Vista, 10, or
2008Server. For Windows Server starting with 2016, this value is always
"Server".
For versions released before Windows 10, the return value also contains the
name with the service pack, e.g. 7-SP1 or 2012ServerR2-SP0.
For Windows 10 and Windows Server starting with 2016, the return value
includes "10-" or "Server-" followed by one or more parts of the build number.
E.g. for Windows 10 with build number 18362.207, the return value includes
10-18362, 10-18362.207. For Windows Server 2019 with build number 17763.557,
the return value includes Server-17763, Server-17763.557.
"""
# Python keeps a local map in platform.py and it is updated at newer python
# release. Since our python release is a bit old, do not rely on it.
is_server = sys.getwindowsversion().product_type != 1
lookup = _WIN32_SERVER_NAMES if is_server else _WIN32_CLIENT_NAMES
version_number, build_number = _get_os_numbers()
marketing_name = lookup.get(version_number, version_number)
if version_number == u'10.0':
rv = [marketing_name]
# Windows 10 doesn't have service packs, the build number now is the
# reference number. More discussion in
# https://docs.google.com/document/d/1iF1tbc1oedCQ9J6aL7sHeuaayY3bs52fuvKxvLLZ0ig
if '.' in build_number:
major_version = build_number.split(u'.')[0]
rv.append(u'%s-%s' % (marketing_name, major_version))
rv.append(u'%s-%s' % (marketing_name, build_number))
rv.sort()
return rv
service_pack = platform.win32_ver()[2] or u'SP0'
return [marketing_name, u'%s-%s' % (marketing_name, service_pack)]
def get_startup_dir():
# Do not use environment variables since it wouldn't work reliably on cygwin.
# TODO(maruel): Stop hardcoding the values and use the proper function
# described below. Postponed to a later CL since I'll have to spend quality
# time on Windows to ensure it works well.
# https://msdn.microsoft.com/library/windows/desktop/bb762494.aspx
# CSIDL_STARTUP = 7
# https://msdn.microsoft.com/library/windows/desktop/bb762180.aspx
# shell.SHGetFolderLocation(NULL, CSIDL_STARTUP, NULL, NULL, string)
if get_os_version_number() == u'5.1':
startup = 'Start Menu\\Programs\\Startup'
else:
# Vista+
startup = (
'AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup')
# On cygwin 1.5, which is still used on some bots, '~' points inside
# c:\\cygwin\\home so use USERPROFILE.
return '%s\\%s\\' % (
os.environ.get('USERPROFILE', 'DUMMY, ONLY USED IN TESTS'), startup)
def get_disks_info():
"""Returns disk infos on all mount point in Mb."""
return {p: _get_disk_info(p) for p in _get_mount_points()}
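# The shape of the result, with purely illustrative numbers, is roughly:
#   {u'c:\\': {u'free_mb': 10240.0, u'size_mb': 102400.0}, ...}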
@tools.cached
def get_audio():
"""Returns audio device as listed by WMI."""
wbem = _get_wmi_wbem()
if not wbem:
return None
# https://msdn.microsoft.com/library/aa394463.aspx
return [
device.Name
for device in wbem.ExecQuery('SELECT * FROM Win32_SoundDevice')
if device.Status == 'OK'
]
@tools.cached
def get_visual_studio_versions():
"""Retrieves all installed Visual Studio versions.
The returned version list is sorted such that the first element is the highest
version number.
Returns:
A list of Visual Studio version strings.
"""
from six.moves import winreg
try:
k = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Wow6432Node\\Microsoft\\VSCommon')
# pylint: disable=undefined-variable
except WindowsError:
return None
try:
versions = []
for i in range(winreg.QueryInfoKey(k)[0]):
sub_key = winreg.EnumKey(k, i)
if re.match(r'\d+\.\d+', sub_key):
versions.append(sub_key)
return sorted(versions, key=float, reverse=True)
finally:
k.Close()
@tools.cached
def get_cpuinfo():
# Ironically, the data returned by WMI is mostly worthless.
# Another option is IsProcessorFeaturePresent().
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms724482.aspx
from six.moves import winreg
k = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
'HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0')
try:
identifier, _ = winreg.QueryValueEx(k, 'Identifier')
match = re.match(r'^.+ Family (\d+) Model (\d+) Stepping (\d+)$',
identifier)
name, _ = winreg.QueryValueEx(k, 'ProcessorNameString')
vendor, _ = winreg.QueryValueEx(k, 'VendorIdentifier')
return {
u'model': [
int(match.group(1)),
int(match.group(2)),
int(match.group(3))
],
u'name':
name,
u'vendor':
vendor,
}
finally:
k.Close()
def get_cpu_type_with_wmi():
# Get CPU architecture type using WMI.
# This is a fallback for when platform.machine() returns None.
# References:
# https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-processor#properties
# https://source.winehq.org/source/include/winnt.h#L680
# https://github.com/puppetlabs/facter/blob/2.x/lib/facter/hardwaremodel.rb#L28
wbem = _get_wmi_wbem()
if not wbem:
return None
_, pythoncom = _get_win32com()
try:
q = 'SELECT Architecture, Level, AddressWidth FROM Win32_Processor'
for cpu in wbem.ExecQuery(q):
def intel_arch():
arch_level = min(cpu.Level, 6)
return 'i%d86' % arch_level # e.g. i386, i686
if cpu.Architecture == 10: # PROCESSOR_ARCHITECTURE_IA32_ON_WIN64
return 'i686'
if cpu.Architecture == 9: # PROCESSOR_ARCHITECTURE_AMD64
if cpu.AddressWidth == 32:
return intel_arch()
return 'amd64'
if cpu.Architecture == 0: # PROCESSOR_ARCHITECTURE_INTEL
return intel_arch()
except pythoncom.com_error as e:
# This generally happens when this is called as the host is shutting down.
logging.error('get_cpu_type_with_wmi(): %s', e)
# Unknown or exception.
return None
def get_gpu():
"""Returns video device as listed by WMI.
  Not cached as the GPU driver may change underneath.
"""
wbem = _get_wmi_wbem()
if not wbem:
return None, None
_, pythoncom = _get_win32com()
dimensions = set()
state = set()
# https://msdn.microsoft.com/library/aa394512.aspx
try:
for device in wbem.ExecQuery('SELECT * FROM Win32_VideoController'):
# The string looks like:
# PCI\VEN_15AD&DEV_0405&SUBSYS_040515AD&REV_00\3&2B8E0B4B&0&78
pnp_string = device.PNPDeviceID
ven_id = u'UNKNOWN'
dev_id = u'UNKNOWN'
match = re.search(r'VEN_([0-9A-F]{4})', pnp_string)
if match:
ven_id = match.group(1).lower()
match = re.search(r'DEV_([0-9A-F]{4})', pnp_string)
if match:
dev_id = match.group(1).lower()
dev_name = device.VideoProcessor or u''
version = device.DriverVersion or u''
ven_name, dev_name = gpu.ids_to_names(
ven_id, u'Unknown', dev_id, dev_name)
dimensions.add(ven_id)
dimensions.add(u'%s:%s' % (ven_id, dev_id))
if version:
dimensions.add(u'%s:%s-%s' % (ven_id, dev_id, version))
state.add(u'%s %s %s' % (ven_name, dev_name, version))
else:
state.add(u'%s %s' % (ven_name, dev_name))
except pythoncom.com_error as e:
# This generally happens when this is called as the host is shutting down.
logging.error('get_gpu(): %s', e)
return sorted(dimensions), sorted(state)
@tools.cached
def get_integrity_level():
"""Returns the integrity level of the current process as a string.
TODO(maruel): It'd be nice to make it work on cygwin. The problem is that
  ctypes.windll is inaccessible and it is not known to the author how to use
stdcall convention through ctypes.cdll.
"""
if get_os_version_number() == u'5.1':
# Integrity level is Vista+.
return None
mapping = {
0x0000: u'untrusted',
0x1000: u'low',
0x2000: u'medium',
0x2100: u'medium high',
0x3000: u'high',
0x4000: u'system',
0x5000: u'protected process',
}
# This was specifically written this way to work on cygwin except for the
# windll part. If someone can come up with a way to do stdcall on cygwin, that
# would be appreciated.
BOOL = ctypes.c_long
DWORD = ctypes.c_ulong
HANDLE = ctypes.c_void_p
class SID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
('Sid', ctypes.c_void_p),
('Attributes', DWORD),
]
class TOKEN_MANDATORY_LABEL(ctypes.Structure):
_fields_ = [
('Label', SID_AND_ATTRIBUTES),
]
TOKEN_READ = DWORD(0x20008)
# Use the same casing as in the C declaration:
# https://msdn.microsoft.com/library/windows/desktop/aa379626.aspx
TokenIntegrityLevel = ctypes.c_int(25)
ERROR_INSUFFICIENT_BUFFER = 122
# All the functions used locally. First open the process' token, then query
# the SID to know its integrity level.
ctypes.windll.kernel32.GetLastError.argtypes = ()
ctypes.windll.kernel32.GetLastError.restype = DWORD
ctypes.windll.kernel32.GetCurrentProcess.argtypes = ()
ctypes.windll.kernel32.GetCurrentProcess.restype = ctypes.c_void_p
ctypes.windll.advapi32.OpenProcessToken.argtypes = (HANDLE, DWORD,
ctypes.POINTER(HANDLE))
ctypes.windll.advapi32.OpenProcessToken.restype = BOOL
ctypes.windll.advapi32.GetTokenInformation.argtypes = (HANDLE, ctypes.c_long,
ctypes.c_void_p, DWORD,
ctypes.POINTER(DWORD))
ctypes.windll.advapi32.GetTokenInformation.restype = BOOL
ctypes.windll.advapi32.GetSidSubAuthorityCount.argtypes = [ctypes.c_void_p]
ctypes.windll.advapi32.GetSidSubAuthorityCount.restype = ctypes.POINTER(
ctypes.c_ubyte)
ctypes.windll.advapi32.GetSidSubAuthority.argtypes = (ctypes.c_void_p, DWORD)
ctypes.windll.advapi32.GetSidSubAuthority.restype = ctypes.POINTER(DWORD)
# First open the current process token, query it, then close everything.
token = ctypes.c_void_p()
proc_handle = ctypes.windll.kernel32.GetCurrentProcess()
if not ctypes.windll.advapi32.OpenProcessToken(proc_handle, TOKEN_READ,
ctypes.byref(token)):
logging.error('Failed to get process\' token')
return None
if token.value == 0:
logging.error('Got a NULL token')
return None
try:
# The size of the structure is dynamic because the TOKEN_MANDATORY_LABEL
    # used will have the SID appended right after the TOKEN_MANDATORY_LABEL in
# the heap allocated memory block, with .Label.Sid pointing to it.
info_size = DWORD()
if ctypes.windll.advapi32.GetTokenInformation(token, TokenIntegrityLevel,
ctypes.c_void_p(), info_size,
ctypes.byref(info_size)):
logging.error('GetTokenInformation() failed expectation')
return None
if info_size.value == 0:
logging.error('GetTokenInformation() returned size 0')
return None
if ctypes.windll.kernel32.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
logging.error('GetTokenInformation(): Unknown error: %d',
ctypes.windll.kernel32.GetLastError())
return None
token_info = TOKEN_MANDATORY_LABEL()
ctypes.resize(token_info, info_size.value)
if not ctypes.windll.advapi32.GetTokenInformation(
token, TokenIntegrityLevel, ctypes.byref(token_info), info_size,
ctypes.byref(info_size)):
logging.error(
'GetTokenInformation(): Unknown error with buffer size %d: %d',
info_size.value,
ctypes.windll.kernel32.GetLastError())
return None
p_sid_size = ctypes.windll.advapi32.GetSidSubAuthorityCount(
token_info.Label.Sid)
res = ctypes.windll.advapi32.GetSidSubAuthority(
token_info.Label.Sid, p_sid_size.contents.value - 1)
value = res.contents.value
return mapping.get(value) or u'0x%04x' % value
finally:
ctypes.windll.kernel32.CloseHandle(token)
@tools.cached
def get_physical_ram():
"""Returns the amount of installed RAM in Mb, rounded to the nearest number.
"""
# https://msdn.microsoft.com/library/windows/desktop/aa366589.aspx
class MemoryStatusEx(ctypes.Structure):
_fields_ = [
('dwLength', ctypes.c_ulong),
('dwMemoryLoad', ctypes.c_ulong),
('dwTotalPhys', ctypes.c_ulonglong),
('dwAvailPhys', ctypes.c_ulonglong),
('dwTotalPageFile', ctypes.c_ulonglong),
('dwAvailPageFile', ctypes.c_ulonglong),
('dwTotalVirtual', ctypes.c_ulonglong),
('dwAvailVirtual', ctypes.c_ulonglong),
('dwAvailExtendedVirtual', ctypes.c_ulonglong),
]
stat = MemoryStatusEx()
stat.dwLength = ctypes.sizeof(MemoryStatusEx) # pylint: disable=W0201
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
return int(round(stat.dwTotalPhys / 1024. / 1024.))
def get_uptime():
"""Return uptime for Windows 7 and later.
Excludes sleep time.
"""
val = ctypes.c_ulonglong(0)
if ctypes.windll.kernel32.QueryUnbiasedInterruptTime(ctypes.byref(val)) != 0:
return val.value / 10000000.
return 0.
def get_reboot_required():
"""Returns True if the system should be rebooted to apply updates.
This is not guaranteed to notice all conditions that could require reboot.
"""
# Based on https://stackoverflow.com/a/45717438
k = None
from six.moves import winreg
try:
k = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\WindowsUpdate\\'
'Auto Update\\RebootRequired')
_, num_values, _ = winreg.QueryInfoKey(k)
return num_values > 0
except WindowsError: # pylint: disable=undefined-variable
# This error very likely means the RebootRequired key does not exist,
# meaning reboot is not required.
return False
finally:
if k:
k.Close()
@tools.cached
def get_ssd():
"""Returns a list of SSD disks."""
wbem = _get_wmi_wbem_for_storage()
if not wbem:
return ()
# https://docs.microsoft.com/en-us/previous-versions/windows/desktop/stormgmt/msft-physicaldisk
try:
return sorted(
d.DeviceId for d in wbem.ExecQuery('SELECT * FROM MSFT_PhysicalDisk')
if d.MediaType == 4
)
except AttributeError:
return ()
def list_top_windows():
"""Returns a list of the class names of topmost windows.
Windows owned by the shell are ignored.
"""
# The function prototype of EnumWindowsProc.
window_enum_proc_prototype = ctypes.WINFUNCTYPE(
ctypes.c_long, # BOOL
ctypes.c_void_p, # HWND
ctypes.c_void_p) # LPARAM
# Set up various user32 functions that are needed.
ctypes.windll.user32.EnumWindows.restype = ctypes.c_long # BOOL
ctypes.windll.user32.EnumWindows.argtypes = [
window_enum_proc_prototype,
ctypes.py_object
]
ctypes.windll.user32.IsWindowVisible.restype = ctypes.c_long # BOOL
ctypes.windll.user32.IsWindowVisible.argtypes = [ctypes.c_void_p] # HWND
ctypes.windll.user32.IsIconic.restype = ctypes.c_long # BOOL
ctypes.windll.user32.IsIconic.argtypes = [ctypes.c_void_p] # HWND
out = []
def on_window(hwnd, lparam): # pylint: disable=unused-argument
"""Evaluates |hwnd| to determine whether or not it is a topmost window.
In case |hwnd| is a topmost window, its class name is added to the
collection of topmost window class names to return.
"""
# Dig deeper into visible, non-iconified, topmost windows.
if (ctypes.windll.user32.IsWindowVisible(hwnd) and
not ctypes.windll.user32.IsIconic(hwnd) and
_is_topmost_window(hwnd)):
# Fetch the class name and make sure it's not owned by the Windows shell.
class_name = _get_window_class(hwnd)
if (class_name and
class_name not in ['Button', 'Shell_TrayWnd',
'Shell_SecondaryTrayWnd']):
out.append(class_name)
return 1
ctypes.windll.user32.EnumWindows(window_enum_proc_prototype(on_window), None)
return out
@tools.cached
def get_computer_system_info():
"""Return a named tuple, which lists the following params from the WMI class
Win32_ComputerSystemProduct:
name, vendor, version, uuid
"""
wbem = _get_wmi_wbem()
if not wbem:
return None
info = None
# https://msdn.microsoft.com/en-us/library/aa394105
for device in wbem.ExecQuery('SELECT * FROM Win32_ComputerSystemProduct'):
info = common.ComputerSystemInfo(
name=device.Name,
vendor=device.Vendor,
version=device.Version,
serial=device.IdentifyingNumber)
return info
| 33.462304 | 97 | 0.682622 |
4a2355645635ae3f52184b9127fc3590ce385342 | 7,621 | py | Python | desertbot/ircbase.py | Helle-Daryd/DesertBot | 0b497db135a4c08dfbdb59108f830ba12fdc6465 | [
"MIT",
"BSD-3-Clause"
] | 7 | 2018-03-20T17:10:10.000Z | 2021-11-17T18:58:04.000Z | desertbot/ircbase.py | Helle-Daryd/DesertBot | 0b497db135a4c08dfbdb59108f830ba12fdc6465 | [
"MIT",
"BSD-3-Clause"
] | 109 | 2015-08-20T13:16:35.000Z | 2022-01-21T19:40:35.000Z | desertbot/ircbase.py | Helle-Daryd/DesertBot | 0b497db135a4c08dfbdb59108f830ba12fdc6465 | [
"MIT",
"BSD-3-Clause"
] | 7 | 2018-03-29T05:55:01.000Z | 2021-02-05T19:19:39.000Z | """
Copyright © 2012-2014 Desert Bus for Hope Engineering Team
Copyright © 2015-2020 Jacob Riddle (ElementalAlchemist)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL JONAS OBRIST BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import unicodedata
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
from twisted.protocols.basic import LineOnlyReceiver
class ModeType(Enum):
LIST = 0,
PARAM_SET = 1,
PARAM_SET_UNSET = 2,
NO_PARAM = 3
# Taken from txircd:
# https://github.com/ElementalAlchemist/txircd/blob/26dd2ee9d21b846cbd33cd5bd6e8abe7df712034/txircd/ircbase.py
class IRCBase(LineOnlyReceiver):
delimiter = b"\n" # Default to splitting by \n, and then we'll also split \r in the handler
def lineReceived(self, data: bytes) -> None:
for lineRaw in data.split(b"\r"):
line = lineRaw.decode("utf-8", "replace")
line = unicodedata.normalize("NFC", line)
command, params, prefix, tags = self._parseLine(line)
if command:
self.handleCommand(command, params, prefix, tags)
def _parseLine(self, line: str) -> Union[Tuple[str, List[str], str, Dict[str, Optional[str]]], Tuple[None, None, None, None]]:
line = line.replace("\0", "")
if not line:
return None, None, None, None
if line[0] == "@":
if " " not in line:
return None, None, None, None
tagLine, line = line.split(" ", 1)
tags = self._parseTags(tagLine[1:])
else:
tags = {}
prefix = None
if line[0] == ":":
if " " not in line:
return None, None, None, None
prefix, line = line.split(" ", 1)
prefix = prefix[1:]
if " :" in line:
linePart, lastParam = line.split(" :", 1)
else:
linePart = line
lastParam = None
if not linePart:
return None, None, None, None
if " " in linePart:
command, paramLine = linePart.split(" ", 1)
params = paramLine.split(" ")
else:
command = linePart
params = []
while "" in params:
params.remove("")
if lastParam is not None:
params.append(lastParam)
return command.upper(), params, prefix, tags
def _parseTags(self, tagLine: str) -> Dict[str, Optional[str]]:
tags = {}
for tagval in tagLine.split(";"):
if not tagval:
continue
if "=" in tagval:
tag, escapedValue = tagval.split("=", 1)
escaped = False
valueChars = []
for char in escapedValue:
if escaped:
if char == "\\":
valueChars.append("\\")
elif char == ":":
valueChars.append(";")
elif char == "r":
valueChars.append("\r")
elif char == "n":
valueChars.append("\n")
elif char == "s":
valueChars.append(" ")
else:
valueChars.append(char)
escaped = False
continue
if char == "\\":
escaped = True
continue
valueChars.append(char)
value = "".join(valueChars)
else:
tag = tagval
value = None
tags[tag] = value
return tags
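    # A small illustrative sketch (not in the original file) of how a raw IRCv3
    # line flows through _parseLine/_parseTags; the sample line is made up:
    #   _parseLine(r"@msgid=abc;+x=a\sb :nick!u@h PRIVMSG #chan :hi")
    #   -> ("PRIVMSG", ["#chan", "hi"], "nick!u@h", {"msgid": "abc", "+x": "a b"})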
def handleCommand(self, command: str, params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> None:
pass
def sendMessage(self, command: str, *params: str, **kw: Any) -> None:
if "tags" in kw:
tags = self._buildTagString(kw["tags"])
else:
tags = None
if "prefix" in kw:
prefix = kw["prefix"]
else:
prefix = None
if "alwaysPrefixLastParam" in kw:
alwaysPrefixLastParam = kw["alwaysPrefixLastParam"]
else:
alwaysPrefixLastParam = False
params = list(params)
if params:
for param in params[:-1]:
for badChar in (" ", "\r", "\n", "\0"):
if badChar in param:
raise ValueError("Illegal character {!r} found in parameter {!r}".format(badChar, param))
if param and param[0] == ":":
raise ValueError("Parameter {!r} formatted like a final parameter, but it isn't last".format(param))
for badChar in ("\r", "\n", "\0"):
if badChar in params[-1]:
raise ValueError("Illegal character {!r} found in parameter {!r}".format(badChar, params[-1]))
if alwaysPrefixLastParam or not params[-1] or " " in params[-1] or params[-1][0] == ":":
params[-1] = ":{}".format(params[-1])
lineToSend = ""
if tags:
lineToSend += "@{} ".format(tags)
if prefix:
lineToSend += ":{} ".format(prefix)
lineToSend += "{} {}".format(command, " ".join(params))
self.sendLine(lineToSend.replace("\0", ""))
def _buildTagString(self, tags: Dict[str, Optional[str]]) -> str:
tagList = []
for tag, value in tags.items():
for char in tag:
if not char.isalnum() and char not in ("-", "/", "."):
raise ValueError("Illegal character {!r} found in key {!r}".format(char, tag))
if value is None:
tagList.append(tag)
else:
if "\0" in value:
raise ValueError("Illegal character '\\0' found in value for key {!r}".format(tag))
escapedValue = value.replace("\\", "\\\\").replace(";", "\\:").replace(" ", "\\s").replace("\r", "\\r").replace("\n", "\\n")
tagList.append("{}={}".format(tag, escapedValue))
return ";".join(tagList)
def sendLine(self, line: str) -> None:
self.transport.write("{}\r\n".format(line).encode("utf-8"))
| 40.973118 | 140 | 0.552421 |
4a23557cb86462fac2fb1d4e34038074dd9e18b5 | 823 | py | Python | redbot/syntax/__init__.py | Malvoz/redbot | 0edef8d4efefddde49d36cd97e471fc187837169 | [
"MIT"
] | 167 | 2015-01-07T16:34:56.000Z | 2022-02-20T15:20:06.000Z | redbot/syntax/__init__.py | Malvoz/redbot | 0edef8d4efefddde49d36cd97e471fc187837169 | [
"MIT"
] | 180 | 2015-02-01T01:37:53.000Z | 2022-02-17T04:32:01.000Z | redbot/syntax/__init__.py | Malvoz/redbot | 0edef8d4efefddde49d36cd97e471fc187837169 | [
"MIT"
] | 32 | 2015-05-20T21:00:13.000Z | 2022-02-16T10:14:15.000Z | import re
import sys
import types
__all__ = [
"rfc3986",
"rfc5234",
"rfc5322",
"rfc5646",
"rfc5987",
"rfc5988",
"rfc7230",
"rfc7231",
"rfc7232",
"rfc7233",
"rfc7234",
"rfc7235",
]
def check_regex() -> None:
"""Grab all the regex in this module."""
for module_name in __all__:
full_name = "redbot.syntax.%s" % module_name
__import__(full_name)
module = sys.modules[full_name]
for attr_name in dir(module):
attr_value = getattr(module, attr_name, None)
if isinstance(attr_value, bytes):
try:
re.compile(attr_value, re.VERBOSE)
except re.error as why:
print("*", module_name, attr_name, why)
if __name__ == "__main__":
check_regex()
| 21.657895 | 59 | 0.55407 |
4a23570c3a08e4a952749b028739cc9c4be0d158 | 152 | py | Python | __Courses__/Python - Introduction to Python Programming - Udacity/C2. Data Structures/format().py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | __Courses__/Python - Introduction to Python Programming - Udacity/C2. Data Structures/format().py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | __Courses__/Python - Introduction to Python Programming - Udacity/C2. Data Structures/format().py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | length, width, height = 52, 40, 100
print("The dimensions are {{ {} x {} x {} }}".format(length, width, height))
# The dimensions are { 52 x 40 x 100 } | 38 | 76 | 0.625 |
4a235755796edca8737a15a588797c2c676659e1 | 3,482 | py | Python | strings.py | fossabot/one_poker_bot | 5dbe36ba590ba6da8035f42b9a36348052473ca2 | [
"Unlicense"
] | null | null | null | strings.py | fossabot/one_poker_bot | 5dbe36ba590ba6da8035f42b9a36348052473ca2 | [
"Unlicense"
] | 1 | 2018-12-17T18:15:24.000Z | 2018-12-17T18:15:24.000Z | strings.py | fossabot/one_poker_bot | 5dbe36ba590ba6da8035f42b9a36348052473ca2 | [
"Unlicense"
] | 1 | 2018-12-17T18:13:05.000Z | 2018-12-17T18:13:05.000Z | # -*- encoding: utf-8 -*-
class Strings:
TOKEN = "___YOURBOTSTOKEN___" #write here your bot's token
GREETINGS = "`Welcome to One Poker. If you wish to participate press the Play button. `"
RULES = "One Poker is a game in which two people play following these rules:\n1. Both of you will receive 10 lives, you can bet a minimum of 1 life in each round and a maximum of the lives you have at that moment.\n2.The game uses three decks of poker without the joker card, when . \n 3. Each player will receive 2 cards and will have in his hand always 2 cards. \n 4. The value of the cards is determined by its number, being DOWN cards: 2, 3, 4, 5, 6, 7 and UP cards: 8, 9, 10, J, Q, K, A. \n 5. The winning card is always the highest with the exception of the 2 that wins the Ace. \n 6. Once players receive their cards, both of them will be informed if they have UP or DOWN cards in their hand. \n 7. The game ends when one of the players loses all the lives.\n "
HELP = "Here is a list of all the commands Mother Sophie has:\n /disclaimer\n /freeasinfreedom - Link to the code in github.\n help - This command.\n /participate - Starts a new game.\n /quit - Players cannot quit One Poker irl but here you can.\n /rules\n /scores - Display how the game goes so far, including cards, victories and bets. \n /start\n /status - UPs and DOWNS\n /zawa\n"
DISCLAIMER = "I do not own any of names or refences made of the manga neither the original gameplay idea of One Poker."
FREEDOM = "If you want a copy of the code, you can have it in my github: https://github.com/cryogenic-dreams/one_poker_bot/"
NOT_STARTED = "`Game has not started yet.`"
ENTRY = "`Entry completed.\n%s: 10 Lives.`"
GAME_STARTS = "`The game will start now.\nEach player will receive two cards.`"
ALREADY = "`There are already two people playing.`"
ROUND = "`Hand %i:`"
CARDS = "`These are your cards.`"
SELECT_CARD = "`Please select your card.`"
MENU0 = "`Choose from the menu.`"
MENU1 = "Select 1st card"
MENU2 = "Select 2nd card"
CARD_SELECTED = "`{}\'s card received.`"
CARD_SELECTED2 = "`{} has already selected a card.`"
SELECTION_COMPLETED = "`Card selection has been completed.\nNow proceeding to betting phase.`"
QUESTION = "`Check or bet?`"
NOT_PLAYER = "`You are not a player.`"
INPUT_BET = "`Input lives to bet.`"
CHECK = "`Both players check.\nNow revealing the cards.`"
P_FOLDS = "`%s folds.`"
P_CHECKS = "`%s checks.`"
P_CALLS = "`%s calls.`"
LIVES_BET = "`%s bets %i lives.`"
CALL_RAISE_FOLD = "`Call, raise or fold?`"
BET_COMPL = "`Betting complete.`"
BET2 = "`The betting phase has been completed.`"
WIN = "`The winner is %s.`"
P_QUITS = "`%s quits. Game ends.`"
END = "`Game ends.`"
R_LIFE = "`Will you bet your own life?`"
R_GAME = "`A red life has been detected.\n Now registering...\nGame extension confirmed! Red life accepted!\n%s\'s lives now set to one.\nNow engaging the game extension.`"
SCORES = "`Hand %i:\n[%s(bet: %i)-%s | %s(bet:%i)-%s]\nRESULT: %s`"
#for fun
ZAWAZAWA = "ざわ... ざわ..."
DOTS = "💬"
NATURAL = "It\'s only natural."
MEAN = "What does it mean to gamble? What does it..."
PRESSURE = "THE PRESSURE... it\'s crushing!"
BUT = "But..."
STILL = "Still!"
KUYASHII = "KUYASHII"
CATCH = [ZAWAZAWA, NATURAL, MEAN, KUYASHII, STILL, BUT, PRESSURE, ZAWAZAWA, ZAWAZAWA, DOTS]#extra zawas, just because
ZAWA_ON = "`Zawa mode switched ON`"
ZAWA_OFF = "`Zawa mode switched OFF`"
NOISE = "*kzzt*"
| 64.481481 | 770 | 0.681218 |
4a2357760196e3ef5e13d8da80d279e39cc98f4d | 659 | py | Python | 6-NLP/1-Introduction-to-NLP/bot.py | GDaglio/ML-For-Beginners | 0b7ad551455490d0ca46f6de1469ee64ee08fb10 | [
"MIT"
] | null | null | null | 6-NLP/1-Introduction-to-NLP/bot.py | GDaglio/ML-For-Beginners | 0b7ad551455490d0ca46f6de1469ee64ee08fb10 | [
"MIT"
] | null | null | null | 6-NLP/1-Introduction-to-NLP/bot.py | GDaglio/ML-For-Beginners | 0b7ad551455490d0ca46f6de1469ee64ee08fb10 | [
"MIT"
] | null | null | null | import random
random_responses = ["That is quite interesting, please tell me more.",
"I see. Do go on.",
"Why do you say that?",
"Funny weather we've been having, isn't it?",
"Let's change the subject.",
"Did you catch the game last night?"]
num_replies = len(random_responses)
user_input = ""
print("Hi, I am a bot!")
while user_input != "stop":
user_input = input()
if user_input != "stop":
        reply_idx = random.randint(0, num_replies - 1)
print(random_responses[reply_idx])
else:
print("User has stopped interacting") | 29.954545 | 70 | 0.564492 |
4a23599aba3f23af75e4b7bbe17d84f2f2a857e0 | 8,801 | py | Python | src/dieter/parser.py | catseye/Dieter | 8824138c150db2ec6f03559461fd5b60500a54b7 | [
"BSD-3-Clause"
] | null | null | null | src/dieter/parser.py | catseye/Dieter | 8824138c150db2ec6f03559461fd5b60500a54b7 | [
"BSD-3-Clause"
] | null | null | null | src/dieter/parser.py | catseye/Dieter | 8824138c150db2ec6f03559461fd5b60500a54b7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
parser.py -- parser for the Dieter programming language.
$Id: parser.py 382 2010-01-28 23:40:43Z cpressey $
"""
import dieter.ast as ast
class Parser(object):
"""
A recursive-descent parser for Dieter.
"""
def __init__(self, scanner):
"""
Creates a new Parser object. The passed-in scanner is expected
to be compatible with a Scanner object.
"""
self.scanner = scanner
def Dieter(self):
program = ast.Program()
while self.scanner.token in ["order", "module", "forward"]:
if self.scanner.token == "order":
ordering = self.Ordering()
program.add_ordering(ordering)
elif self.scanner.token == "module":
module = self.Module()
program.add_module(module)
elif self.scanner.token == "forward":
forward = self.Forward()
program.add_forward(forward)
else:
self.error("expected order, module or forward, found " +
self.scanner.token)
return program
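    # Hypothetical usage sketch (not in the original file), assuming a compatible
    # scanner object that has already been primed with the first token:
    #   program_ast = Parser(scanner).Dieter()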
def Ordering(self):
self.scanner.expect("order")
qual1 = self.scanner.grab()
self.scanner.expect("<")
qual2 = self.scanner.grab()
ordering = ast.Ordering(qual1, qual2)
return ordering
def Module(self):
self.scanner.expect("module")
name = self.scanner.grab()
fails = False
if self.scanner.token == "fails":
self.scanner.expect("fails")
fails = True
module = ast.Module(name, fails)
while self.scanner.token == "var":
self.scanner.expect("var")
vdecl = self.VarDecl()
module.add_local(vdecl)
while self.scanner.token == "procedure":
pdecl = self.ProcDecl()
module.add_proc(pdecl)
self.scanner.expect("end")
return module
def Forward(self):
self.scanner.expect("forward")
name = self.scanner.grab()
type_expr = ast.ProcTypeExpr([], None)
self.scanner.expect("(")
if self.scanner.token != ")":
arg_type_expr = self.TypeExpr()
type_expr.add_arg_type_expr(arg_type_expr)
while self.scanner.token == ",":
self.scanner.expect(",")
arg_type_expr = self.TypeExpr()
type_expr.add_arg_type_expr(arg_type_expr)
self.scanner.expect(")")
self.scanner.expect(":")
type_expr.return_type_expr = self.TypeExpr()
fwd = ast.FwdDecl(name, type_expr)
return fwd
def VarDecl(self):
name = self.scanner.grab()
self.scanner.expect(":")
type_expr = self.TypeExpr()
var = ast.VarDecl(name, type_expr)
return var
def ProcDecl(self):
self.scanner.expect("procedure")
name = self.scanner.grab()
proc = ast.ProcDecl(name)
self.scanner.expect("(")
if self.scanner.token != ")":
arg = self.VarDecl()
proc.add_arg(arg)
while self.scanner.token == ",":
self.scanner.expect(",")
arg = self.VarDecl()
proc.add_arg(arg)
self.scanner.expect(")")
self.scanner.expect(":")
type_expr = self.TypeExpr()
proc.set_return_type_expr(type_expr)
while self.scanner.token == "var":
self.scanner.expect("var")
vdecl = self.VarDecl()
proc.add_local(vdecl)
stmt = self.Statement()
proc.set_body(stmt)
return proc
def Statement(self):
stmt = None
if self.scanner.token == "begin":
stmt = ast.CompoundStatement()
self.scanner.expect("begin")
while self.scanner.token != "end":
step = self.Statement()
stmt.add_step(step)
self.scanner.expect("end")
elif self.scanner.token == "if":
self.scanner.expect("if")
test = self.Expr()
self.scanner.expect("then")
then_stmt = self.Statement()
else_stmt = None
if self.scanner.token == "else":
self.scanner.expect("else")
else_stmt = self.Statement()
stmt = ast.IfStatement(test, then_stmt, else_stmt)
elif self.scanner.token == "while":
self.scanner.expect("while")
test = self.Expr()
self.scanner.expect("do")
body = self.Statement()
stmt = ast.WhileStatement(test, body)
elif self.scanner.token == "return":
self.scanner.expect("return")
if self.scanner.token == "final":
self.scanner.expect("final")
expr = self.Expr()
stmt = ast.ReturnStatement(expr)
else:
name = self.scanner.grab()
if self.scanner.token == "(":
self.scanner.expect("(")
stmt = ast.CallStatement(name)
if self.scanner.token != ")":
expr = self.Expr()
stmt.add_arg(expr)
while self.scanner.token == ",":
self.scanner.expect(",")
expr = self.Expr()
stmt.add_arg(expr)
self.scanner.expect(")")
else:
stmt = ast.AssignStatement(name)
if self.scanner.token == "[":
self.scanner.expect("[")
index = self.Expr()
stmt.set_index(index)
self.scanner.expect("]")
self.scanner.expect(":=")
expr = self.Expr()
stmt.set_expr(expr)
return stmt
def Expr(self):
expr = None
if self.scanner.token == "(":
self.scanner.expect("(")
expr = self.Expr()
self.scanner.expect(")")
elif self.scanner.token == "bestow":
self.scanner.expect("bestow")
name = self.scanner.grab()
sub = self.Expr()
expr = ast.BestowExpr(name, sub)
elif self.scanner.token == "super":
self.scanner.expect("super")
expr = ast.SuperExpr()
elif self.scanner.toktype == "int":
value = self.scanner.tokval
self.scanner.grab()
expr = ast.IntConstExpr(value)
elif self.scanner.toktype == "string":
value = self.scanner.tokval
self.scanner.grab()
expr = ast.StringConstExpr(value)
else:
name = self.scanner.grab()
if self.scanner.token == "(":
expr = ast.CallExpr(name)
self.scanner.expect("(")
if self.scanner.token != ")":
sub = self.Expr()
expr.add_arg(sub)
while self.scanner.token == ",":
self.scanner.expect(",")
sub = self.Expr()
expr.add_arg(sub)
self.scanner.expect(")")
else:
expr = ast.VarRefExpr(name)
if self.scanner.token == "[":
self.scanner.expect("[")
index = self.Expr()
expr.set_index(index)
self.scanner.expect("]")
return expr
def TypeExpr(self):
quals = []
# XXX would be better to have 'forward qualifier'
while self.scanner.token not in [
u"void", u"bool", u"int", u"rat", u"string", u"ref", u"map", u"♥"
]:
name = self.scanner.grab()
quals.append(name)
type_expr = self.BareTypeExpr()
for qual in quals:
type_expr = ast.QualifiedTypeExpr(qual, type_expr)
return type_expr
def BareTypeExpr(self):
token = self.scanner.token
if token in ["void", "bool", "int", "rat", "string","ref"]:
self.scanner.scan()
return ast.PrimitiveTypeExpr(token)
elif token == "map":
self.scanner.scan()
from_type_expr = None
if self.scanner.token == "from":
self.scanner.expect("from")
from_type_expr = self.TypeExpr()
self.scanner.expect("to")
to_type_expr = self.TypeExpr()
return ast.MapTypeExpr(to_type_expr, from_type_expr)
elif token == u"♥":
self.scanner.scan()
name = self.scanner.grab()
return ast.TypeVariableExpr(name)
else:
self.scanner.error("expected valid type expression")
| 35.631579 | 81 | 0.506533 |
4a2359d79ee49c96a85c8acb885798b60827137c | 30,416 | py | Python | chat_downloader/sites/facebook.py | Leodmanx2/chat-downloader | 285da713f57c099840ddf89d492549821c891fda | [
"MIT"
] | 306 | 2021-02-03T12:19:14.000Z | 2022-03-31T02:07:20.000Z | chat_downloader/sites/facebook.py | z1001123/chat-downloader | ef68e28ac5898ed3e424ca1b9fbbbe73377cbb1e | [
"MIT"
] | 79 | 2021-02-03T11:48:29.000Z | 2022-03-28T19:14:18.000Z | chat_downloader/sites/facebook.py | z1001123/chat-downloader | ef68e28ac5898ed3e424ca1b9fbbbe73377cbb1e | [
"MIT"
] | 53 | 2021-02-09T21:55:11.000Z | 2022-03-28T21:17:40.000Z | from .common import (
Chat,
BaseChatDownloader,
Remapper as r,
Image
)
from ..utils.core import (
remove_prefixes,
multi_get,
seconds_to_time,
camel_case_split,
ensure_seconds,
attempts,
regex_search,
base64_encode,
)
from ..utils.timed_utils import interruptible_sleep
from ..errors import (
SiteError,
VideoUnavailable,
LoginRequired
)
from ..debugging import (log, debug_log)
import json
import re
from json.decoder import JSONDecodeError
from requests.exceptions import RequestException
class FacebookError(SiteError):
"""Raised when an error occurs with a Facebook video."""
pass
class RateLimitError(FacebookError):
"""Raised when the user has been rate-limited."""
pass
class FacebookChatDownloader(BaseChatDownloader):
_FB_HOMEPAGE = 'https://www.facebook.com'
_INITIAL_DATR_REGEX = r'_js_datr\",\"([^\"]+)'
_INITIAL_LSD_REGEX = r'<input.*?name=\"lsd\".*?value=\"([^\"]+)[^>]*>'
def __init__(self, **kwargs):
super().__init__(**kwargs)
# update headers for all subsequent FB requests
self.update_session_headers({
'Content-Type': 'application/x-www-form-urlencoded',
            'Referer': self._FB_HOMEPAGE,  # Required
})
initial_data = self._session_get(self._FB_HOMEPAGE).text
datr = regex_search(initial_data, self._INITIAL_DATR_REGEX)
if not datr:
raise FacebookError(f'Unable to set datr cookie: {initial_data}')
self.set_cookie_value('.facebook.com', 'datr', datr)
self.set_cookie_value('.facebook.com', 'wd', '1920x1080')
lsd = regex_search(initial_data, self._INITIAL_LSD_REGEX)
if not lsd:
raise FacebookError(f'Unable to set lsd cookie: {initial_data}')
self.lsd = lsd
self.update_session_headers({
'x-fb-lsd': lsd,
'upgrade-insecure-requests': '1',
'cache-control': 'max-age=0'
})
_NAME = 'facebook.com'
# Regex provided by youtube-dl
_VALID_URLS = {
'_get_chat_by_video_id': r'''(?x)
(?:
https?://
(?:[\w-]+\.)?(?:facebook\.com)/
(?:[^#]*?\#!/)?
(?:[^/]+/videos/(?:[^/]+/)?|video\.php\?v=)
)
(?P<id>[0-9]+)
'''
}
_TESTS = [
{
'name': 'Get chat messages from past gaming broadcast',
'params': {
'url': 'https://www.facebook.com/disguisedtoast/videos/3629284013844544/',
'max_messages': 100
},
'expected_result': {
'messages_condition': lambda messages: 0 < len(messages) <= 100,
}
},
{
'name': 'Get chat messages from gaming clip',
'params': {
'url': 'https://www.facebook.com/disguisedtoast/videos/1170480696709027/',
'max_messages': 100
},
'expected_result': {
'messages_condition': lambda messages: 0 < len(messages) <= 100,
}
},
{
'name': 'Get chat messages from short gaming video',
'params': {
'url': 'https://www.facebook.com/disguisedtoast/videos/333201981735004/',
'max_messages': 100
},
'expected_result': {
'messages_condition': lambda messages: 0 < len(messages) <= 100,
}
},
{
'name': 'Get chat messages from long gaming video',
'params': {
'url': 'https://www.facebook.com/disguisedtoast/videos/918814568681983/',
'start_time': 60,
'end_time': 150
},
'expected_result': {
'messages_condition': lambda messages: len(messages) > 0,
}
},
{
'name': 'Get chat messages from video page',
'params': {
'url': 'https://www.facebook.com/video.php?v=570133851026337',
'max_messages': 100
},
'expected_result': {
'messages_condition': lambda messages: 0 < len(messages) <= 100,
}
},
{
'name': 'Get chat messages from short video',
'params': {
'url': 'https://www.facebook.com/338233632988842/videos/958020308373031',
'max_messages': 100
},
'expected_result': {
'messages_condition': lambda messages: 0 < len(messages) <= 100,
}
},
{
'name': 'Get chat messages from past broadcast',
'params': {
'url': 'https://www.facebook.com/kolaolootulive/videos/553617129001138/',
'start_time': 567,
'end_time': 1234
},
'expected_result': {
'messages_condition': lambda messages: len(messages) > 0,
}
},
# Check for errors
{
'name': 'Video unavailable or private',
'params': {
'url': 'https://www.facebook.com/SRAVS.Gaming/videos/512714596679251/',
},
'expected_result': {
'error': VideoUnavailable,
}
},
]
_GRAPH_API = _FB_HOMEPAGE + '/api/graphql/'
def _graphql_request(self, program_params, retry_on_error=True, **post_kwargs):
data = {
'av': '0',
'__user': '0',
'__a': '1',
'__comet_req': '1',
'lsd': self.lsd,
'server_timestamps': 'true',
'__csr': '',
'dpr': '1',
'__ccg': 'MODERATE',
}
data.update(post_kwargs.pop('data', {}))
post_kwargs['data'] = data
max_attempts = program_params.get('max_attempts')
for attempt_number in attempts(max_attempts):
try:
response = self._session_post(self._GRAPH_API, **post_kwargs)
response_json = response.json()
# Check for errors
for error in response_json.get('errors') or []:
if error.get('code') == 1675004:
raise RateLimitError(f'Rate limit exceeded: {error}')
return response_json
except JSONDecodeError as e:
self.retry(attempt_number, error=e, **program_params,
text=f'Unable to parse JSON: `{response.text}`')
except RequestException as e:
self.retry(attempt_number, error=e, **program_params)
except RateLimitError as e:
if retry_on_error:
self.retry(attempt_number, error=e, **program_params)
else:
raise e
_VIDEO_TITLE_REGEX = r'<meta\s+name=["\'].*title["\']\s+content=["\']([^"\']+)["\']\s*/>'
def _get_initial_info(self, video_id, program_params):
# Get metadata
data = {
'variables': json.dumps({
'upNextVideoID': video_id,
}),
'doc_id': '4730353697015342'
}
json_data = self._graphql_request(program_params, data=data)
video_data = multi_get(json_data, 'data', 'upNextVideoData')
if not video_data:
log('debug', json_data)
raise VideoUnavailable('Video unavailable')
return {
'status': 'live' if video_data.get('is_live_streaming') else 'past',
'video_type': 'video',
'broadcast_status': video_data.get('broadcast_status'),
'title': video_data.get('title_with_fallback'),
'username': multi_get(video_data, 'owner', 'name'),
'start_time': video_data.get('publish_time') * 1000000,
'duration': video_data.get('playable_duration')
}
@staticmethod
def _parse_feedback(feedback):
new_feedback = {}
edges = multi_get(feedback, 'top_reactions', 'edges')
if not edges:
return new_feedback
new_feedback['reaction_types'] = []
for edge in edges:
node = edge.get('node')
reaction_item = {
'key': node.get('key'),
'id': node.get('id'),
'name': node.get('reaction_type'),
'count': edge.get('reaction_count')
}
new_feedback['reaction_types'].append(reaction_item)
new_feedback['total_count'] = multi_get(feedback, 'reactors', 'count')
new_feedback['total_count_reduced'] = multi_get(
feedback, 'reactors', 'count_reduced')
return new_feedback
@staticmethod
def _get_text(item):
return item.get('text') if item else None
@staticmethod
def _parse_image(item):
return Image(item.get('uri'), item.get('width'), item.get('height')).json()
@staticmethod
def _get_uri(item):
return item.get('uri')
@staticmethod
def _get_url(item):
return item.get('url')
@staticmethod
def _parse_attachment_info(original_item):
if isinstance(original_item, (list, tuple)) and len(original_item) > 0:
original_item = original_item[0]
if not original_item:
return {}
item = r.remap_dict(
original_item, FacebookChatDownloader._TARGET_MEDIA_REMAPPING)
# VideoTipJarPayment
quantity = item.get('quantity')
if quantity:
item['text'] = f"Sent {quantity} Star{'s' if quantity != 1 else ''}"
# For photos:
blurred_image = item.pop('blurred_image', None)
massive_image = item.pop('massive_image', None)
if blurred_image and massive_image:
item['text'] = Image(blurred_image, massive_image.get(
'width'), massive_image.get('height')).json()
# style_infos
donation_comment_text = item.pop('donation_comment_text', None)
if donation_comment_text:
entity = multi_get(donation_comment_text,
'ranges', 0, 'entity') or {}
item = r.remap_dict(
entity, FacebookChatDownloader._TARGET_MEDIA_REMAPPING)
item['text'] = donation_comment_text.get('text')
# DEBUGGING
original_type_name = original_item.get('__typename')
if original_type_name not in FacebookChatDownloader._KNOWN_ATTACHMENT_TYPES:
debug_log(
f'Unknown attachment type: {original_type_name}',
original_item,
item
)
return item
@staticmethod
def _parse_target(media):
item = {}
return item
@staticmethod
def _parse_author_badges(item):
keys = (('badge_asset', 'small'), ('information_asset', 'colour'))
icons = list(map(lambda x: Image(
FacebookChatDownloader._FB_HOMEPAGE + item.get(x[0]), 24, 24, x[1]).json(), keys))
icons.append(
Image(item.get('multiple_badge_asset'), 36, 36, 'large').json())
return {
'title': item.get('text'),
'alternative_title': item.get('information_title'),
'description': item.get('information_description'),
'icons': icons,
# badge_asset
# multiple_badge_asset
# information_asset
'icon_name': item.get('identity_badge_type')
}
@staticmethod
def _parse_attachment_renderer(item):
attachment = multi_get(item, 'style_type_renderer',
'attachment') or item.get('attachment')
if not attachment:
debug_log(f'No attachment: {item}')
return {}
return FacebookChatDownloader._parse_attachment(attachment)
@staticmethod
def _parse_attachment(attachment):
parsed = r.remap_dict(
attachment, FacebookChatDownloader._ATTACHMENT_REMAPPING)
for key in ('target', 'media', 'style_infos'):
if parsed.get(key) == {}:
parsed.pop(key)
missing_keys = attachment.keys() - FacebookChatDownloader._KNOWN_ATTACHMENT_KEYS
if missing_keys:
debug_log(
f'Missing attachment keys: {missing_keys}',
attachment,
parsed
)
return parsed
_ATTACHMENT_REMAPPING = {
'url': 'url', # facebook redirect url,
'source': r('source', _get_text),
'title_with_entities': r('title', _get_text),
'target': r('target', _parse_attachment_info),
'media': r('media', _parse_attachment_info),
'style_infos': r('style_infos', _parse_attachment_info),
'attachment_text': r('text', _get_text),
'__typename': 'type',
'story_url': 'story_url',
'story_attachment_link_renderer': r('story_attachment', _parse_attachment_renderer),
'web_link': r('web_link', _get_url)
# 'sticker_image': r('sticker_image', _get_uri),
}
_IGNORE_ATTACHMENT_KEYS = [
'tracking',
'action_links',
'third_party_media_info'
]
_KNOWN_ATTACHMENT_KEYS = set(
list(_ATTACHMENT_REMAPPING.keys()) + _IGNORE_ATTACHMENT_KEYS)
_TARGET_MEDIA_REMAPPING = {
'id': 'id',
'__typename': r('type', camel_case_split),
'fallback_image': r('image', _parse_image),
'is_playable': 'is_playable',
'url': 'url',
'mobileUrl': 'mobile_url',
# Sticker
'pack': 'pack',
'label': 'label',
'image': r('image', _parse_image),
# VideoTipJarPayment
'stars_image_on_star_quantity': 'icon',
'spark_quantity': 'quantity',
# Page
'name': 'name',
'category_name': 'category',
'address': 'address',
'overall_star_rating': 'overall_star_rating',
'profile_picture': r('profile_picture', _get_uri),
# Photo
'accessibility_caption': 'accessibility_caption',
'blurred_image': r('blurred_image', _get_uri),
'massive_image': 'massive_image',
# FundraiserForStoryDonationAttachmentStyleInfo
'donation_comment_text': 'donation_comment_text'
}
_KNOWN_ATTACHMENT_TYPES = [
'Sticker',
'VideoTipJarPayment',
'Page',
'Video',
'Group',
'ProfilePicAttachmentMedia',
'User',
'Photo',
'ExternalUrl',
'GenericAttachmentMedia',
'ChatCommandResult',
'CommentMessageInfo',
'FundraiserForStoryDonationAttachmentStyleInfo',
'Event'
]
_REMAPPING = {
'id': 'message_id',
'community_moderation_state': 'community_moderation_state',
# attachments
'author': 'author',
'feedback': r('reactions', _parse_feedback),
'created_time': r('timestamp', lambda x: x * 1000000),
'upvote_downvote_total': 'upvote_downvote_total',
'is_author_banned_by_content_owner': 'is_author_banned',
'is_author_original_poster': 'is_author_original_poster',
'is_author_bot': 'is_author_bot',
'is_author_non_coworker': 'is_author_non_coworker',
# if banned, ban_action?
'comment_parent': 'comment_parent',
'edit_history': r('number_of_edits', lambda x: x.get('count')),
'timestamp_in_video': 'timestamp_in_video',
'written_while_video_was_live': 'written_while_video_was_live',
'translatability_for_viewer': r('message_dialect', lambda x: x.get('source_dialect_name')),
'url': 'message_url',
'body': r('message', _get_text),
'identity_badges_web': r('author_badges', lambda x: list(map(FacebookChatDownloader._parse_author_badges, x))),
'attachments': r('attachments', lambda x: list(map(FacebookChatDownloader._parse_attachment_renderer, x)))
}
_AUTHOR_REMAPPING = {
'id': 'id',
'name': 'name',
'__typename': r('type', camel_case_split),
'url': 'url',
'is_verified': 'is_verified',
'gender': r('gender', lambda x: x.lower()),
'short_name': 'short_name'
}
@ staticmethod
def _parse_node(node, parse_time=False, start_time=None):
info = r.remap_dict(node, FacebookChatDownloader._REMAPPING)
author_info = info.pop('author', {})
BaseChatDownloader._move_to_dict(
info, 'author', create_when_empty=True)
info['author'] = r.remap_dict(
author_info, FacebookChatDownloader._AUTHOR_REMAPPING)
if 'profile_picture_depth_0' in author_info:
info['author']['images'] = []
for size in ((0, 32), (1, 24)):
url = multi_get(
author_info, f'profile_picture_depth_{size[0]}', 'uri')
info['author']['images'].append(
Image(url, size[1], size[1]).json())
# author_badges = info.pop('author_badges', None)
# if author_badges:
# info['author']['badges'] = author_badges
in_reply_to = info.pop('comment_parent', None)
if isinstance(in_reply_to, dict) and in_reply_to:
info['in_reply_to'] = FacebookChatDownloader._parse_node(
in_reply_to, parse_time, start_time)
if parse_time:
timestamp_in_video = info.get('timestamp_in_video')
if timestamp_in_video is not None:
if start_time is None:
info['time_in_seconds'] = timestamp_in_video
else:
info['time_in_seconds'] = (
info['timestamp'] - start_time)/1e6
info['time_text'] = seconds_to_time(info['time_in_seconds'])
message = info.get('message')
if message:
info['message'] = message
info['message_type'] = 'text_message'
else:
info.pop('message', None) # remove if empty
# remove the following if empty:
if info.get('reactions') == {}:
info.pop('reactions')
if info.get('attachments') == []:
info.pop('attachments')
return info
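    # Rough illustration (hypothetical numbers) of the VOD time handling above:
    # with start_time = 1_600_000_000_000_000 (microseconds) and a node timestamp
    # of 1_600_000_060_000_000 (microseconds), time_in_seconds becomes 60.0 and
    # time_text is its formatted form from seconds_to_time().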
def _get_live_chat_messages_by_video_id(self, video_id, params):
buffer_size = 25 # max num comments returned by api call
# cursor = ''
variables = {
'videoID': video_id
}
data = {
'variables': json.dumps(variables),
'doc_id': '4889623951078943', # specifies what API call this is?
# 'cursor' : cursor
# &first=12&after=<end_cursor>
}
first_try = True
last_ids = []
while True:
json_data = self._graphql_request(params, data=data)
feedback = multi_get(json_data, 'data', 'video', 'feedback')
if not feedback:
log('debug', f'No feedback: {json_data}')
continue
top_level_comments = multi_get(
json_data, 'data', 'video', 'feedback', 'top_level_comments')
errors = json_data.get('errors')
if errors:
# TODO will usually resume getting chat..
# maybe add timeout?
log('debug', f'Errors detected: {errors}')
continue
if not top_level_comments:
log('debug', f'No top level comments: {json_data}')
continue
# Parse items:
parsed_items = []
for edge in top_level_comments.get('edges') or []:
node = edge.get('node')
if not node:
log('debug', f'No node found in edge: {edge}')
continue
parsed_items.append(FacebookChatDownloader._parse_node(node))
# Sort items
parsed_items.sort(key=lambda x: x['timestamp'])
# TODO - get pagination working
# page_info = top_level_comments.get('page_info')
# after = page_info.get('end_cursor')
# has_next_page = page_info.get('has_next_page')
num_to_add = 0
for item in parsed_items:
comment_id = item.get('message_id')
# remove items that have already been parsed
if comment_id in last_ids:
continue
last_ids.append(comment_id)
last_ids = last_ids[-buffer_size:] # force x items
# TODO determine whether to add or not (message types/groups)
num_to_add += 1
yield item
if num_to_add == 0:
time_to_sleep = 1
log('debug',
f'No actions, sleeping for {time_to_sleep} seconds')
interruptible_sleep(time_to_sleep)
# got 25 items, and this isn't the first one
if num_to_add >= buffer_size and not first_try:
log(
'warning',
'Messages may be coming in faster than requests are being made.'
)
if first_try:
first_try = False
def _get_chat_from_vod(self, feedback_id, stream_start_time, end_time, params):
# method 1 - only works for vods. Guaranteed to get all, but can't choose start time
# ordered by timestamp
data = {
'fb_api_req_friendly_name': 'CometUFICommentsProviderPaginationQuery',
'doc_id': '4310877875602018'
}
ordering = 'LIVE_STREAMING' # 'TOPLEVEL' 'RANKED_THREADED' 'CHRONOLOGICAL'
variables = {
'feedLocation': 'TAHOE',
'feedbackID': feedback_id,
'feedbackSource': 41,
'last': 50, # max step is 50
'includeHighlightedComments': True,
'includeNestedComments': True,
'initialViewOption': ordering,
'topLevelViewOption': ordering,
'viewOption': ordering
}
before = None
while True:
variables['before'] = before
variables['isPaginating'] = before is not None
data['variables'] = json.dumps(variables)
json_data = self._graphql_request(params, retry_on_error=False, data=data)
info = multi_get(json_data, 'data', 'feedback')
if not info:
log('debug', f'No feedback: {json_data}')
break
display_comments = info.get('display_comments')
edges = display_comments.get('edges') or []
parsed_items = []
for edge in reversed(edges):
node = edge.get('node')
if not node:
log('debug', f'No node found in edge: {edge}')
continue
parsed = FacebookChatDownloader._parse_node(
node, True, stream_start_time)
time_in_seconds = parsed.get('time_in_seconds')
if time_in_seconds is None:
continue
if time_in_seconds > end_time:
return
parsed_items.append(parsed)
parsed_items.sort(key=lambda x: x['timestamp'])
yield from parsed_items
page_info = display_comments.get('page_info') or {}
if not page_info.get('has_previous_page'):
break
before = page_info.get('start_cursor')
def _get_chat_from_video(self, feedback_id, start_time, end_time, params):
# method 2 - works for all videos, but sometimes misses comments
# max messages is 30 per minute
# ordered by time_in_seconds
log('debug', 'Running method 2')
data = {
'fb_api_req_friendly_name': 'CometLiveVODCommentListRefetchableQuery',
'doc_id': '3941623715965411'
}
# By default, Facebook gets messages by the minute
time_increment = 600 # 10 mins
next_start_time = max(start_time, 0)
variables = {
'id': feedback_id
}
while True:
next_end_time = min(next_start_time + time_increment, end_time)
variables['afterTime'] = next_start_time
variables['beforeTime'] = next_end_time
data['variables'] = json.dumps(variables)
json_data = self._graphql_request(params, data=data)
edges = multi_get(json_data, 'data', 'node',
'video_timestamped_comments', 'edges') or []
for edge in edges:
node = edge.get('node')
if not node:
log('debug', f'No node found in edge: {edge}')
continue
yield FacebookChatDownloader._parse_node(node, True)
if next_end_time >= end_time:
return
next_start_time = next_end_time
def _get_chat_replay_messages_by_video_id(self, video_id, max_duration, initial_info, params):
feedback_id = base64_encode(f'feedback:{video_id}')
broadcast_status = initial_info.get('broadcast_status')
stream_start_time = initial_info.get('start_time')
start_time = ensure_seconds(params.get('start_time'), 0)
end_time = min(ensure_seconds(params.get(
'end_time'), float('inf')), max_duration)
success = False
if broadcast_status == 'VOD_READY' and params.get('start_time') is None:
# no start time, get until end_time
log('debug', 'Running method 1')
try:
for i in self._get_chat_from_vod(feedback_id, stream_start_time, end_time, params):
time_in_seconds = i.get('time_in_seconds')
if time_in_seconds is not None:
start_time = max(start_time, time_in_seconds)
yield i
success = True
            except RateLimitError:
                pass  # fall back to method 2 below
if not success: # Fallback
yield from self._get_chat_from_video(feedback_id, start_time, end_time, params)
def _get_chat_by_video_id(self, match, params):
return self.get_chat_by_video_id(match.group('id'), params)
def get_chat_by_video_id(self, video_id, params):
initial_info = self._get_initial_info(video_id, params)
start_time = params.get('start_time')
end_time = params.get('end_time')
# if start or end time specified, use chat replay...
# The tool works for both active and finished live streams.
# if start/end time are specified, vods will be prioritised
# if is live stream and no start/end time specified
if initial_info.get('status') == 'live' and not start_time and not end_time:
generator = self._get_live_chat_messages_by_video_id(
video_id, params)
else:
max_duration = initial_info.get('duration', float('inf'))
generator = self._get_chat_replay_messages_by_video_id(
video_id, max_duration, initial_info, params)
return Chat(
generator,
id=video_id,
**initial_info
)
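    # Illustrative usage sketch (not part of this class): downloading Facebook chat via the
    # package's public facade; the facade name and URL below are assumptions rather than
    # something taken from this file.
    # from chat_downloader import ChatDownloader
    # chat = ChatDownloader().get_chat('https://www.facebook.com/<page>/videos/<video_id>/')
    # for message in chat:
    #     chat.print_formatted(message)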
_STREAM_PAGE = 'https://www.facebook.com/gaming/browse/live/?s=VIEWERS&language=ALL_LANG'
def generate_urls(self, **kwargs):
yield from self._generate_live(kwargs.get('livestream_limit'), **kwargs)
yield from self._generate_videos(kwargs.get('vod_limit'), **kwargs)
yield from self._generate_clips(kwargs.get('clip_limit'), **kwargs)
def _generate_live(self, limit, **kwargs):
# https://www.facebook.com/gaming/browse/live/?s=VIEWERS&language=ALL_LANG
return self._generate_urls('live', limit, **kwargs)
def _generate_videos(self, limit, **kwargs):
# https://www.facebook.com/gaming/videos
return self._generate_urls('videos', limit, **kwargs)
def _generate_clips(self, limit, **kwargs):
# https://www.facebook.com/gaming/clips/
return self._generate_urls('clips', limit, **kwargs)
def _generate_urls(self, video_type, limit, **kwargs):
max_attempts = 10
program_params = {
'max_attempts': max_attempts
}
step = 8
if video_type in ('live', 'videos'):
variables = {
'count': step,
'params': {
'following': None, # False
'game_id': None,
'language': 'ALL_LANG',
'remove_following': True,
'sort_order': 'VIEWERS' # None 'SUGGESTED'
}
}
key = 'top_live' if video_type == 'live' else 'top_was_live'
else: # video_type == 'clips':
variables = {
'count': step,
'following': False,
'game_id': None,
'streamer_id': None,
'sort_order': 'VIEWERS' # None 'SUGGESTED'
}
key = 'top_weekly_clips'
doc_ids = {
'live': 3843810065738698,
'videos': 4591277870888795,
'clips': 3586924904747093
}
doc_id = doc_ids.get(video_type)
data = {
'doc_id': doc_id
}
count = 0
while True:
data['variables'] = json.dumps(variables)
json_data = self._graphql_request(program_params, data=data)
top_live = multi_get(json_data, 'data', 'gaming_video', key)
if not top_live:
log('debug', f'No data found: {json_data}')
return
edges = top_live.get('edges') or []
for edge in edges:
if count >= limit:
return
url = multi_get(edge, 'node', 'url')
if url:
yield url
count += 1
page_info = top_live.get('page_info')
has_next_page = page_info.get('has_next_page')
if not has_next_page:
break
variables['cursor'] = page_info.get('end_cursor')
| 31.849215 | 119 | 0.553163 |
4a2359e14e50723d7696cb828fe387b3a29efcc2 | 369 | py | Python | tests/integration/modules/ard.py | mosen/salt-osx | 818d4ae89bb2853b28999a8ddb883c0fe1b1a657 | [
"MIT"
] | 68 | 2015-02-11T00:53:54.000Z | 2021-11-06T16:07:17.000Z | tests/integration/modules/ard.py | Jaharmi/salt-osx | f6db606f04846d45935f3ed729e6243441cee360 | [
"MIT"
] | 13 | 2016-01-05T00:01:34.000Z | 2022-03-18T23:44:21.000Z | tests/integration/modules/ard.py | Jaharmi/salt-osx | f6db606f04846d45935f3ed729e6243441cee360 | [
"MIT"
] | 19 | 2015-04-09T20:58:55.000Z | 2020-11-04T06:39:06.000Z | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
class ArdTest(integration.ModuleCase):
pass
if __name__ == '__main__':
from integration import run_tests
run_tests(ArdTest)
| 18.45 | 49 | 0.747967 |
4a2359fc4a878b23f4458b2558106493d0a2250d | 1,845 | py | Python | .history/bar_20211223230946.py | tjxj/pdf2docx | 3338a95f1a971de8caf0369fa3ce794d2d6d57cd | [
"Apache-2.0"
] | null | null | null | .history/bar_20211223230946.py | tjxj/pdf2docx | 3338a95f1a971de8caf0369fa3ce794d2d6d57cd | [
"Apache-2.0"
] | null | null | null | .history/bar_20211223230946.py | tjxj/pdf2docx | 3338a95f1a971de8caf0369fa3ce794d2d6d57cd | [
"Apache-2.0"
] | null | null | null | #Creating and Manipulating PDFs with pdfrw
# https://www.blog.pythonlibrary.org/2018/06/06/creating-and-manipulating-pdfs-with-pdfrw/
# Extract certain types of information from a PDF
# Splitting PDFs
# Merging / Concatenating PDFs
# Rotating pages
# Creating overlays or watermarks
# Scaling pages
# Combining the use of pdfrw and ReportLab
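# A minimal sketch of one item from the list above (splitting a PDF with pdfrw). pdfrw is
# only mentioned in these notes and is not used by the Streamlit app below; the file names
# and page count are placeholders.
def split_first_pages(src_pdf="input.pdf", dst_pdf="first_pages.pdf", n_pages=2):
    from pdfrw import PdfReader, PdfWriter  # imported lazily; optional dependency
    pages = PdfReader(src_pdf).pages        # all pages of the source document
    writer = PdfWriter()
    writer.addpages(pages[:n_pages])        # keep only the first n_pages
    writer.write(dst_pdf)                   # write the split document to disk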
import base64
import tempfile
from pdf2docx import Converter
import streamlit as st
from pdf2image import convert_from_path
from pathlib import Path
def show_pdf(file_path:str):
"""Show the PDF in Streamlit
That returns as html component
Parameters
----------
file_path : [str]
Uploaded PDF file path
"""
with open(file_path, "rb") as f:
base64_pdf = base64.b64encode(f.read()).decode("utf-8")
pdf_display = f'<embed src="data:application/pdf;base64,{base64_pdf}" width="100%" height="1000" type="application/pdf">'
st.markdown(pdf_display, unsafe_allow_html=True)
def main():
"""Streamlit application
"""
st.title("PDF file uplodaer")
uploaded_file = st.file_uploader("Choose your .pdf file", type="pdf")
if uploaded_file is not None:
# Make temp file path from uploaded file
with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
st.markdown("## Original PDF file")
fp = Path(tmp_file.name)
fp.write_bytes(uploaded_file.getvalue())
st.write(show_pdf(tmp_file.name))
            # Convert the uploaded PDF to a Word document and offer it for download.
            docx_path = tmp_file.name + ".docx"
            converter = Converter(tmp_file.name)
            converter.convert(docx_path, start=0, end=None)
            converter.close()
            st.markdown("Converted Word document from the PDF")
            with open(docx_path, "rb") as docx_file:
                st.download_button(label='Download the Word document',
                                   data=docx_file.read(),
                                   file_name='converted.docx',
                                   mime="application/octet-stream")
if __name__ == "__main__":
main() | 29.758065 | 125 | 0.676965 |
4a235a474e94db163004d97223150de004cc5d1d | 5,058 | py | Python | virtual/Lib/site-packages/dash_html_components/Shadow.py | LeonZly90/LeonZly90 | 935a658814632beca84cab0af6c048dd762f8c56 | [
"MIT"
] | 2 | 2021-07-18T11:39:56.000Z | 2021-11-06T17:13:05.000Z | venv/Lib/site-packages/dash_html_components/Shadow.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | null | null | null | venv/Lib/site-packages/dash_html_components/Shadow.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | null | null | null | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Shadow(Component):
"""A Shadow component.
Shadow is a wrapper for the <shadow> HTML5 element.
DEPRECATED: <shadow> is included for completeness, but should be avoided
as it is not supported by all browsers and may be removed at any time from
those that do support it.
For detailed attribute info see:
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/shadow
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional):
The children of this component.
- id (string; optional):
The ID of this component, used to identify dash components in
callbacks. The ID needs to be unique across all of the components
in an app.
- accessKey (string; optional):
Keyboard shortcut to activate or add focus to the element.
- aria-* (string; optional):
A wildcard aria attribute.
- className (string; optional):
Often used with CSS to style elements with common properties.
- contentEditable (string; optional):
Indicates whether the element's content is editable.
- contextMenu (string; optional):
Defines the ID of a <menu> element which will serve as the
element's context menu.
- data-* (string; optional):
A wildcard data attribute.
- dir (string; optional):
Defines the text direction. Allowed values are ltr (Left-To-Right)
or rtl (Right-To-Left).
- draggable (string; optional):
Defines whether the element can be dragged.
- hidden (a value equal to: 'hidden', 'HIDDEN' | boolean; optional):
Prevents rendering of given element, while keeping child elements,
e.g. script elements, active.
- key (string; optional):
A unique identifier for the component, used to improve performance
by React.js while rendering components See
https://reactjs.org/docs/lists-and-keys.html for more info.
- lang (string; optional):
Defines the language used in the element.
- loading_state (dict; optional):
Object that holds the loading state object coming from
dash-renderer.
`loading_state` is a dict with keys:
- component_name (string; optional):
Holds the name of the component that is loading.
- is_loading (boolean; optional):
Determines if the component is loading or not.
- prop_name (string; optional):
Holds which property is loading.
- n_clicks (number; default 0):
An integer that represents the number of times that this element
has been clicked on.
- n_clicks_timestamp (number; default -1):
An integer that represents the time (in ms since 1970) at which
n_clicks changed. This can be used to tell which button was
changed most recently.
- role (string; optional):
The ARIA role attribute.
- spellCheck (string; optional):
Indicates whether spell checking is allowed for the element.
- style (dict; optional):
Defines CSS styles which will override styles previously set.
- tabIndex (string; optional):
Overrides the browser's default tab order and follows the one
specified instead.
- title (string; optional):
Text to be displayed in a tooltip when hovering over the element."""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, key=Component.UNDEFINED, role=Component.UNDEFINED, accessKey=Component.UNDEFINED, className=Component.UNDEFINED, contentEditable=Component.UNDEFINED, contextMenu=Component.UNDEFINED, dir=Component.UNDEFINED, draggable=Component.UNDEFINED, hidden=Component.UNDEFINED, lang=Component.UNDEFINED, spellCheck=Component.UNDEFINED, style=Component.UNDEFINED, tabIndex=Component.UNDEFINED, title=Component.UNDEFINED, loading_state=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'accessKey', 'aria-*', 'className', 'contentEditable', 'contextMenu', 'data-*', 'dir', 'draggable', 'hidden', 'key', 'lang', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'role', 'spellCheck', 'style', 'tabIndex', 'title']
self._type = 'Shadow'
self._namespace = 'dash_html_components'
self._valid_wildcard_attributes = ['data-', 'aria-']
self.available_properties = ['children', 'id', 'accessKey', 'aria-*', 'className', 'contentEditable', 'contextMenu', 'data-*', 'dir', 'draggable', 'hidden', 'key', 'lang', 'loading_state', 'n_clicks', 'n_clicks_timestamp', 'role', 'spellCheck', 'style', 'tabIndex', 'title']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Shadow, self).__init__(children=children, **args)
| 41.801653 | 586 | 0.708976 |
4a235acb68464acb56bfd1d4b928bc8f2ecbc959 | 3,407 | py | Python | preprocessing/generate_lidar.py | fnaser/pseudo_lidar | 9987331f68ff3d0f4d04ad47fc7632ae2a154674 | [
"MIT"
] | null | null | null | preprocessing/generate_lidar.py | fnaser/pseudo_lidar | 9987331f68ff3d0f4d04ad47fc7632ae2a154674 | [
"MIT"
] | null | null | null | preprocessing/generate_lidar.py | fnaser/pseudo_lidar | 9987331f68ff3d0f4d04ad47fc7632ae2a154674 | [
"MIT"
] | null | null | null | import argparse
import os
import numpy as np
import scipy.misc as ssc
import open3d
import kitti_util
def project_disp_to_points(calib, disp, max_high):
    # Clamp invalid (negative) disparities, then convert disparity to depth with the stereo
    # relation depth = f_u * baseline / disparity (KITTI stereo baseline ~0.54 m).
    disp[disp < 0] = 0
    baseline = 0.54
    mask = disp > 0
    depth = calib.f_u * baseline / (disp + 1. - mask)  # "+ 1. - mask" avoids divide-by-zero
    rows, cols = depth.shape
    # Build (u, v, depth) triples for every pixel and keep only pixels with valid disparity.
    c, r = np.meshgrid(np.arange(cols), np.arange(rows))
    points = np.stack([c, r, depth])
    points = points.reshape((3, -1))
    points = points.T
    points = points[mask.reshape(-1)]
    # Project into the velodyne frame; keep points in front of the sensor and below max_high.
    cloud = calib.project_image_to_velo(points)
    valid = (cloud[:, 0] >= 0) & (cloud[:, 2] < max_high)
    return cloud[valid]
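# Quick numeric check of the disparity-to-depth relation above (illustrative values only,
# not taken from a real KITTI calibration file):
#   f_u, baseline, disparity = 721.5, 0.54, 30.0   # pixels, metres, pixels
#   depth = f_u * baseline / disparity             # ~12.99 m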
def project_depth_to_points(calib, depth, max_high):
print(depth.shape)
print(depth[0, 0, 1, 1])
_, _, rows, cols = depth.shape
depth = depth.reshape(rows, cols)
print(depth.shape)
print(depth[1, 1])
c, r = np.meshgrid(np.arange(cols), np.arange(rows))
points = np.stack([c, r, depth])
points = points.reshape((3, -1))
points = points.T
cloud = calib.project_image_to_velo(points)
valid = (cloud[:, 0] >= 0) & (cloud[:, 2] < max_high)
return cloud[valid]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate Libar')
parser.add_argument('--calib_dir', type=str,
default='~/Kitti/object/training/calib')
parser.add_argument('--disparity_dir', type=str,
default='~/Kitti/object/training/predicted_disparity')
parser.add_argument('--save_dir', type=str,
default='~/Kitti/object/training/predicted_velodyne')
parser.add_argument('--max_high', type=int, default=1)
parser.add_argument('--is_depth', action='store_true')
args = parser.parse_args()
assert os.path.isdir(args.disparity_dir)
assert os.path.isdir(args.calib_dir)
if not os.path.isdir(args.save_dir):
os.makedirs(args.save_dir)
disps = [x for x in os.listdir(args.disparity_dir) if x[-3:] == 'png' or x[-3:] == 'npy' or x[-3:] == "jpg"]
disps = sorted(disps)
for fn in disps:
predix = fn[:-4]
calib_file = '{}/{}.txt'.format(args.calib_dir, predix)
print(calib_file)
calib = kitti_util.Calibration(calib_file)
# disp_map = ssc.imread(args.disparity_dir + '/' + fn) / 256.
if fn[-3:] == 'png' or fn[-3:] == "jpg":
disp_map = ssc.imread(args.disparity_dir + '/' + fn)
elif fn[-3:] == 'npy':
disp_map = np.load(args.disparity_dir + '/' + fn)
else:
assert False
if not args.is_depth:
disp_map = (disp_map*256).astype(np.uint16)/256.
lidar = project_disp_to_points(calib, disp_map, args.max_high)
else:
disp_map = (disp_map).astype(np.float32)/256.
lidar = project_depth_to_points(calib, disp_map, args.max_high)
# pad 1 in the intensity dimension
# lidar = np.concatenate([lidar, np.ones((lidar.shape[0], 1))], 1)
lidar = lidar.astype(np.float32)
print(lidar.shape)
pcd = open3d.geometry.PointCloud()
pcd.points = open3d.utility.Vector3dVector(lidar)
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
open3d.visualization.draw_geometries([pcd])
lidar.tofile('{}/{}.bin'.format(args.save_dir, predix))
print('Finish Depth {}'.format(predix))
| 36.244681 | 112 | 0.605225 |
4a235b46857e869e7787ee5f69e628270f936d68 | 267 | py | Python | tests/dummy.py | paolostivanin/exam | 27dc53a703349ec09433a6b989d6fc32ad523c0b | [
"MIT"
] | 46 | 2015-01-31T10:27:51.000Z | 2020-11-28T19:04:11.000Z | tests/dummy.py | paolostivanin/exam | 27dc53a703349ec09433a6b989d6fc32ad523c0b | [
"MIT"
] | 7 | 2015-02-16T17:14:20.000Z | 2022-01-05T04:21:27.000Z | tests/dummy.py | paolostivanin/exam | 27dc53a703349ec09433a6b989d6fc32ad523c0b | [
"MIT"
] | 10 | 2015-02-08T22:47:58.000Z | 2020-10-12T09:51:19.000Z | #: Module purely exists to test patching things.
thing = True
it = lambda: False
def get_thing():
global thing
return thing
def get_it():
global it
return it
def get_prop():
return ThingClass.prop
class ThingClass(object):
prop = True
| 12.136364 | 48 | 0.670412 |
4a235b95562229ac11ec8da94aee3392df5193b0 | 6,007 | py | Python | test/functional/zmq_test.py | zzbolt/zzboltcoin | 99bf8ef246e4e96ade612a4ad270bf2554082314 | [
"MIT"
] | null | null | null | test/functional/zmq_test.py | zzbolt/zzboltcoin | 99bf8ef246e4e96ade612a4ad270bf2554082314 | [
"MIT"
] | null | null | null | test/functional/zmq_test.py | zzbolt/zzboltcoin | 99bf8ef246e4e96ade612a4ad270bf2554082314 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ API."""
import configparser
import os
import struct
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import (assert_equal,
bytes_to_hex_str,
hash256,
)
class ZMQTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def setup_nodes(self):
# Try to import python3-zmq. Skip this test if the import fails.
try:
import zmq
except ImportError:
raise SkipTest("python3-zmq module not available.")
# Check that bitcoin has been built with ZMQ enabled
config = configparser.ConfigParser()
if not self.options.configfile:
self.options.configfile = os.path.dirname(__file__) + "/../config.ini"
config.read_file(open(self.options.configfile))
if not config["components"].getboolean("ENABLE_ZMQ"):
raise SkipTest("zzboltcoind has not been built with zmq enabled.")
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.set(zmq.RCVTIMEO, 60000)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"rawblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"rawtx")
ip_address = "tcp://127.0.0.1:28332"
self.zmqSubSocket.connect(ip_address)
self.extra_args = [['-zmqpubhashblock=%s' % ip_address, '-zmqpubhashtx=%s' % ip_address,
'-zmqpubrawblock=%s' % ip_address, '-zmqpubrawtx=%s' % ip_address], []]
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
def run_test(self):
try:
self._zmq_test()
finally:
# Destroy the zmq context
self.log.debug("Destroying zmq context")
self.zmqContext.destroy(linger=None)
def _zmq_test(self):
genhashes = self.nodes[0].generate(1)
self.sync_all()
self.log.info("Wait for tx")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashtx")
txhash = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) # must be sequence 0 on hashtx
# rawtx
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"rawtx")
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) # must be sequence 0 on rawtx
# Check that the rawtx hashes to the hashtx
assert_equal(hash256(body), txhash)
self.log.info("Wait for block")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashblock")
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) # must be sequence 0 on hashblock
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) # blockhash from generate must be equal to the hash received over zmq
# rawblock
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"rawblock")
body = msg[1]
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, 0) #must be sequence 0 on rawblock
# Check the hash of the rawblock's header matches generate
assert_equal(genhashes[0], bytes_to_hex_str(hash256(body[:80])))
self.log.info("Generate 10 blocks (and 10 coinbase txes)")
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
zmqRawHashed = []
blockcount = 0
for x in range(n * 4):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount + 1)
blockcount += 1
if topic == b"rawblock":
zmqRawHashed.append(bytes_to_hex_str(hash256(body[:80])))
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount)
for x in range(n):
assert_equal(genhashes[x], zmqHashes[x]) # blockhash from generate must be equal to the hash received over zmq
assert_equal(genhashes[x], zmqRawHashed[x])
self.log.info("Wait for tx from second node")
# test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"hashtx")
body = msg[1]
hashZMQ = bytes_to_hex_str(body)
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount + 1)
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
assert_equal(topic, b"rawtx")
body = msg[1]
hashedZMQ = bytes_to_hex_str(hash256(body))
msgSequence = struct.unpack('<I', msg[-1])[-1]
assert_equal(msgSequence, blockcount+1)
assert_equal(hashRPC, hashZMQ) # txid from sendtoaddress must be equal to the hash received over zmq
assert_equal(hashRPC, hashedZMQ)
if __name__ == '__main__':
ZMQTest().main()
| 39.006494 | 123 | 0.611287 |
4a235cf1280c7f706ef866a37d435aa931518196 | 1,944 | py | Python | 2/q2_svm_python/svmlin.py | danikhani/ML-I_WS2020 | e96555f3993b48511249d1677b39ae4e589e7bdb | [
"MIT"
] | null | null | null | 2/q2_svm_python/svmlin.py | danikhani/ML-I_WS2020 | e96555f3993b48511249d1677b39ae4e589e7bdb | [
"MIT"
] | null | null | null | 2/q2_svm_python/svmlin.py | danikhani/ML-I_WS2020 | e96555f3993b48511249d1677b39ae4e589e7bdb | [
"MIT"
] | null | null | null | import numpy as np
# might need to add path to mingw-w64/bin for cvxopt to work
#import os
#os.environ["PATH"] += os.pathsep + ...
from cvxopt import matrix as cvxopt_matrix
from cvxopt import solvers as cvxopt_solvers
def svmlin(X, t, C):
# Linear SVM Classifier
#
# INPUT:
# X : the dataset (num_samples x dim)
# t : labeling (num_samples x 1)
# C : penalty factor for slack variables (scalar)
#
# OUTPUT:
# alpha : output of quadprog function (num_samples x 1)
# sv : support vectors (boolean) (1 x num_samples)
# w : parameters of the classifier (1 x dim)
# b : bias of the classifier (scalar)
# result : result of classification (1 x num_samples)
# slack : points inside the margin (boolean) (1 x num_samples)
#####Insert your code here for subtask 2a#####
m, n = X.shape
y = t.reshape(-1, 1) * 1.
X_dash = y * X
H = np.dot(X_dash, X_dash.T) * 1.
P = cvxopt_matrix(H)
q = cvxopt_matrix(-np.ones((m, 1)))
    # Box constraints 0 <= alpha_i <= C (soft margin): stack the lower and upper bounds.
    G = cvxopt_matrix(np.vstack([-np.eye(m), np.eye(m)]))
    h = cvxopt_matrix(np.hstack([np.zeros(m), C * np.ones(m)]))
A = cvxopt_matrix(y.reshape(1, -1))
b = cvxopt_matrix(np.zeros(1))
#n = X.shape[0]
#q = (-1)* np.ones(n)
#G = np.vstack([-np.eye(n), np.eye(n)])
#A = t
#b = 0
#h = np.hstack([0,C])
#P = np.full((n,n),0)
#for i in range(n):
# for j in range(n):
# P[i,j] = t[i]*t[j]*np.dot(X[i],X[j])
    sol = cvxopt_solvers.qp(P, q, G, h, A, b)
    alpha = np.array(sol['x']).flatten()

    # support vectors: samples with non-negligible Lagrange multipliers
    sv = alpha > 1e-4
    # w parameter in vectorized form: w = sum_i alpha_i * t_i * x_i
    w = (alpha * y.flatten()) @ X
    # bias computed from the support vectors that lie exactly on the margin (alpha_i < C)
    margin = sv & (alpha < C - 1e-4)
    b = np.mean(y.flatten()[margin] - X[margin] @ w)
    # classification result and points lying inside the margin (slack)
    result = np.sign(X @ w + b)
    slack = (y.flatten() * (X @ w + b)) < 1
    return alpha, sv, w, b, result, slack
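# Minimal usage sketch (not part of the exercise template): the data below are toy values
# chosen only to show the calling convention described in the docstring.
# X_toy = np.array([[0., 1.], [1., 0.], [3., 4.], [4., 3.]])
# t_toy = np.array([1., 1., -1., -1.])
# alpha, sv, w, b, result, slack = svmlin(X_toy, t_toy, C=1.0)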
| 29.454545 | 75 | 0.552469 |
4a235d6a6e6193c8d43c3904845a874971e757d7 | 1,539 | py | Python | flappy_app/alex.py | Alexelofu/sqlite | 3cf9b212d06607256790a8b6983dc8cf6d79bcef | [
"MIT"
] | null | null | null | flappy_app/alex.py | Alexelofu/sqlite | 3cf9b212d06607256790a8b6983dc8cf6d79bcef | [
"MIT"
] | null | null | null | flappy_app/alex.py | Alexelofu/sqlite | 3cf9b212d06607256790a8b6983dc8cf6d79bcef | [
"MIT"
] | null | null | null | import pandas as pd
from flask import Flask
app = Flask(__name__)
@app.route("/head", methods = ['POST', 'GET'])
def head():
df = pd.read_csv('http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/train.csv')
return df.head().to_html()
@app.route("/tail", methods = ['POST', 'GET'])
def tail():
df = pd.read_csv('http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/train.csv')
return df.tail().to_html()
@app.route("/describe", methods = ['POST', 'GET'])
def describe():
df = pd.read_csv('http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/train.csv')
return df.describe().to_html()
if __name__ == "__main__":
app.run()
# @app.route('/view_data', methods = ['POST'])
# def get_head_tail_info():
# # get the cleaned dataset
# read_file = pd.read_csv('http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/train.csv')
# # get the form submit button whose name is head and value is head
# if request.form.get("head") == "head":
# # show just the head
# return read_file.head().to_html()
# # get the form submit button whose name is tail and value is tail
# elif request.form.get("tail") == "tail":
# # show just the tail
# return read_file.tail().to_html()
# # get the form submit button whose name is info and value is info
# elif request.form.get('info') == "info":
# # return the dataset description
# return read_file.describe().to_html() | 33.456522 | 101 | 0.621183 |
4a2363116ac806f3279573bb86f6399200073731 | 5,949 | py | Python | pytorch_helper_bot/examples/imagenette/main.py | imazerty/imet-collection-2019 | d89cd7a5cea44f3bcbf13e07edb9d2fe4a7b0141 | [
"Apache-2.0"
] | 6 | 2019-07-02T07:53:32.000Z | 2019-08-08T01:13:26.000Z | pytorch_helper_bot/examples/imagenette/main.py | imazerty/imet-collection-2019 | d89cd7a5cea44f3bcbf13e07edb9d2fe4a7b0141 | [
"Apache-2.0"
] | 2 | 2019-07-21T07:30:05.000Z | 2019-11-20T09:15:30.000Z | pytorch_helper_bot/examples/imagenette/main.py | imazerty/imet-collection-2019 | d89cd7a5cea44f3bcbf13e07edb9d2fe4a7b0141 | [
"Apache-2.0"
] | 2 | 2019-08-08T01:13:29.000Z | 2019-11-19T18:08:17.000Z | import argparse
import json
from pathlib import Path
from dataclasses import dataclass
import pandas as pd
import torch
from torch import nn, cuda
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
from helperbot import (
BaseBot, WeightDecayOptimizerWrapper, TriangularLR,
GradualWarmupScheduler, LearningRateSchedulerCallback,
MixUpCallback, Top1Accuracy, TopKAccuracy
)
from helperbot.loss import MixUpSoftmaxLoss
from models import get_seresnet_model, get_densenet_model
from dataset import TrainDataset, N_CLASSES, DATA_ROOT, build_dataframe_from_folder
from transforms import train_transform, test_transform
try:
from apex import amp
APEX_AVAILABLE = True
except ModuleNotFoundError:
APEX_AVAILABLE = False
CACHE_DIR = Path('./data/cache/')
CACHE_DIR.mkdir(exist_ok=True, parents=True)
MODEL_DIR = Path('./data/cache/')
MODEL_DIR.mkdir(exist_ok=True, parents=True)
NO_DECAY = [
'bias', 'bn1.weight', 'bn2.weight', 'bn3.weight'
]
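# Illustrative note (not from the original script): the substrings above are matched against
# parameter names when building the optimizer groups below, e.g.
#   any(nd in 'layer1.0.bn1.weight' for nd in NO_DECAY)  -> True   (excluded from weight decay)
#   any(nd in 'conv1.weight' for nd in NO_DECAY)         -> False  (weight decay applied)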
def make_loader(args, ds_class, df: pd.DataFrame, image_transform, drop_last=False, shuffle=False) -> DataLoader:
return DataLoader(
ds_class(df, image_transform, debug=args.debug),
shuffle=shuffle,
batch_size=args.batch_size,
num_workers=args.workers,
drop_last=drop_last
)
@dataclass
class ImageClassificationBot(BaseBot):
checkpoint_dir: Path = CACHE_DIR / "model_cache/"
log_dir: Path = MODEL_DIR / "logs/"
def __post_init__(self):
super().__post_init__()
self.loss_format = "%.6f"
self.metrics = (Top1Accuracy(), TopKAccuracy(k=3))
self.monitor_metric = "accuracy"
def extract_prediction(self, x):
return x
def train_from_scratch(args, model, train_loader, valid_loader, criterion):
n_steps = len(train_loader) * args.epochs
optimizer = WeightDecayOptimizerWrapper(
torch.optim.Adam(
[
{
'params': [p for n, p in model.named_parameters()
if not any(nd in n for nd in NO_DECAY)],
},
{
'params': [p for n, p in model.named_parameters()
if any(nd in n for nd in NO_DECAY)],
}
],
weight_decay=0,
lr=args.lr
),
weight_decay=[1e-1, 0],
change_with_lr=True
)
if args.debug:
print(
"No decay:",
[n for n, p in model.named_parameters()
if any(nd in n for nd in NO_DECAY)]
)
if args.amp:
if not APEX_AVAILABLE:
raise ValueError("Apex is not installed!")
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.amp
)
callbacks = [
LearningRateSchedulerCallback(
# TriangularLR(
# optimizer, 100, ratio=4, steps_per_cycle=n_steps
# )
GradualWarmupScheduler(
optimizer, 100, len(train_loader),
after_scheduler=CosineAnnealingLR(
optimizer, n_steps - len(train_loader)
)
)
)
]
if args.mixup_alpha:
callbacks.append(MixUpCallback(
alpha=args.mixup_alpha, softmax_target=True))
bot = ImageClassificationBot(
model=model, train_loader=train_loader,
val_loader=valid_loader, clip_grad=10.,
optimizer=optimizer, echo=True,
criterion=criterion,
avg_window=len(train_loader) // 5,
callbacks=callbacks,
pbar=True, use_tensorboard=True,
use_amp=(args.amp != '')
)
bot.train(
n_steps,
log_interval=len(train_loader) // 6,
snapshot_interval=len(train_loader) // 2,
# early_stopping_cnt=8,
min_improv=1e-2,
keep_n_snapshots=1
)
bot.remove_checkpoints(keep=1)
bot.load_model(bot.best_performers[0][1])
torch.save(bot.model.state_dict(), CACHE_DIR /
f"final_weights.pth")
bot.remove_checkpoints(keep=0)
def main():
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('--batch-size', type=int, default=32)
arg('--lr', type=float, default=2e-3)
arg('--workers', type=int, default=4)
arg('--epochs', type=int, default=5)
arg('--mixup-alpha', type=float, default=0)
arg('--arch', type=str, default='seresnext50')
arg('--amp', type=str, default='')
arg('--debug', action='store_true')
args = parser.parse_args()
train_dir = DATA_ROOT / 'train'
valid_dir = DATA_ROOT / 'val'
use_cuda = cuda.is_available()
if args.arch == 'seresnext50':
model = get_seresnet_model(
arch="se_resnext50_32x4d",
n_classes=N_CLASSES, pretrained=False)
elif args.arch == 'seresnext101':
model = get_seresnet_model(
arch="se_resnext101_32x4d",
n_classes=N_CLASSES, pretrained=False)
elif args.arch.startswith("densenet"):
model = get_densenet_model(arch=args.arch)
else:
raise ValueError("No such model")
if use_cuda:
model = model.cuda()
criterion = MixUpSoftmaxLoss(nn.CrossEntropyLoss())
(CACHE_DIR / 'params.json').write_text(
json.dumps(vars(args), indent=4, sort_keys=True))
df_train, class_map = build_dataframe_from_folder(train_dir)
df_valid = build_dataframe_from_folder(valid_dir, class_map)
train_loader = make_loader(
args, TrainDataset, df_train, train_transform, drop_last=True, shuffle=True)
valid_loader = make_loader(
args, TrainDataset, df_valid, test_transform, shuffle=False)
print(f'{len(train_loader.dataset):,} items in train, '
f'{len(valid_loader.dataset):,} in valid')
train_from_scratch(args, model, train_loader, valid_loader, criterion)
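# Illustrative invocation (arguments and values are placeholders, not taken from the
# original repository):
#   python main.py --arch densenet121 --batch-size 32 --epochs 5 --mixup-alpha 0.2 --amp O1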
if __name__ == '__main__':
main()
| 31.643617 | 113 | 0.634729 |
4a2363c81f53de066f94b84111fd031f453101ac | 89 | py | Python | src/PyANN/__init__.py | matt-plank/Py-ANN | 34f87312821fb710c4bcbba316074af541d37d9a | [
"MIT"
] | null | null | null | src/PyANN/__init__.py | matt-plank/Py-ANN | 34f87312821fb710c4bcbba316074af541d37d9a | [
"MIT"
] | null | null | null | src/PyANN/__init__.py | matt-plank/Py-ANN | 34f87312821fb710c4bcbba316074af541d37d9a | [
"MIT"
] | null | null | null | from .utils import *
from .functions import *
from .layers import *
from .model import *
| 17.8 | 24 | 0.730337 |
4a2365706a965c7444edf637400ee365c15bf302 | 394 | py | Python | src/sales/migrations/0007_rename_seller_product_archive_id_saleproduct_seller_product_archive.py | evis-market/web-interface-backend | f8930ff1c009ad18e522ab29680b4bcd50a6020e | [
"MIT"
] | 2 | 2021-08-30T22:58:32.000Z | 2021-12-12T10:47:52.000Z | src/sales/migrations/0007_rename_seller_product_archive_id_saleproduct_seller_product_archive.py | evis-market/web-interface-backend | f8930ff1c009ad18e522ab29680b4bcd50a6020e | [
"MIT"
] | null | null | null | src/sales/migrations/0007_rename_seller_product_archive_id_saleproduct_seller_product_archive.py | evis-market/web-interface-backend | f8930ff1c009ad18e522ab29680b4bcd50a6020e | [
"MIT"
] | 1 | 2021-08-22T19:12:44.000Z | 2021-08-22T19:12:44.000Z | # Generated by Django 3.2.7 on 2021-10-22 18:35
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sales', '0006_saleproduct'),
]
operations = [
migrations.RenameField(
model_name='saleproduct',
old_name='seller_product_archive_id',
new_name='seller_product_archive',
),
]
| 20.736842 | 49 | 0.616751 |
4a23659ebb9f9542bd2236f8619059c860df9542 | 11,657 | py | Python | test/decorators.py | daka1510/qiskit-ibm-provider | 74fdae4457e6f15ba445a8742a25ac7a714ea51b | [
"Apache-2.0"
] | null | null | null | test/decorators.py | daka1510/qiskit-ibm-provider | 74fdae4457e6f15ba445a8742a25ac7a714ea51b | [
"Apache-2.0"
] | null | null | null | test/decorators.py | daka1510/qiskit-ibm-provider | 74fdae4457e6f15ba445a8742a25ac7a714ea51b | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Decorators for using with IBM Provider unit tests.
Environment variables used by the decorators:
* QISKIT_IBM_API_TOKEN: default API token to use.
* QISKIT_IBM_API_URL: default API url to use.
* QISKIT_IBM_HGP: default hub/group/project to use.
* QISKIT_IBM_PRIVATE_HGP: hub/group/project to use for private jobs.
* QISKIT_IBM_DEVICE: default device to use.
* QISKIT_IBM_USE_STAGING_CREDENTIALS: True if use staging credentials.
* QISKIT_IBM_STAGING_API_TOKEN: staging API token to use.
* QISKIT_IBM_STAGING_API_URL: staging API url to use.
* QISKIT_IBM_STAGING_HGP: staging hub/group/project to use.
* QISKIT_IBM_STAGING_DEVICE: staging device to use.
* QISKIT_IBM_STAGING_PRIVATE_HGP: staging hub/group/project to use for private jobs.
"""
import os
from functools import wraps
from typing import Tuple
from unittest import SkipTest
from qiskit.test.testing_options import get_test_options
from qiskit_ibm_provider import IBMProvider
from qiskit_ibm_provider import least_busy
from qiskit_ibm_provider.credentials import Credentials, discover_credentials
def requires_qe_access(func):
"""Decorator that signals that the test uses the online API.
It involves:
* determines if the test should be skipped by checking environment
variables.
* if the `QISKIT_IBM_USE_STAGING_CREDENTIALS` environment variable is
set, it reads the credentials from an alternative set of environment
variables.
* if the test is not skipped, it reads `qe_token` and `qe_url` from
environment variables or qiskitrc.
* if the test is not skipped, it appends `qe_token` and `qe_url` as
arguments to the test function.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
def _wrapper(obj, *args, **kwargs):
if get_test_options()["skip_online"]:
raise SkipTest("Skipping online tests")
credentials = _get_credentials()
kwargs.update({"qe_token": credentials.token, "qe_url": credentials.url})
return func(obj, *args, **kwargs)
return _wrapper
def requires_providers(func):
"""Decorator that signals the test uses the online API, via a public and premium hgp.
This decorator delegates into the `requires_qe_access` decorator and appends a provider,
an open access hub/group/project and a premium hub/group/project to the decorated function.
Args:
func (callable): Test function to be decorated.
Returns:
callable: The decorated function.
"""
@wraps(func)
@requires_qe_access
def _wrapper(*args, **kwargs):
qe_token = kwargs.pop("qe_token")
qe_url = kwargs.pop("qe_url")
provider = IBMProvider(qe_token, qe_url)
# Get open access hgp
open_hgp = provider._get_hgp()
# Get a premium hgp
premium_hub, premium_group, premium_project = _get_custom_hgp()
if not all([premium_hub, premium_group, premium_project]):
raise SkipTest(
"Requires both the open access and premium hub/group/project."
)
kwargs.update(
{
"provider": provider,
"hgps": {
"open_hgp": {
"hub": open_hgp.credentials.hub,
"group": open_hgp.credentials.group,
"project": open_hgp.credentials.project,
},
"premium_hgp": {
"hub": premium_hub,
"group": premium_group,
"project": premium_project,
},
},
}
)
return func(*args, **kwargs)
return _wrapper
def requires_provider(func):
"""Decorator that signals the test uses the online API, via a custom hub/group/project.
This decorator delegates into the `requires_qe_access` decorator, but
instead of the credentials it appends a `provider` argument to the decorated
function. It also appends the custom `hub`, `group` and `project` arguments.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
@requires_qe_access
def _wrapper(*args, **kwargs):
token = kwargs.pop("qe_token")
url = kwargs.pop("qe_url")
provider = IBMProvider(token, url)
hub, group, project = _get_custom_hgp()
kwargs.update(
{"provider": provider, "hub": hub, "group": group, "project": project}
)
return func(*args, **kwargs)
return _wrapper
def requires_private_provider(func):
"""Decorator that signals the test requires a hub/group/project for private jobs.
This decorator appends `provider`, `hub`, `group` and `project` arguments to the decorated
function.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
@requires_qe_access
def _wrapper(*args, **kwargs):
token = kwargs.pop("qe_token")
url = kwargs.pop("qe_url")
provider = IBMProvider(token, url)
hub, group, project = _get_private_hgp()
kwargs.update(
{"provider": provider, "hub": hub, "group": group, "project": project}
)
return func(*args, **kwargs)
return _wrapper
def requires_device(func):
"""Decorator that retrieves the appropriate backend to use for testing.
It involves:
* Enable the account using credentials obtained from the
`requires_qe_access` decorator.
* Use the backend specified by `QISKIT_IBM_STAGING_DEVICE` if
`QISKIT_IBM_USE_STAGING_CREDENTIALS` is set, otherwise use the backend
specified by `QISKIT_IBM_DEVICE`.
* if device environment variable is not set, use the least busy
real backend.
* appends arguments `backend` to the decorated function.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
@requires_qe_access
def _wrapper(obj, *args, **kwargs):
backend_name = (
os.getenv("QISKIT_IBM_STAGING_DEVICE", None)
if os.getenv("QISKIT_IBM_USE_STAGING_CREDENTIALS", "")
else os.getenv("QISKIT_IBM_DEVICE", None)
)
_backend = _get_backend(
qe_token=kwargs.pop("qe_token"),
qe_url=kwargs.pop("qe_url"),
backend_name=backend_name,
)
kwargs.update({"backend": _backend})
return func(obj, *args, **kwargs)
return _wrapper
def requires_runtime_device(func):
"""Decorator that retrieves the appropriate backend to use for testing.
Args:
func (callable): test function to be decorated.
Returns:
callable: the decorated function.
"""
@wraps(func)
@requires_qe_access
def _wrapper(obj, *args, **kwargs):
backend_name = (
os.getenv("QISKIT_IBM_STAGING_RUNTIME_DEVICE", None)
if os.getenv("QISKIT_IBM_USE_STAGING_CREDENTIALS", "")
else os.getenv("QISKIT_IBM_RUNTIME_DEVICE", None)
)
if not backend_name:
raise SkipTest("Runtime device not specified")
_backend = _get_backend(
qe_token=kwargs.pop("qe_token"),
qe_url=kwargs.pop("qe_url"),
backend_name=backend_name,
)
kwargs.update({"backend": _backend})
return func(obj, *args, **kwargs)
return _wrapper
def _get_backend(qe_token, qe_url, backend_name):
"""Get the specified backend."""
provider = IBMProvider(qe_token, qe_url)
_backend = None
hub, group, project = _get_custom_hgp()
if backend_name:
_backend = provider.get_backend(
name=backend_name, hub=hub, group=group, project=project
)
else:
_backend = least_busy(
provider.backends(
simulator=False, min_num_qubits=5, hub=hub, group=group, project=project
)
)
if not _backend:
raise Exception("Unable to find a suitable backend.")
return _backend
def _get_credentials():
"""Finds the credentials for a specific test and options.
Returns:
Credentials: set of credentials
Raises:
Exception: When the credential could not be set and they are needed
for that set of options.
"""
if os.getenv("QISKIT_IBM_USE_STAGING_CREDENTIALS", ""):
# Special case: instead of using the standard credentials mechanism,
# load them from different environment variables. This assumes they
# will always be in place, as is used by the CI setup.
return Credentials(
token=os.getenv("QISKIT_IBM_STAGING_API_TOKEN"),
url=os.getenv("QISKIT_IBM_STAGING_API_URL"),
auth_url=os.getenv("QISKIT_IBM_STAGING_API_URL"),
)
# Attempt to read the standard credentials.
discovered_credentials, _ = discover_credentials()
if discovered_credentials:
# Decide which credentials to use for testing.
if len(discovered_credentials) > 1:
try:
# Attempt to use IBM Quantum credentials.
return discovered_credentials[(None, None, None)]
except KeyError:
pass
# Use the first available credentials.
return list(discovered_credentials.values())[0]
raise Exception("Unable to locate valid credentials.")
def _get_custom_hgp() -> Tuple[str, str, str]:
"""Get a custom hub/group/project
Gets the hub/group/project set in QISKIT_IBM_STAGING_HGP for staging env or
QISKIT_IBM_HGP for production env.
Returns:
Tuple of custom hub/group/project or ``None`` if not set.
"""
hub = None
group = None
project = None
hgp = (
os.getenv("QISKIT_IBM_STAGING_HGP", None)
if os.getenv("QISKIT_IBM_USE_STAGING_CREDENTIALS", "")
else os.getenv("QISKIT_IBM_HGP", None)
)
if hgp:
hub, group, project = hgp.split("/")
return hub, group, project
def _get_private_hgp() -> Tuple[str, str, str]:
"""Get a private hub/group/project
Gets the hub/group/project set in QISKIT_IBM_STAGING_PRIVATE_HGP for staging env or
QISKIT_IBM_PRIVATE_HGP for production env.
Returns:
Tuple of custom hub/group/project or ``None`` if not set.
Raises:
SkipTest: requires private provider
"""
hub = None
group = None
project = None
hgp = (
os.getenv("QISKIT_IBM_STAGING_PRIVATE_HGP", None)
if os.getenv("QISKIT_IBM_USE_STAGING_CREDENTIALS", "")
else os.getenv("QISKIT_IBM_PRIVATE_HGP", None)
)
if not hgp:
raise SkipTest("Requires private provider.")
hub, group, project = hgp.split("/")
return hub, group, project
| 33.401146 | 95 | 0.640474 |
4a2366212abce17d549575f3be126873e77c2a4c | 10,255 | py | Python | log/classification/pointnet_cls_Real_10_norm_z/train_dense_classification.py | congw112358/RAL2022_Code | 5e275cb8b389be745fee1285f58732456146f7af | [
"MIT"
] | null | null | null | log/classification/pointnet_cls_Real_10_norm_z/train_dense_classification.py | congw112358/RAL2022_Code | 5e275cb8b389be745fee1285f58732456146f7af | [
"MIT"
] | null | null | null | log/classification/pointnet_cls_Real_10_norm_z/train_dense_classification.py | congw112358/RAL2022_Code | 5e275cb8b389be745fee1285f58732456146f7af | [
"MIT"
] | null | null | null | """
Author: Benny
Date: Nov 2019
"""
import os
import sys
import torch
import numpy as np
import datetime
import logging
import provider
import importlib
import shutil
import argparse
from pathlib import Path
from tqdm import tqdm
from data_utils.OFFDataLoader import *
# from path import Path
# from data_utils.ModelNetDataLoader import ModelNetDataLoader
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))
def parse_args():
'''PARAMETERS'''
parser = argparse.ArgumentParser('training')
parser.add_argument('--use_cpu', action='store_true', default=False, help='use cpu mode')
parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
parser.add_argument('--batch_size', type=int, default=8, help='batch size in training')
parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]')
parser.add_argument('--num_category', default=10, type=int, choices=[10, 40], help='training on ModelNet10/40')
parser.add_argument('--epoch', default=100, type=int, help='number of epoch in training')
parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number')
parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training')
parser.add_argument('--log_dir', type=str, default=None, help='experiment root')
parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate')
parser.add_argument('--use_normals', action='store_true', default=False, help='use normals')
parser.add_argument('--process_data', action='store_true', default=False, help='save data offline')
parser.add_argument('--use_uniform_sample', action='store_true', default=False, help='use uniform sampiling')
parser.add_argument('--SO3_Rotation', action='store_true', default=False, help='arbitrary rotation in SO3')
return parser.parse_args()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def inplace_relu(m):
classname = m.__class__.__name__
if classname.find('ReLU') != -1:
m.inplace=True
def test(model, loader, num_class=10):
mean_correct = []
class_acc = np.zeros((num_class, 3))
classifier = model.eval()
for j, data in tqdm(enumerate(loader), total=len(loader)):
if not args.use_cpu:
points, target = data['pointcloud'].to(device).float(), data['category'].to(device)
points = points.transpose(2, 1)
pred, _ = classifier(points)
pred_choice = pred.data.max(1)[1]
for cat in np.unique(target.cpu()):
classacc = pred_choice[target == cat].eq(target[target == cat].long().data).cpu().sum()
class_acc[cat, 0] += classacc.item() / float(points[target == cat].size()[0])
class_acc[cat, 1] += 1
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
class_acc[:, 2] = class_acc[:, 0] / class_acc[:, 1]
class_acc = np.mean(class_acc[:, 2])
instance_acc = np.mean(mean_correct)
return instance_acc, class_acc
def main(args):
def log_string(str):
logger.info(str)
print(str)
'''HYPER PARAMETER'''
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
'''CREATE DIR'''
timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
exp_dir = Path('./log/')
exp_dir.mkdir(exist_ok=True)
exp_dir = exp_dir.joinpath('classification')
exp_dir.mkdir(exist_ok=True)
if args.log_dir is None:
exp_dir = exp_dir.joinpath(timestr)
else:
exp_dir = exp_dir.joinpath(args.log_dir)
exp_dir.mkdir(exist_ok=True)
checkpoints_dir = exp_dir.joinpath('checkpoints/')
checkpoints_dir.mkdir(exist_ok=True)
log_dir = exp_dir.joinpath('logs/')
log_dir.mkdir(exist_ok=True)
'''LOG'''
args = parse_args()
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
log_string('PARAMETER ...')
log_string(args)
'''DATA LOADING'''
log_string('Load dataset ...')
# data_path = 'data/modelnet40_normal_resampled/'
data_path = Path("mesh_data/ModelNet10")
train_transforms = transforms.Compose([
PointSampler(args.num_point, with_normal=args.use_normals),
Normalize(),
RandRotation_z(with_normal=args.use_normals, SO3=args.SO3_Rotation),
RandomNoise(),
ToTensor()
])
test_transforms = transforms.Compose([
PointSampler(args.num_point, with_normal=args.use_normals),
Normalize(),
RandRotation_z(with_normal=args.use_normals, SO3=args.SO3_Rotation),
RandomNoise(),
ToTensor()
])
# train_dataset = ModelNetDataLoader(root=data_path, args=args, split='train', process_data=args.process_data)
# test_dataset = ModelNetDataLoader(root=data_path, args=args, split='test', process_data=args.process_data)
train_dataset = PointCloudData(data_path, transform=train_transforms)
test_dataset = PointCloudData(data_path, valid=True, folder='test', transform=test_transforms)
trainDataLoader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)
testDataLoader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=10)
'''MODEL LOADING'''
num_class = args.num_category
model = importlib.import_module(args.model)
shutil.copy('./models/%s.py' % args.model, str(exp_dir))
shutil.copy('models/pointnet2_utils.py', str(exp_dir))
shutil.copy('./train_dense_classification.py', str(exp_dir))
# shutil.copy('./train_dense_classification.py', str(exp_dir))
classifier = model.get_model(num_class, normal_channel=args.use_normals)
criterion = model.get_loss()
classifier.apply(inplace_relu)
if not args.use_cpu:
classifier = classifier.cuda()
criterion = criterion.cuda()
try:
checkpoint = torch.load(str(exp_dir) + '/checkpoints/best_model.pth')
start_epoch = checkpoint['epoch']
classifier.load_state_dict(checkpoint['model_state_dict'])
log_string('Use pretrain model')
except:
log_string('No existing model, starting training from scratch...')
start_epoch = 0
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(
classifier.parameters(),
lr=args.learning_rate,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=args.decay_rate
)
else:
optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
global_epoch = 0
global_step = 0
best_instance_acc = 0.0
best_class_acc = 0.0
'''TRANING'''
logger.info('Start training...')
end_epoch = start_epoch + args.epoch
print("start epoch: ", start_epoch)
print("end epoch: ", end_epoch)
for epoch in range(start_epoch, end_epoch):
log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, end_epoch))
mean_correct = []
classifier = classifier.train()
scheduler.step()
for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
optimizer.zero_grad()
points, target = data['pointcloud'].to(device).float(), data['category'].to(device)
points = points.data.cpu().numpy()
points = provider.random_point_dropout(points)
points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
points = torch.Tensor(points)
points = points.transpose(2, 1)
if not args.use_cpu:
points, target = points.cuda(), target.cuda()
pred, trans_feat = classifier(points)
loss = criterion(pred, target.long(), trans_feat)
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
loss.backward()
optimizer.step()
global_step += 1
train_instance_acc = np.mean(mean_correct)
log_string('Train Instance Accuracy: %f' % train_instance_acc)
with torch.no_grad():
instance_acc, class_acc = test(classifier.eval(), testDataLoader, num_class=num_class)
if (instance_acc >= best_instance_acc):
best_instance_acc = instance_acc
best_epoch = epoch + 1
if (class_acc >= best_class_acc):
best_class_acc = class_acc
log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc))
log_string('Best Instance Accuracy: %f, Class Accuracy: %f' % (best_instance_acc, best_class_acc))
if (instance_acc >= best_instance_acc):
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/best_model.pth'
log_string('Saving at %s' % savepath)
state = {
'epoch': best_epoch,
'instance_acc': instance_acc,
'class_acc': class_acc,
'model_state_dict': classifier.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
global_epoch += 1
logger.info('End of training...')
if __name__ == '__main__':
# torch.cuda.empty_cache()
args = parse_args()
main(args)
| 38.698113 | 138 | 0.648854 |
4a2366a9b3e13d0701c57377f7083c158540644e | 10,577 | py | Python | lifxlan/tilechain.py | spacelama/lifxlan | 3db9afc1a89b60fc659969f2fa8485caf65b53b5 | [
"MIT"
] | 2 | 2017-02-22T14:42:56.000Z | 2018-11-25T19:33:13.000Z | lifxlan/tilechain.py | spacelama/lifxlan | 3db9afc1a89b60fc659969f2fa8485caf65b53b5 | [
"MIT"
] | 5 | 2018-06-02T05:54:52.000Z | 2021-02-10T21:14:34.000Z | lifxlan/tilechain.py | spacelama/lifxlan | 3db9afc1a89b60fc659969f2fa8485caf65b53b5 | [
"MIT"
] | 1 | 2019-10-30T04:56:54.000Z | 2019-10-30T04:56:54.000Z | import os
from .errors import WorkflowException, InvalidParameterException
from .light import Light
from .msgtypes import GetTileState64, StateTileState64, SetTileState64, GetDeviceChain, StateDeviceChain, SetUserPosition
from threading import Thread
class TileChain(Light):
def __init__(self, mac_addr, ip_addr, service=1, port=56700, source_id=os.getpid(), verbose=False):
super(TileChain, self).__init__(mac_addr, ip_addr, service, port, source_id, verbose)
self.tile_info = None
self.tile_count = None
self.tile_map = None
self.canvas_dimensions = None
self.get_tile_info()
self.get_tile_map()
self.get_canvas_dimensions()
# returns information about all tiles
def get_tile_info(self, refresh_cache=False):
if (self.tile_info == None) or (refresh_cache == True):
response = self.req_with_resp(GetDeviceChain, StateDeviceChain)
tiles = []
for tile in response.tile_devices:
t = Tile(tile["user_x"], tile["user_y"], tile["width"], tile["height"], tile["device_version_vendor"], tile["device_version_product"], tile["device_version_version"], tile["firmware_build"], tile["firmware_version"])
tiles.append(t)
self.tile_info = tiles[:response.total_count]
self.tile_count = response.total_count
return self.tile_info
def get_tile_count(self, refresh_cache=False):
if (self.tile_count == None) or (refresh_cache == True):
response = self.req_with_resp(GetDeviceChain, StateDeviceChain)
self.tile_count = response.total_count
return self.tile_count
def get_tile_colors(self, start_index, tile_count=1, x=0, y=0, width=8):
if (start_index < 0) or (start_index >= self.tile_count):
raise InvalidParameterException("{} is not a valid start_index for TileChain with {} tiles.".format(start_index, self.tile_count))
colors = []
for i in range(tile_count):
payload = {"tile_index": start_index + i,
"length": 1,
"reserved": 0,
"x": x,
"y": y,
"width": width}
response = self.req_with_resp(GetTileState64, StateTileState64, payload)
colors.append(response.colors)
return colors
def get_tilechain_colors(self):
tilechain_colors = []
for i in range(self.tile_count):
tile_colors = self.get_tile_colors(i)
tilechain_colors.append(tile_colors[0])
return tilechain_colors
def set_tile_colors(self, start_index, colors, duration=0, tile_count=1, x=0, y=0, width=8, rapid=False):
if (start_index < 0) or (start_index >= self.tile_count):
raise InvalidParameterException("{} is not a valid start_index for TileChain with {} tiles.".format(start_index, self.tile_count))
payload = {"tile_index": start_index,
"length": tile_count,
"colors": colors,
"duration": duration,
"reserved": 0,
"x": x,
"y": y,
"width": width}
if not rapid:
self.req_with_ack(SetTileState64, payload)
else:
self.fire_and_forget(SetTileState64, payload, num_repeats=1)
def set_tilechain_colors(self, tilechain_colors, duration=0, rapid=False):
threads = []
for i in range(self.tile_count):
t = Thread(target = self.set_tile_colors, args = ((i, tilechain_colors[i], duration, 1, 0, 0, 8, rapid)))
threads.append(t)
t.start()
for t in threads:
t.join()
def recenter_coordinates(self):
num_tiles = self.get_tile_count()
x_vals, y_vals = self.get_xy_vals()
x_vals = self.center_axis(x_vals)
y_vals = self.center_axis(y_vals)
centered_coordinates = list(zip(x_vals, y_vals))
for (tile_index, (user_x, user_y)) in enumerate(centered_coordinates):
self.set_tile_coordinates(tile_index, user_x, user_y)
def project_matrix(self, hsvk_matrix, duration = 0, rapid=False):
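        # Split a full-canvas HSBK matrix into per-tile pixel lists using the tile map, then update every tile in a separate thread.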
num_tiles = self.get_tile_count()
canvas_x, canvas_y = self.get_canvas_dimensions()
matrix_x = len(hsvk_matrix[0])
matrix_y = len(hsvk_matrix)
if (matrix_x != canvas_x) or (matrix_y != canvas_y):
raise InvalidParameterException("Warning: TileChain canvas wants a {} x {} matrix, but given matrix is {} x {}.".format(canvas_x, canvas_y, matrix_x, matrix_y))
tile_width = 8 # hardcoded, argh
tile_height = 8
default_color = (0, 0, 0, 0)
tile_map = self.get_tile_map()
tile_colors = [[default_color for i in range(tile_width * tile_height)] for j in range(num_tiles)]
rows = canvas_y
cols = canvas_x
for row in range(rows):
for col in range(cols):
if tile_map[row][col] != 0:
(tile_num, color_num) = tile_map[row][col]
tile_colors[tile_num][color_num] = hsvk_matrix[row][col]
threads = []
for (i, tile_color) in enumerate(tile_colors):
t = Thread(target = self.set_tile_colors, args = ((i, tile_color, duration, 1, 0, 0, 8, rapid)))
threads.append(t)
t.start()
for t in threads:
t.join()
### HELPER FUNCTIONS
# danger zoooooone
def set_tile_coordinates(self, tile_index, x, y):
self.req_with_ack(SetUserPosition, {"tile_index": tile_index, "reserved": 0, "user_x": x, "user_y": y})
# update cached information
self.get_tile_info(refresh_cache=True)
self.get_tile_map(refresh_cache=True)
self.get_canvas_dimensions(refresh_cache=True)
def get_xy_vals(self):
tiles = self.get_tile_info()
num_tiles = self.get_tile_count()
x_vals = []
y_vals = []
for tile in tiles[:num_tiles]:
x_vals.append(tile.user_x)
y_vals.append(tile.user_y)
x_vals = self.center_axis(x_vals)
y_vals = self.center_axis(y_vals)
return x_vals, y_vals
def center_axis(self, axis_vals):
if 0.0 not in axis_vals:
smallest_val = min([abs(val) for val in axis_vals])
closest_val = 0.0
for val in axis_vals:
if abs(val) == smallest_val:
closest_val = val
axis_vals = [(-1*closest_val) + val for val in axis_vals]
return axis_vals
# all become non-negative -- shifts (0, 0) to the left/top
def shift_axis_upper_left(self, axis_vals, is_y = False):
if is_y:
axis_vals = [-1*val for val in axis_vals]
smallest_val = min(axis_vals)
axis_vals = [(-1*smallest_val) + val for val in axis_vals]
return axis_vals
def get_canvas_dimensions(self, refresh_cache=False):
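        # Canvas size in pixels: the bounding box of tile positions (in tile units) multiplied by the 8x8 pixels of each tile.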
if (self.canvas_dimensions == None) or (refresh_cache == True):
x_vals, y_vals = self.get_xy_vals()
min_x = min(x_vals)
max_x = max(x_vals)
min_y = min(y_vals)
max_y = max(y_vals)
x_tilespan = (max_x - min_x) + 1
y_tilespan = (max_y - min_y) + 1
tile_width = 8 #TO DO: get these programmatically for each light from the tile info
tile_height = 8 #TO DO: get these programmatically for each light from the tile info
canvas_x = int(x_tilespan * tile_width)
canvas_y = int(y_tilespan * tile_height)
self.canvas_dimensions = (canvas_x, canvas_y)
return self.canvas_dimensions
def get_tile_map(self, refresh_cache=False):
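        # Build and cache a canvas-sized lookup table mapping every canvas pixel to (tile index, pixel index within that tile).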
if (self.tile_map == None) or (refresh_cache == True):
num_tiles = self.get_tile_count()
tile_width = 8 #TO DO: get these programmatically for each light from the tile info
tile_height = 8 #TO DO: get these programmatically for each light from the tile info
(x, y) = self.get_canvas_dimensions()
#print(x, y)
tile_map = [[0 for i in range(x)] for j in range(y)]
tiles = self.get_tile_info()
x_vals, y_vals = self.get_xy_vals()
x_vals = self.shift_axis_upper_left(x_vals)
y_vals = self.shift_axis_upper_left(y_vals, is_y=True)
for i in range(num_tiles):
tile = tiles[i]
x_start_tilespan = x_vals[i]
y_start_tilespan = y_vals[i]
#print(i, x_start_tilespan, y_start_tilespan)
x_start_pixel = int(x_start_tilespan * tile_width)
y_start_pixel = int(y_start_tilespan * tile_height)
for j in range(y_start_pixel, y_start_pixel + tile_width):
for k in range(x_start_pixel, x_start_pixel + tile_height):
j0 = j - y_start_pixel
k0 = k - x_start_pixel
tile_map[j][k] = (i, (j0*tile_width + k0))
#for row in tile_map:
# print(row)
self.tile_map = tile_map
return self.tile_map
class Tile(object):
def __init__(self, user_x, user_y, width=8, height=8, device_version_vendor=None, device_version_product=None, device_version_version=None, firmware_build=None, firmware_version=None):
super(Tile, self).__init__()
self.user_x = user_x
self.user_y = user_y
self.width = width
self.height = height
self.device_version_vendor = device_version_vendor
self.device_version_product = device_version_product
self.device_version_version = device_version_version
self.firmware_build = firmware_build
self.firmware_version = firmware_version
def __str__(self):
s = "\nTile at {}, {}:".format(self.user_x, self.user_y)
s += "\n User X: " + str(self.user_x)
s += "\n User Y: " + str(self.user_y)
s += "\n Width: " + str(self.width)
s += "\n Height: " + str(self.height)
s += "\n Device Version Vendor: " + str(self.device_version_vendor)
s += "\n Device Version Product: " + str(self.device_version_product)
s += "\n Device Version Version: " + str(self.device_version_version)
s += "\n Firmware Build: " + str(self.firmware_build)
s += "\n Firmware Version: " + str(self.firmware_version)
return s
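# Usage sketch (not part of the original file; assumes a LIFX tile chain is reachable on the LAN
# and that lifxlan's LifxLAN.get_tilechain_lights() discovery helper is available):
#     from lifxlan import LifxLAN
#     chain = LifxLAN().get_tilechain_lights()[0]      # TileChain instance
#     width, height = chain.get_canvas_dimensions()    # canvas size in pixels
#     red = [(0, 65535, 65535, 3500)] * 64             # HSBK values for one 8x8 tile
#     chain.set_tile_colors(0, red)                    # paint the first tile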
| 44.25523 | 232 | 0.606316 |
4a23680470163089ed7d1aad975a657f7c5715bf | 982 | py | Python | faceweb/faceapp/migrations/0009_auto_20200523_0646.py | Dheeraj2407/Face-web | e73696e0ff12fc19a8d88ccaf2a830db4fcbb102 | [
"MIT"
] | null | null | null | faceweb/faceapp/migrations/0009_auto_20200523_0646.py | Dheeraj2407/Face-web | e73696e0ff12fc19a8d88ccaf2a830db4fcbb102 | [
"MIT"
] | null | null | null | faceweb/faceapp/migrations/0009_auto_20200523_0646.py | Dheeraj2407/Face-web | e73696e0ff12fc19a8d88ccaf2a830db4fcbb102 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2020-05-23 06:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('faceapp', '0008_auto_20200522_0825'),
]
operations = [
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=50)),
],
),
migrations.AlterField(
model_name='teacherclass',
name='subject',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='faceapp.Subject'),
),
migrations.AlterField(
model_name='timetable',
name='subject',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='faceapp.Subject'),
),
]
| 30.6875 | 114 | 0.596741 |
4a236887b94e4f916a97466ce9370e4e7030b5af | 1,289 | py | Python | arcsde/forms.py | powderflask/django-arcsde | f7ee70e8a5cf14aeda6af137fa7ab301634fbb85 | [
"MIT"
] | null | null | null | arcsde/forms.py | powderflask/django-arcsde | f7ee70e8a5cf14aeda6af137fa7ab301634fbb85 | [
"MIT"
] | null | null | null | arcsde/forms.py | powderflask/django-arcsde | f7ee70e8a5cf14aeda6af137fa7ab301634fbb85 | [
"MIT"
] | null | null | null | from django import forms
#####################################################################
# Base form class used to share common functionality for SDE report forms.
# This class is ABSTRACT -- must be sub-classed to be of use!
#####################################################################
class AbstractSdeForm(forms.ModelForm):
"""
Uses SDE revision tracking fields to leave an audit trail when form is saved.
Ideally this would be done at the model level... but how to get request.user there at right moment?
"""
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None) # we need the user to track revisions (audit trail) during save
super().__init__(*args, **kwargs)
def set_user(self, user):
""" Set the auth.user who is modifying this form to leave last_edited audit trail on save """
self.user = user
def save(self, commit=True):
""" Try to update the revision / audit trail (last_edited_user) for the model being saved """
try:
self.instance.update_edited_by(self.user.username) if self.user else None
except AttributeError:
pass
# May also want to exclude writing shape fields here?
return super().save(commit) | 42.966667 | 109 | 0.592708 |
4a2368ea43e898904e7cad11db402c095231fd20 | 2,542 | py | Python | Projects/YTdownloader/main.py | Abbhiishek/Python | 3ad5310ca29469f353f9afa99531f01273eec6bd | [
"MIT"
] | 1 | 2022-02-04T07:04:34.000Z | 2022-02-04T07:04:34.000Z | Projects/YTdownloader/main.py | Abbhiishek/Python | 3ad5310ca29469f353f9afa99531f01273eec6bd | [
"MIT"
] | 12 | 2022-02-13T12:10:32.000Z | 2022-02-17T09:36:49.000Z | Projects/YTdownloader/main.py | Abbhiishek/Python | 3ad5310ca29469f353f9afa99531f01273eec6bd | [
"MIT"
] | null | null | null | # libraries
import pytube
import sys
class YouTubeDownloder:
def __init__(self):
        self.url = str(input("Enter the URL of the video: "))
self.youtube = pytube.YouTube(
self.url, on_progress_callback=YouTubeDownloder.onProgress
)
self.showTitle()
def showTitle(self):
print("title : {0}\n".format(self.youtube.title))
self.showStreams()
def showStreams(self):
self.streamNo = 1
for stream in self.youtube.streams.filter(file_extension='mp4'):
print(
"{0} => resolution:{1} // type:{2} //".format(
self.streamNo, stream.resolution, stream.type
)
)
self.streamNo += 1
self.chooseStream()
def chooseStream(self):
self.choose = int(input("please select one : "))
self.validateChooseValue()
def validateChooseValue(self):
if self.choose in range(1, self.streamNo):
self.getStream()
else:
            print("Please enter a valid option from the list.")
self.chooseStream()
def getStream(self):
self.stream = self.youtube.streams[self.choose - 1]
self.getFileSize()
def getFileSize(self):
global file_size
file_size = self.stream.filesize / 1000000
self.getPermisionToContinue()
def getPermisionToContinue(self):
print(
"\n title : {0} \n author : {1} \n size : {2:.2f}MB \n resolution : {3} \n fps : {4} \n ".format(
self.youtube.title,
self.youtube.author,
file_size,
self.stream.resolution,
self.stream.fps,
)
)
        if input("Do you want it? (default = (y)es) or (n)o ") == "n":
self.showStreams()
else:
self.main()
def download(self):
self.stream.download()
@staticmethod
def onProgress(stream=None, chunk=None, remaining=None):
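        # pytube progress callback: convert the bytes remaining into MB downloaded and print a running percentage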
file_downloaded = file_size - (remaining / 1000000)
print(
f"downloading ... {file_downloaded / file_size * 100:0.2f} % [{file_downloaded:.1f}MB of {file_size:.1f}MB]",
end="\r",
)
def main(self):
try:
self.download()
except KeyboardInterrupt:
print("Canceled. ")
sys.exit(0)
if __name__ == "__main__":
try:
YouTubeDownloder()
except KeyboardInterrupt:
pass
except Exception as e:
print(e)
| 27.934066 | 121 | 0.5476 |
4a236948bc6d0086f9be493ef2213c7a54c83aae | 13,841 | py | Python | windows_lnk_file.py | Vector35/kaitai | 71fd8c31289aaeba12f48ae394631f9a56cfe056 | [
"MIT"
] | 20 | 2019-09-28T01:44:58.000Z | 2022-03-09T08:35:56.000Z | windows_lnk_file.py | Vector35/kaitai | 71fd8c31289aaeba12f48ae394631f9a56cfe056 | [
"MIT"
] | 4 | 2020-12-23T01:51:26.000Z | 2021-12-15T14:41:50.000Z | windows_lnk_file.py | Vector35/kaitai | 71fd8c31289aaeba12f48ae394631f9a56cfe056 | [
"MIT"
] | 4 | 2020-02-20T18:47:27.000Z | 2021-06-17T01:24:09.000Z | # This is a generated file! Please edit source .ksy file and use kaitai-struct-compiler to rebuild
from pkg_resources import parse_version
from . import kaitaistruct
from .kaitaistruct import KaitaiStruct, KaitaiStream, BytesIO
from enum import Enum
if parse_version(kaitaistruct.__version__) < parse_version('0.9'):
raise Exception("Incompatible Kaitai Struct Python API: 0.9 or later is required, but you have %s" % (kaitaistruct.__version__))
from . import windows_shell_items
class WindowsLnkFile(KaitaiStruct):
"""Windows .lnk files (AKA "shell link" file) are most frequently used
in Windows shell to create "shortcuts" to another files, usually for
purposes of running a program from some other directory, sometimes
with certain preconfigured arguments and some other options.
.. seealso::
Source - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
class WindowState(Enum):
normal = 1
maximized = 3
min_no_active = 7
class DriveTypes(Enum):
unknown = 0
no_root_dir = 1
removable = 2
fixed = 3
remote = 4
cdrom = 5
ramdisk = 6
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.header = WindowsLnkFile.FileHeader(self._io, self, self._root)
if self.header.flags.has_link_target_id_list:
self.target_id_list = WindowsLnkFile.LinkTargetIdList(self._io, self, self._root)
if self.header.flags.has_link_info:
self.info = WindowsLnkFile.LinkInfo(self._io, self, self._root)
if self.header.flags.has_name:
self.name = WindowsLnkFile.StringData(self._io, self, self._root)
if self.header.flags.has_rel_path:
self.rel_path = WindowsLnkFile.StringData(self._io, self, self._root)
if self.header.flags.has_work_dir:
self.work_dir = WindowsLnkFile.StringData(self._io, self, self._root)
if self.header.flags.has_arguments:
self.arguments = WindowsLnkFile.StringData(self._io, self, self._root)
if self.header.flags.has_icon_location:
self.icon_location = WindowsLnkFile.StringData(self._io, self, self._root)
class LinkTargetIdList(KaitaiStruct):
"""
.. seealso::
Section 2.2 - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len_id_list = self._io.read_u2le()
self._raw_id_list = self._io.read_bytes(self.len_id_list)
_io__raw_id_list = KaitaiStream(BytesIO(self._raw_id_list))
self.id_list = windows_shell_items.WindowsShellItems(_io__raw_id_list)
class StringData(KaitaiStruct):
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.chars_str = self._io.read_u2le()
self.str = (self._io.read_bytes((self.chars_str * 2))).decode(u"UTF-16LE")
class LinkInfo(KaitaiStruct):
"""
.. seealso::
Section 2.3 - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len_all = self._io.read_u4le()
self._raw_all = self._io.read_bytes((self.len_all - 4))
_io__raw_all = KaitaiStream(BytesIO(self._raw_all))
self.all = WindowsLnkFile.LinkInfo.All(_io__raw_all, self, self._root)
class VolumeIdBody(KaitaiStruct):
"""
.. seealso::
Section 2.3.1 - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.drive_type = KaitaiStream.resolve_enum(WindowsLnkFile.DriveTypes, self._io.read_u4le())
self.drive_serial_number = self._io.read_u4le()
self.ofs_volume_label = self._io.read_u4le()
if self.is_unicode:
self.ofs_volume_label_unicode = self._io.read_u4le()
@property
def is_unicode(self):
if hasattr(self, '_m_is_unicode'):
return self._m_is_unicode if hasattr(self, '_m_is_unicode') else None
self._m_is_unicode = self.ofs_volume_label == 20
return self._m_is_unicode if hasattr(self, '_m_is_unicode') else None
@property
def volume_label_ansi(self):
if hasattr(self, '_m_volume_label_ansi'):
return self._m_volume_label_ansi if hasattr(self, '_m_volume_label_ansi') else None
if not (self.is_unicode):
_pos = self._io.pos()
self._io.seek((self.ofs_volume_label - 4))
self._m_volume_label_ansi = (self._io.read_bytes_term(0, False, True, True)).decode(u"cp437")
self._io.seek(_pos)
return self._m_volume_label_ansi if hasattr(self, '_m_volume_label_ansi') else None
class All(KaitaiStruct):
"""
.. seealso::
Section 2.3 - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len_header = self._io.read_u4le()
self._raw_header = self._io.read_bytes((self.len_header - 8))
_io__raw_header = KaitaiStream(BytesIO(self._raw_header))
self.header = WindowsLnkFile.LinkInfo.Header(_io__raw_header, self, self._root)
@property
def volume_id(self):
if hasattr(self, '_m_volume_id'):
return self._m_volume_id if hasattr(self, '_m_volume_id') else None
if self.header.flags.has_volume_id_and_local_base_path:
_pos = self._io.pos()
self._io.seek((self.header.ofs_volume_id - 4))
self._m_volume_id = WindowsLnkFile.LinkInfo.VolumeIdSpec(self._io, self, self._root)
self._io.seek(_pos)
return self._m_volume_id if hasattr(self, '_m_volume_id') else None
@property
def local_base_path(self):
if hasattr(self, '_m_local_base_path'):
return self._m_local_base_path if hasattr(self, '_m_local_base_path') else None
if self.header.flags.has_volume_id_and_local_base_path:
_pos = self._io.pos()
self._io.seek((self.header.ofs_local_base_path - 4))
self._m_local_base_path = self._io.read_bytes_term(0, False, True, True)
self._io.seek(_pos)
return self._m_local_base_path if hasattr(self, '_m_local_base_path') else None
class VolumeIdSpec(KaitaiStruct):
"""
.. seealso::
Section 2.3.1 - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len_all = self._io.read_u4le()
self._raw_body = self._io.read_bytes((self.len_all - 4))
_io__raw_body = KaitaiStream(BytesIO(self._raw_body))
self.body = WindowsLnkFile.LinkInfo.VolumeIdBody(_io__raw_body, self, self._root)
class LinkInfoFlags(KaitaiStruct):
"""
.. seealso::
Section 2.3 - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.reserved1 = self._io.read_bits_int_be(6)
self.has_common_net_rel_link = self._io.read_bits_int_be(1) != 0
self.has_volume_id_and_local_base_path = self._io.read_bits_int_be(1) != 0
self.reserved2 = self._io.read_bits_int_be(24)
class Header(KaitaiStruct):
"""
.. seealso::
Section 2.3 - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.flags = WindowsLnkFile.LinkInfo.LinkInfoFlags(self._io, self, self._root)
self.ofs_volume_id = self._io.read_u4le()
self.ofs_local_base_path = self._io.read_u4le()
self.ofs_common_net_rel_link = self._io.read_u4le()
self.ofs_common_path_suffix = self._io.read_u4le()
if not (self._io.is_eof()):
self.ofs_local_base_path_unicode = self._io.read_u4le()
if not (self._io.is_eof()):
self.ofs_common_path_suffix_unicode = self._io.read_u4le()
class LinkFlags(KaitaiStruct):
"""
.. seealso::
Section 2.1.1 - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.is_unicode = self._io.read_bits_int_be(1) != 0
self.has_icon_location = self._io.read_bits_int_be(1) != 0
self.has_arguments = self._io.read_bits_int_be(1) != 0
self.has_work_dir = self._io.read_bits_int_be(1) != 0
self.has_rel_path = self._io.read_bits_int_be(1) != 0
self.has_name = self._io.read_bits_int_be(1) != 0
self.has_link_info = self._io.read_bits_int_be(1) != 0
self.has_link_target_id_list = self._io.read_bits_int_be(1) != 0
self._unnamed8 = self._io.read_bits_int_be(16)
self.reserved = self._io.read_bits_int_be(5)
self.keep_local_id_list_for_unc_target = self._io.read_bits_int_be(1) != 0
self._unnamed11 = self._io.read_bits_int_be(2)
class FileHeader(KaitaiStruct):
"""
.. seealso::
Section 2.1 - https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/[MS-SHLLINK].pdf
"""
def __init__(self, _io, _parent=None, _root=None):
self._io = _io
self._parent = _parent
self._root = _root if _root else self
self._read()
def _read(self):
self.len_header = self._io.read_bytes(4)
if not self.len_header == b"\x4C\x00\x00\x00":
raise kaitaistruct.ValidationNotEqualError(b"\x4C\x00\x00\x00", self.len_header, self._io, u"/types/file_header/seq/0")
self.link_clsid = self._io.read_bytes(16)
if not self.link_clsid == b"\x01\x14\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46":
raise kaitaistruct.ValidationNotEqualError(b"\x01\x14\x02\x00\x00\x00\x00\x00\xC0\x00\x00\x00\x00\x00\x00\x46", self.link_clsid, self._io, u"/types/file_header/seq/1")
self._raw_flags = self._io.read_bytes(4)
_io__raw_flags = KaitaiStream(BytesIO(self._raw_flags))
self.flags = WindowsLnkFile.LinkFlags(_io__raw_flags, self, self._root)
self.file_attrs = self._io.read_u4le()
self.time_creation = self._io.read_u8le()
self.time_access = self._io.read_u8le()
self.time_write = self._io.read_u8le()
self.target_file_size = self._io.read_u4le()
self.icon_index = self._io.read_s4le()
self.show_command = KaitaiStream.resolve_enum(WindowsLnkFile.WindowState, self._io.read_u4le())
self.hotkey = self._io.read_u2le()
self.reserved = self._io.read_bytes(10)
if not self.reserved == b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00":
raise kaitaistruct.ValidationNotEqualError(b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", self.reserved, self._io, u"/types/file_header/seq/11")
| 43.11838 | 183 | 0.60328 |
4a236a9365c81d4df17e6ded30959695d65cb8ff | 994 | py | Python | self-learning/based/ifoperator.py | vladspirin/python-learning | 6b005fb28f96c0d610348a0b5f8830f94c53075f | [
"Unlicense"
] | 1 | 2018-10-23T14:50:43.000Z | 2018-10-23T14:50:43.000Z | self-learning/based/ifoperator.py | vladspirin/python-learning | 6b005fb28f96c0d610348a0b5f8830f94c53075f | [
"Unlicense"
] | null | null | null | self-learning/based/ifoperator.py | vladspirin/python-learning | 6b005fb28f96c0d610348a0b5f8830f94c53075f | [
"Unlicense"
] | null | null | null | # True
# False
# x = 0
# if x:
#     print('Variable x returned True')
# else:
#     print('Variable x returned False')
# if 1:
#     print('The expression is true')
# else:
#     print('The expression is false')
# light = 'red'
# if light == 'red':
# print("Stop")
# elif light == 'yellow':
# print('Wait')
# elif light == 'green':
# print('Go')
# else:
# print('What?')
# age = int(input('How old are you? '))
#
# if age >= 18:
#     print('Welcome!')
# else:
#     print(f'You are {age} years old, {18 - age} short of 18')
# operator or
# time = 11
# if time < 12 or time > 13:
# print('Open')
# else:
# print('Close')
# operator and
# time = 8
# day = 'Saturday'
#
# if time >= 8 and day != 'Sunday':
# print('Open')
# else:
# print('Close')
# inverting the expression
# x = 1
# if not x:
# print('OK')
# else:
# print('NO')
# an equivalent of the ternary operator, which Python does not have as dedicated syntax
x = 0
# result = 'OK' if x else 'NO'
# print(result)
print('OK' if x else 'NO')
| 16.566667 | 52 | 0.551308 |
4a236aaa3bbb5608fc0df6a11fe26c8c1cf12181 | 3,769 | py | Python | chat_mysite/settings.py | xiaoqiao99/chat | ca65ed25fbc277828390b890a50ecadf4675cfb4 | [
"MIT"
] | 2 | 2019-06-21T10:30:18.000Z | 2019-07-12T07:46:25.000Z | chat_mysite/settings.py | xiaoqiao99/chat | ca65ed25fbc277828390b890a50ecadf4675cfb4 | [
"MIT"
] | 8 | 2020-06-05T19:56:53.000Z | 2022-03-11T23:41:44.000Z | chat_mysite/settings.py | xiaoqiao99/chat | ca65ed25fbc277828390b890a50ecadf4675cfb4 | [
"MIT"
] | 3 | 2020-03-13T03:22:40.000Z | 2020-07-03T03:03:02.000Z | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r_pd)4!rn1#x9by)h+#(v28n8k)bnyk&1$!qr9*fkl%az@2zc^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'channels',
'chat',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
ASGI_APPLICATION = 'chat_mysite.routing.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
},
},
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chat_mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chat_mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'my_test',
'PORT': '3306',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
        # development environment
# 'USER': 'kaifa',
# 'PASSWORD': 'kaifazufir2018518',
# 'HOST': '192.168.1.214',
        # production environment
#'NAME': 'cyprex',
# 'USER': 'root',
# 'PASSWORD': 'WYRGy2XwS#oMns^9',
# 'HOST': '106.75.225.101',
# 'PORT': '5041'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| 25.295302 | 91 | 0.651897 |
4a236c3083672d986c35bbfd3d70550434f9b6c8 | 946 | py | Python | uncertainty_baselines/models/movielens_test.py | athon-millane/uncertainty-baselines | aa504fc51aac6d4cac47dbd34aa672c670dfbd28 | [
"Apache-2.0"
] | 2 | 2022-02-22T10:22:46.000Z | 2022-03-09T09:22:41.000Z | uncertainty_baselines/models/movielens_test.py | PGM-Lab/2022-AISTATS-diversity | 63df2e5f29cdaefe49626439bbe13289f37eed36 | [
"Apache-2.0"
] | null | null | null | uncertainty_baselines/models/movielens_test.py | PGM-Lab/2022-AISTATS-diversity | 63df2e5f29cdaefe49626439bbe13289f37eed36 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for uncertainty_baselines.models.movielens."""
import tensorflow as tf
import uncertainty_baselines as ub
class MovieLensTest(tf.test.TestCase):
def testCreateModel(self):
model = ub.models.MovieLensBuilder(31)
self.assertLen(model.layers, 8)
if __name__ == '__main__':
tf.test.main()
| 29.5625 | 74 | 0.756871 |
4a236dbc34dc3ea36b4c3a7ad9f690e355ace920 | 18,676 | py | Python | xclim/indices/generic.py | ClimateImpactLab/xclim | b4877050223c58afc1089961a2a516bda26d3917 | [
"Apache-2.0"
] | null | null | null | xclim/indices/generic.py | ClimateImpactLab/xclim | b4877050223c58afc1089961a2a516bda26d3917 | [
"Apache-2.0"
] | 1 | 2021-03-29T18:39:50.000Z | 2021-04-05T19:16:05.000Z | xclim/indices/generic.py | ClimateImpactLab/xclim | b4877050223c58afc1089961a2a516bda26d3917 | [
"Apache-2.0"
] | 1 | 2021-03-02T20:12:28.000Z | 2021-03-02T20:12:28.000Z | # -*- coding: utf-8 -*-
# noqa: D205,D400
"""
Generic indices submodule
=========================
Helper functions for common generic actions done in the computation of indices.
"""
from typing import Union
import numpy as np
import xarray as xr
from xclim.core.calendar import get_calendar
from xclim.core.units import convert_units_to, pint2cfunits, str2pint, to_agg_units
from . import run_length as rl
# __all__ = [
# "select_time",
# "select_resample_op",
# "doymax",
# "doymin",
# "default_freq",
# "threshold_count",
# "get_daily_events",
# "daily_downsampler",
# ]
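# Map comparison symbols to the short operator names used by xarray (consumed by get_op below).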
binary_ops = {">": "gt", "<": "lt", ">=": "ge", "<=": "le", "==": "eq", "!=": "ne"}
def select_time(da: xr.DataArray, **indexer):
"""Select entries according to a time period.
Parameters
----------
da : xr.DataArray
Input data.
**indexer : {dim: indexer, }, optional
Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values,
      month=1 to select January, or month=[6,7,8] to select summer months. If no indexer is given, all values are
considered.
Returns
-------
xr.DataArray
Selected input values.
"""
if not indexer:
selected = da
else:
key, val = indexer.popitem()
time_att = getattr(da.time.dt, key)
selected = da.sel(time=time_att.isin(val)).dropna(dim="time")
return selected
def select_resample_op(da: xr.DataArray, op: str, freq: str = "YS", **indexer):
"""Apply operation over each period that is part of the index selection.
Parameters
----------
da : xr.DataArray
Input data.
op : str {'min', 'max', 'mean', 'std', 'var', 'count', 'sum', 'argmax', 'argmin'} or func
Reduce operation. Can either be a DataArray method or a function that can be applied to a DataArray.
freq : str
Resampling frequency defining the periods as defined in
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling.
**indexer : {dim: indexer, }, optional
Time attribute and values over which to subset the array. For example, use season='DJF' to select winter values,
      month=1 to select January, or month=[6,7,8] to select summer months. If no indexer is given, all values are
considered.
Returns
-------
xarray.DataArray
      The reduced value for each period.
"""
da = select_time(da, **indexer)
r = da.resample(time=freq, keep_attrs=True)
if isinstance(op, str):
return getattr(r, op)(dim="time", keep_attrs=True)
return r.map(op)
def doymax(da: xr.DataArray) -> xr.DataArray:
"""Return the day of year of the maximum value."""
i = da.argmax(dim="time")
out = da.time.dt.dayofyear[i]
out.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(da))
return out
def doymin(da: xr.DataArray) -> xr.DataArray:
"""Return the day of year of the minimum value."""
i = da.argmin(dim="time")
out = da.time.dt.dayofyear[i]
out.attrs.update(units="", is_dayofyear=1, calendar=get_calendar(da))
return out
def default_freq(**indexer) -> str:
"""Return the default frequency."""
freq = "AS-JAN"
if indexer:
group, value = indexer.popitem()
if "DJF" in value:
freq = "AS-DEC"
if group == "month" and sorted(value) != value:
raise NotImplementedError
return freq
def get_op(op: str):
    """Get the Python comparison function corresponding to an operator name or symbol.
    Accepted op strings are the keys and values of xclim.indices.generic.binary_ops.
"""
if op in binary_ops:
op = binary_ops[op]
elif op in binary_ops.values():
pass
else:
raise ValueError(f"Operation `{op}` not recognized.")
return xr.core.ops.get_op(op)
def compare(da: xr.DataArray, op: str, thresh: Union[float, int]) -> xr.DataArray:
"""Compare a dataArray to a threshold using given operator.
Parameters
----------
da : xr.DataArray
Input data.
op : {">", "<", ">=", "<=", "gt", "lt", "ge", "le"}
Logical operator {>, <, >=, <=, gt, lt, ge, le }. e.g. arr > thresh.
thresh : Union[float, int]
Threshold value.
Returns
-------
xr.DataArray
Boolean mask of the comparison.
"""
return get_op(op)(da, thresh)
def threshold_count(
da: xr.DataArray, op: str, thresh: Union[float, int], freq: str
) -> xr.DataArray:
"""Count number of days where value is above or below threshold.
Parameters
----------
da : xr.DataArray
Input data.
op : {">", "<", ">=", "<=", "gt", "lt", "ge", "le"}
Logical operator {>, <, >=, <=, gt, lt, ge, le }. e.g. arr > thresh.
thresh : Union[float, int]
Threshold value.
freq : str
Resampling frequency defining the periods as defined in
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling.
Returns
-------
xr.DataArray
The number of days meeting the constraints for each period.
"""
c = compare(da, op, thresh) * 1
return c.resample(time=freq).sum(dim="time")
def domain_count(da: xr.DataArray, low: float, high: float, freq: str) -> xr.DataArray:
"""Count number of days where value is within low and high thresholds.
A value is counted if it is larger than `low`, and smaller or equal to `high`, i.e. in `]low, high]`.
Parameters
----------
da : xr.DataArray
Input data.
low : float
Minimum threshold value.
high : float
Maximum threshold value.
freq : str
Resampling frequency defining the periods
defined in http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling.
Returns
-------
xr.DataArray
The number of days where value is within [low, high] for each period.
"""
c = compare(da, ">", low) * compare(da, "<=", high) * 1
return c.resample(time=freq).sum(dim="time")
def get_daily_events(da: xr.DataArray, da_value: float, operator: str) -> xr.DataArray:
r"""Return a 0/1 mask when a condition is True or False.
    The function returns 1 where operator(da, da_value) is True,
    0 where operator(da, da_value) is False,
    and NaN where da is NaN.
Parameters
----------
da : xr.DataArray
da_value : float
operator : {">", "<", ">=", "<=", "gt", "lt", "ge", "le"}
Logical operator {>, <, >=, <=, gt, lt, ge, le}. e.g. arr > thresh.
Returns
-------
xr.DataArray
"""
func = getattr(da, "_binary_op")(get_op(operator))
events = func(da, da_value) * 1
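    # keep NaN wherever the input is NaN so missing values are not counted as events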
events = events.where(~(np.isnan(da)))
events = events.rename("events")
return events
def daily_downsampler(da: xr.DataArray, freq: str = "YS") -> xr.DataArray:
r"""Daily climate data downsampler.
Parameters
----------
da : xr.DataArray
freq : str
Returns
-------
xr.DataArray
Note
----
Usage Example
grouper = daily_downsampler(da_std, freq='YS')
x2 = grouper.mean()
# add time coords to x2 and change dimension tags to time
time1 = daily_downsampler(da_std.time, freq=freq).first()
x2.coords['time'] = ('tags', time1.values)
x2 = x2.swap_dims({'tags': 'time'})
x2 = x2.sortby('time')
"""
# generate tags from da.time and freq
if isinstance(da.time.values[0], np.datetime64):
years = [f"{y:04d}" for y in da.time.dt.year.values]
months = [f"{m:02d}" for m in da.time.dt.month.values]
else:
# cannot use year, month, season attributes, not available for all calendars ...
years = [f"{v.year:04d}" for v in da.time.values]
months = [f"{v.month:02d}" for v in da.time.values]
seasons = [
"DJF DJF MAM MAM MAM JJA JJA JJA SON SON SON DJF".split()[int(m) - 1]
for m in months
]
n_t = da.time.size
if freq == "YS":
# year start frequency
l_tags = years
elif freq == "MS":
# month start frequency
l_tags = [years[i] + months[i] for i in range(n_t)]
elif freq == "QS-DEC":
# DJF, MAM, JJA, SON seasons
# construct tags from list of season+year, increasing year for December
ys = []
for i in range(n_t):
m = months[i]
s = seasons[i]
y = years[i]
if m == "12":
y = str(int(y) + 1)
ys.append(y + s)
l_tags = ys
else:
raise RuntimeError(f"Frequency `{freq}` not implemented.")
# add tags to buffer DataArray
buffer = da.copy()
buffer.coords["tags"] = ("time", l_tags)
# return groupby according to tags
return buffer.groupby("tags")
# CF-INDEX-META Indices
def count_level_crossings(
low_data: xr.DataArray, high_data: xr.DataArray, threshold: str, freq: str
) -> xr.DataArray:
"""Calculate the number of times low_data is below threshold while high_data is above threshold.
First, the threshold is transformed to the same standard_name and units as the input data,
then the thresholding is performed, and finally, the number of occurrences is counted.
Parameters
----------
low_data: xr.DataArray
Variable that must be under the threshold.
high_data: xr.DataArray
Variable that must be above the threshold.
threshold: str
Quantity.
freq: str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
# Convert units to low_data
high_data = convert_units_to(high_data, low_data)
threshold = convert_units_to(threshold, low_data)
lower = compare(low_data, "<", threshold)
higher = compare(high_data, ">=", threshold)
out = (lower & higher).resample(time=freq).sum()
return to_agg_units(out, low_data, "count", dim="time")
def count_occurrences(
data: xr.DataArray, threshold: str, condition: str, freq: str
) -> xr.DataArray:
"""Calculate the number of times some condition is met.
First, the threshold is transformed to the same standard_name and units as the input data.
Then the thresholding is performed as condition(data, threshold),
i.e. if condition is `<`, then this counts the number of times `data < threshold`.
Finally, count the number of occurrences when condition is met.
Parameters
----------
data : xr.DataArray
threshold : str
Quantity.
condition : {">", "<", ">=", "<=", "==", "!="}
Operator.
freq: str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
threshold = convert_units_to(threshold, data)
cond = compare(data, condition, threshold)
out = cond.resample(time=freq).sum()
return to_agg_units(out, data, "count", dim="time")
def diurnal_temperature_range(
low_data: xr.DataArray, high_data: xr.DataArray, freq: str
) -> xr.DataArray:
"""Calculate the average diurnal temperature range.
Parameters
----------
low_data : xr.DataArray
Lowest daily temperature (tasmin).
high_data : xr.DataArray
Highest daily temperature (tasmax).
freq: str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
high_data = convert_units_to(high_data, low_data)
dtr = high_data - low_data
out = dtr.resample(time=freq).mean()
u = str2pint(low_data.units)
out.attrs["units"] = pint2cfunits(u - u)
return out
def first_occurrence(
data: xr.DataArray, threshold: str, condition: str, freq: str
) -> xr.DataArray:
"""Calculate the first time some condition is met.
First, the threshold is transformed to the same standard_name and units as the input data.
Then the thresholding is performed as condition(data, threshold), i.e. if condition is <, data < threshold.
Finally, locate the first occurrence when condition is met.
Parameters
----------
data : xr.DataArray
threshold : str
Quantity
condition : {">", "<", ">=", "<=", "==", "!="}
Operator
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
threshold = convert_units_to(threshold, data)
cond = compare(data, condition, threshold)
out = cond.resample(time=freq).map(
rl.first_run,
window=1,
dim="time",
coord="dayofyear",
)
out.attrs["units"] = ""
return out
def last_occurrence(
data: xr.DataArray, threshold: str, condition: str, freq: str
) -> xr.DataArray:
"""Calculate the last time some condition is met.
First, the threshold is transformed to the same standard_name and units as the input data.
Then the thresholding is performed as condition(data, threshold), i.e. if condition is <, data < threshold.
Finally, locate the last occurrence when condition is met.
Parameters
----------
data : xr.DataArray
threshold : str
Quantity
condition : {">", "<", ">=", "<=", "==", "!="}
Operator
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
threshold = convert_units_to(threshold, data)
cond = compare(data, condition, threshold)
out = cond.resample(time=freq).map(
rl.last_run,
window=1,
dim="time",
coord="dayofyear",
)
out.attrs["units"] = ""
return out
def spell_length(
data: xr.DataArray, threshold: str, condition: str, reducer: str, freq: str
) -> xr.DataArray:
"""Calculate statistics on lengths of spells.
First, the threshold is transformed to the same standard_name and units as the input data.
Then the thresholding is performed as condition(data, threshold), i.e. if condition is <, data < threshold.
Then the spells are determined, and finally the statistics according to the specified reducer are calculated.
Parameters
----------
data : xr.DataArray
threshold : str
Quantity.
condition : {">", "<", ">=", "<=", "==", "!="}
Operator
reducer : {'maximum', 'minimum', 'mean', 'sum'}
Reducer.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
threshold = convert_units_to(threshold, data)
cond = compare(data, condition, threshold)
out = cond.resample(time=freq).map(
rl.rle_statistics,
reducer=reducer,
dim="time",
)
return to_agg_units(out, data, "count")
def statistics(data: xr.DataArray, reducer: str, freq: str) -> xr.DataArray:
"""Calculate a simple statistic of the data.
Parameters
----------
data : xr.DataArray
reducer : {'maximum', 'minimum', 'mean', 'sum'}
Reducer.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
out = getattr(data.resample(time=freq), reducer)()
out.attrs["units"] = data.attrs["units"]
return out
def thresholded_statistics(
data: xr.DataArray, threshold: str, condition: str, reducer: str, freq: str
) -> xr.DataArray:
"""Calculate a simple statistic of the data for which some condition is met.
First, the threshold is transformed to the same standard_name and units as the input data.
Then the thresholding is performed as condition(data, threshold), i.e. if condition is <, data < threshold.
Finally, the statistic is calculated for those data values that fulfil the condition.
Parameters
----------
data : xr.DataArray
threshold : str
Quantity.
condition : {">", "<", ">=", "<=", "==", "!="}
Operator
reducer : {'maximum', 'minimum', 'mean', 'sum'}
Reducer.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
threshold = convert_units_to(threshold, data)
cond = compare(data, condition, threshold)
out = getattr(data.where(cond).resample(time=freq), reducer)()
out.attrs["units"] = data.attrs["units"]
return out
def temperature_sum(
data: xr.DataArray, threshold: str, condition: str, freq: str
) -> xr.DataArray:
"""Calculate the temperature sum above/below a threshold.
First, the threshold is transformed to the same standard_name and units as the input data.
Then the thresholding is performed as condition(data, threshold), i.e. if condition is <, data < threshold.
Finally, the sum is calculated for those data values that fulfil the condition after subtraction of the threshold value.
If the sum is for values below the threshold the result is multiplied by -1.
Parameters
----------
data : xr.DataArray
threshold : str
Quantity
condition : {">", "<", ">=", "<=", "==", "!="}
Operator
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
threshold = convert_units_to(threshold, data)
cond = compare(data, condition, threshold)
direction = -1 if "<" in condition else 1
out = (data - threshold).where(cond).resample(time=freq).sum()
out = direction * out
return to_agg_units(out, data, "delta_prod")
def interday_diurnal_temperature_range(
low_data: xr.DataArray, high_data: xr.DataArray, freq: str
) -> xr.DataArray:
"""Calculate the average absolute day-to-day difference in diurnal temperature range.
Parameters
----------
low_data : xr.DataArray
Lowest daily temperature (tasmin).
high_data : xr.DataArray
Highest daily temperature (tasmax).
freq: str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
high_data = convert_units_to(high_data, low_data)
vdtr = abs((high_data - low_data).diff(dim="time"))
out = vdtr.resample(time=freq).mean(dim="time")
u = str2pint(low_data.units)
out.attrs["units"] = pint2cfunits(u - u)
return out
def extreme_temperature_range(
low_data: xr.DataArray, high_data: xr.DataArray, freq: str
) -> xr.DataArray:
"""Calculate the extreme temperature range as the maximum of daily maximum temperature minus the minimum of daily minimum temperature.
Parameters
----------
low_data : xr.DataArray
Lowest daily temperature (tasmin).
high_data : xr.DataArray
Highest daily temperature (tasmax).
freq: str
Resampling frequency.
Returns
-------
xarray.DataArray
"""
high_data = convert_units_to(high_data, low_data)
    out = high_data.resample(time=freq).max() - low_data.resample(time=freq).min()
u = str2pint(low_data.units)
out.attrs["units"] = pint2cfunits(u - u)
return out
| 28.776579 | 138 | 0.619779 |
4a236dd07d7f94721876a25c4653c4c28aaea8e2 | 14,858 | py | Python | theano/sandbox/multinomial.py | janchorowski/Theano | b46e24dcf37727f688057a55c57bf939f3f05c07 | [
"BSD-3-Clause"
] | 1 | 2015-04-24T17:09:10.000Z | 2015-04-24T17:09:10.000Z | theano/sandbox/multinomial.py | janchorowski/Theano | b46e24dcf37727f688057a55c57bf939f3f05c07 | [
"BSD-3-Clause"
] | null | null | null | theano/sandbox/multinomial.py | janchorowski/Theano | b46e24dcf37727f688057a55c57bf939f3f05c07 | [
"BSD-3-Clause"
] | null | null | null | import numpy
import theano
from theano import Op, Apply
import theano.tensor as T
from theano.gof import local_optimizer
from theano.tensor import NotScalarConstantError, get_scalar_constant_value
from theano.scalar import as_scalar
from theano.sandbox.cuda import cuda_available, GpuOp
if cuda_available:
from theano.sandbox.cuda import CudaNdarrayType
from theano.sandbox.cuda.basic_ops import host_from_gpu, gpu_from_host
from theano.sandbox.cuda.opt import register_opt
class MultinomialFromUniform(Op):
"""
    Converts samples from a uniform distribution into samples from a multinomial.
"""
__props__ = ("odtype",)
def __init__(self, odtype):
self.odtype = odtype
def __str__(self):
return '%s{%s}' % (self.__class__.__name__, self.odtype)
def __setstate__(self, dct):
self.__dict__.update(dct)
try:
self.odtype
except AttributeError:
self.odtype = 'auto'
def make_node(self, pvals, unis, n=1):
pvals = T.as_tensor_variable(pvals)
unis = T.as_tensor_variable(unis)
if pvals.ndim != 2:
raise NotImplementedError('pvals ndim should be 2', pvals.ndim)
if unis.ndim != 1:
raise NotImplementedError('unis ndim should be 1', unis.ndim)
if self.odtype == 'auto':
odtype = pvals.dtype
else:
odtype = self.odtype
out = T.tensor(dtype=odtype, broadcastable=pvals.type.broadcastable)
return Apply(self, [pvals, unis, as_scalar(n)], [out])
def grad(self, ins, outgrads):
pvals, unis, n = ins
(gz,) = outgrads
return [T.zeros_like(x) for x in ins]
def c_code_cache_version(self):
return (8,)
def c_code(self, node, name, ins, outs, sub):
# support old pickled graphs
if len(ins) == 2:
(pvals, unis) = ins
n = 1
else:
(pvals, unis, n) = ins
(z,) = outs
if self.odtype == 'auto':
t = "PyArray_TYPE(%(pvals)s)" % locals()
else:
t = theano.scalar.Scalar(self.odtype).dtype_specs()[1]
if t.startswith('theano_complex'):
t = t.replace('theano_complex', 'NPY_COMPLEX')
else:
t = t.upper()
fail = sub['fail']
return """
if (PyArray_NDIM(%(pvals)s) != 2)
{
PyErr_Format(PyExc_TypeError, "pvals wrong rank");
%(fail)s;
}
if (PyArray_NDIM(%(unis)s) != 1)
{
PyErr_Format(PyExc_TypeError, "unis wrong rank");
%(fail)s;
}
if (PyArray_DIMS(%(unis)s)[0] != (PyArray_DIMS(%(pvals)s)[0] * %(n)s))
{
PyErr_Format(PyExc_ValueError, "unis.shape[0] != pvals.shape[0] * n");
%(fail)s;
}
if ((NULL == %(z)s)
|| ((PyArray_DIMS(%(z)s))[0] != (PyArray_DIMS(%(pvals)s))[0])
|| ((PyArray_DIMS(%(z)s))[1] != (PyArray_DIMS(%(pvals)s))[1])
)
{
Py_XDECREF(%(z)s);
%(z)s = (PyArrayObject*) PyArray_EMPTY(2,
PyArray_DIMS(%(pvals)s),
%(t)s,
0);
if (!%(z)s)
{
PyErr_SetString(PyExc_MemoryError, "failed to alloc z output");
%(fail)s;
}
}
{ // NESTED SCOPE
const int nb_multi = PyArray_DIMS(%(pvals)s)[0];
const int nb_outcomes = PyArray_DIMS(%(pvals)s)[1];
const int n_samples = %(n)s;
//
// For each multinomial, loop over each possible outcome
//
for (int c = 0; c < n_samples; ++c){
for (int n = 0; n < nb_multi; ++n)
{
int waiting = 1;
double cummul = 0.;
const dtype_%(unis)s* unis_n = (dtype_%(unis)s*)PyArray_GETPTR1(%(unis)s, c*nb_multi + n);
for (int m = 0; m < nb_outcomes; ++m)
{
dtype_%(z)s* z_nm = (dtype_%(z)s*)PyArray_GETPTR2(%(z)s, n,m);
const dtype_%(pvals)s* pvals_nm = (dtype_%(pvals)s*)PyArray_GETPTR2(%(pvals)s, n,m);
cummul += *pvals_nm;
if (c == 0)
{
if (waiting && (cummul > *unis_n))
{
*z_nm = 1.;
waiting = 0;
}
else
{
// if we re-used old z pointer, we have to clear it out.
*z_nm = 0.;
}
}
else {
if (cummul > *unis_n)
{
*z_nm = *z_nm + 1.;
break;
}
}
}
}
}
} // END NESTED SCOPE
""" % locals()
def perform(self, node, ins, outs):
# support old pickled graphs
if len(ins) == 2:
(pvals, unis) = ins
n_samples = 1
else:
(pvals, unis, n_samples) = ins
(z,) = outs
if unis.shape[0] != pvals.shape[0] * n_samples:
raise ValueError("unis.shape[0] != pvals.shape[0] * n_samples",
unis.shape[0], pvals.shape[0], n_samples)
if z[0] is None or z[0].shape != pvals.shape:
z[0] = numpy.zeros(pvals.shape, dtype=node.outputs[0].dtype)
nb_multi = pvals.shape[0]
nb_outcomes = pvals.shape[1]
# For each multinomial, loop over each possible outcome
for c in range(n_samples):
for n in range(nb_multi):
waiting = True
cummul = 0
unis_n = unis[c * nb_multi + n]
for m in range(nb_outcomes):
cummul += pvals[n, m]
if c == 0:
if (waiting and (cummul > unis_n)):
z[0][n, m] = 1
waiting = False
else:
z[0][n, m] = 0
else:
if (cummul > unis_n):
z[0][n, m] += 1
break
class GpuMultinomialFromUniform(MultinomialFromUniform, GpuOp):
"""
The output is transposed compared to MultinomialFromUniform.
We must insert a Transpose op after it.
The optimization that moves it to the gpu does it.
"""
def make_node(self, pvals, unis):
assert pvals.dtype == 'float32'
assert unis.dtype == 'float32'
if not isinstance(pvals.type, CudaNdarrayType):
raise TypeError('pvals must be cudandarray', pvals)
if not isinstance(unis.type, CudaNdarrayType):
raise TypeError('unis must be cudandarray', unis)
if self.odtype == 'auto':
odtype = pvals.dtype
else:
odtype = self.odtype
if odtype != pvals.dtype:
raise NotImplementedError(
'GpuMultinomialFromUniform works only if '
'self.odtype == pvals.dtype', odtype, pvals.dtype)
br = (pvals.broadcastable[1], pvals.broadcastable[0])
out = CudaNdarrayType(broadcastable=br)()
return Apply(self, [pvals, unis], [out])
def perform(self, node, ins, outs):
# The perform from parent don't work with CudaNdarray. We
# don't need it as DebugMode will test again it as an
# optimization insert the GPU op.
return Op.perform(self, node, ins, outs)
def c_code_cache_version(self):
return (8,)
def c_support_code_apply(self, node, nodename):
return """
static __global__ void k_multi_warp_%(nodename)s(
const int nb_multi,
const int nb_outcomes,
float * global_pvals,
const int pvals_row_stride,
const int pvals_col_stride,
float * global_unis,
const int unis_stride,
float * global_outs,
const int outs_row_stride,
const int outs_col_stride
)
{
// each thread takes care of one multinomial draw
int n = blockDim.x*blockIdx.x + threadIdx.x;
if (n < nb_multi)
{
float cummul = 0.;
bool done = false;
const float unis_n = global_unis[n*unis_stride];
for (int m = 0; m < nb_outcomes; ++m)
{
float current_out = 0.;
if (!done)
{
cummul += global_pvals[m * pvals_col_stride + n * pvals_row_stride];
if (unis_n < cummul)
{
current_out = 1.;
done = true;
}
}
//write out transposed for speed.
global_outs[n * outs_col_stride + m * outs_row_stride] = current_out;
}
}
}
""" % locals()
def c_code(self, node, name, ins, outs, sub):
(pvals, unis) = ins
(z,) = outs
fail = sub['fail']
return """
if (CudaNdarray_NDIM(%(pvals)s) != 2)
{
PyErr_Format(PyExc_TypeError, "pvals wrong rank");
%(fail)s;
}
if (CudaNdarray_NDIM(%(unis)s) != 1)
{
PyErr_Format(PyExc_TypeError, "unis wrong rank");
%(fail)s;
}
if (CudaNdarray_HOST_DIMS(%(unis)s)[0] != CudaNdarray_HOST_DIMS(%(pvals)s)[0])
{
PyErr_Format(PyExc_ValueError, "unis.shape[0] != pvals.shape[0]");
%(fail)s;
}
//N.B. that the output is TRANSPOSED compared with pvals
if ((NULL == %(z)s)
|| (CudaNdarray_HOST_DIMS(%(z)s)[0] != CudaNdarray_HOST_DIMS(%(pvals)s)[1])
|| (CudaNdarray_HOST_DIMS(%(z)s)[1] != CudaNdarray_HOST_DIMS(%(pvals)s)[0]))
{
Py_XDECREF(%(z)s);
npy_intp dims[2];
dims[0] = (CudaNdarray_HOST_DIMS(%(pvals)s)[1]);
dims[1] = (CudaNdarray_HOST_DIMS(%(pvals)s)[0]);
%(z)s = (CudaNdarray*)CudaNdarray_NewDims(2, dims);
if (!%(z)s)
{
PyErr_SetString(PyExc_MemoryError, "failed to alloc z output");
%(fail)s;
}
}
{ // NESTED SCOPE
int nb_multi = CudaNdarray_HOST_DIMS(%(pvals)s)[0];
int nb_outcomes = CudaNdarray_HOST_DIMS(%(pvals)s)[1];
//TODO : change this for a beautiful constant
int max_nb_blocks = 2<<15 - 1;
int nb_blocks = max_nb_blocks + 1;
int nb_threads=16; // so it really starts at 32, because of the *2
do
{
nb_threads*=2;
if (nb_multi %% nb_threads == 0)
nb_blocks = nb_multi/nb_threads;
else
nb_blocks = (int)((float)nb_multi/(float)nb_threads + 1.);
} while (nb_blocks > max_nb_blocks);
//printf("\\nN=%%i b=%%i t=%%i t*b=%%i", nb_multi, nb_blocks, nb_threads, nb_blocks*nb_threads);
// TODO : next line is a bit hardcoded...
if (nb_threads > 512)
{
PyErr_Format(PyExc_ValueError, "Mutinomial is not implemented for so many rows in the matrix (%%i)", nb_multi);
%(fail)s;
}
dim3 n_blocks(nb_blocks,1,1);
dim3 n_threads(nb_threads,1,1);
int n_shared = 0;
assert(nb_blocks*nb_threads >= nb_multi);
k_multi_warp_%(name)s<<<n_blocks, n_threads, n_shared>>>(
CudaNdarray_HOST_DIMS(%(z)s)[1],
CudaNdarray_HOST_DIMS(%(z)s)[0],
CudaNdarray_DEV_DATA(%(pvals)s),
CudaNdarray_HOST_STRIDES(%(pvals)s)[0],
CudaNdarray_HOST_STRIDES(%(pvals)s)[1],
CudaNdarray_DEV_DATA(%(unis)s),
CudaNdarray_HOST_STRIDES(%(unis)s)[0],
CudaNdarray_DEV_DATA(%(z)s),
CudaNdarray_HOST_STRIDES(%(z)s)[0],
CudaNdarray_HOST_STRIDES(%(z)s)[1]
);
CNDA_THREAD_SYNC;
cudaError_t sts = cudaGetLastError();
if (cudaSuccess != sts)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %%s: %%s. (grid: %%i x %%i; block: %%i x %%i x %%i; shared: %%i)\\n",
"k_multi_warp_%(name)s",
cudaGetErrorString(sts),
n_blocks.x,
n_blocks.y,
n_threads.x,
n_threads.y,
n_threads.z,
n_shared);
%(fail)s;
}
} // END NESTED SCOPE
""" % locals()
@local_optimizer([MultinomialFromUniform])
def local_gpu_multinomial(node):
if type(node.op) is MultinomialFromUniform:
if len(node.inputs) == 2:
p, u = node.inputs
n_samples = 1
else:
p, u, n_samples = node.inputs
try:
if get_scalar_constant_value(n_samples) != 1:
return None
except NotScalarConstantError:
return None
m, = node.outputs
if (p.dtype == u.dtype == m.dtype == 'float32' and
any([i.owner and isinstance(i.owner.op,
theano.sandbox.cuda.HostFromGpu)
for i in node.inputs])):
gpu_op = GpuMultinomialFromUniform(node.op.odtype)
return [host_from_gpu(gpu_op(*[gpu_from_host(i)
for i in [p, u]])).T]
if (isinstance(node.op, theano.sandbox.cuda.GpuFromHost) and
node.inputs[0].owner and
type(node.inputs[0].owner.op) is MultinomialFromUniform):
multi = node.inputs[0].owner
if len(node.inputs) == 2:
p, u = node.inputs
n_samples = 1
else:
p, u, n_samples = node.inputs
try:
if get_scalar_constant_value(n_samples) != 1:
return None
except NotScalarConstantError:
return None
m, = multi.outputs
if (p.dtype == u.dtype == m.dtype == 'float32'):
gpu_op = GpuMultinomialFromUniform(multi.op.odtype)
ret = gpu_op(*[gpu_from_host(i) for i in [p, u]]).T
# The dimshuffle is on the cpu, but will be moved to the
# gpu by an opt.
return [gpu_from_host(ret)]
if cuda_available:
register_opt()(local_gpu_multinomial)
pass
| 35.208531 | 131 | 0.493135 |
4a236ec186ca73df43b280a9a0ceef1f981fd9e5 | 1,503 | py | Python | helloDjango/boards/models.py | GavWaite/hello-django | a51b0752e6ecadac8db72603b5b9ad557ce8d419 | [
"MIT"
] | null | null | null | helloDjango/boards/models.py | GavWaite/hello-django | a51b0752e6ecadac8db72603b5b9ad557ce8d419 | [
"MIT"
] | null | null | null | helloDjango/boards/models.py | GavWaite/hello-django | a51b0752e6ecadac8db72603b5b9ad557ce8d419 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
# This is where we define our data model which will be stored in the database
# Note that we use the built in model for User
# Also worth noting is the fields are all subclasses of django.db.models.Field
# Lots of choices here
class Board(models.Model):
name = models.CharField(max_length=30, unique=True) # Note the unique optional parameter
description = models.CharField(max_length=100)
def __str__(self):
return self.name
class Topic(models.Model):
subject = models.CharField(max_length=255)
last_updated = models.DateTimeField(auto_now_add=True) # Note the auto_now_add to set to time of object creation
board = models.ForeignKey(Board, related_name='topics', on_delete=models.PROTECT) # the related_name refers to the automatic inverse relationship
starter = models.ForeignKey(User, related_name='topics', on_delete=models.PROTECT)
def __str__(self):
return self.subject
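# Illustrative queries for the related_name reverse relations declared above
# (a sketch, not part of the app; `general` is a hypothetical saved Board and
# `u` a User instance):
#   general.topics.all()                            # Topics belonging to this Board
#   Topic.objects.filter(board=general, starter=u)  # Topics on the board started by u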
class Post(models.Model):
message = models.TextField(max_length=4000)
topic = models.ForeignKey(Topic, related_name='posts', on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(null=True)
created_by = models.ForeignKey(User, related_name='posts', on_delete=models.PROTECT)
updated_by = models.ForeignKey(User, null=True, related_name='+', on_delete=models.PROTECT) # the + signifies we do not need an inverse | 45.545455 | 149 | 0.757818 |
4a236f3cc09a6546af818cd19340862fae3ce52d | 297 | py | Python | django_task/api/tasks.py | juxiaona/test_dev05 | 7a47ff51cc4afd33353323ebe10469a1a3e40e4a | [
"Apache-2.0"
] | 1 | 2021-07-11T09:16:24.000Z | 2021-07-11T09:16:24.000Z | django_task/api/tasks.py | juxiaona/test_dev05 | 7a47ff51cc4afd33353323ebe10469a1a3e40e4a | [
"Apache-2.0"
] | 1 | 2021-07-11T09:16:52.000Z | 2021-07-11T09:16:52.000Z | django_task/api/tasks.py | juxiaona/test_dev05 | 7a47ff51cc4afd33353323ebe10469a1a3e40e4a | [
"Apache-2.0"
] | 3 | 2021-10-17T12:23:01.000Z | 2022-02-03T02:29:05.000Z | import os
from time import sleep
from celery import shared_task
from django_task.settings import BASE_DIR
TEST_FILE = os.path.join(BASE_DIR, "api", "running_tests.py")
@shared_task
def running():
os.system(TEST_FILE)
return
@shared_task
def add(x, y):
sleep(10)
return x + y
| 14.85 | 61 | 0.720539 |
4a236f6cd4d0c1ed3cede68cd989e7791c54f6fc | 668 | py | Python | setup.py | jlbush23/tess_cpm | 7e18a4ae5372cbeaef496a3289bdaa4405bc0503 | [
"MIT"
] | 6 | 2021-06-30T09:30:27.000Z | 2022-03-24T21:26:45.000Z | setup.py | jlbush23/tess_cpm | 7e18a4ae5372cbeaef496a3289bdaa4405bc0503 | [
"MIT"
] | 7 | 2019-11-07T19:42:05.000Z | 2021-02-28T01:24:40.000Z | setup.py | jlbush23/tess_cpm | 7e18a4ae5372cbeaef496a3289bdaa4405bc0503 | [
"MIT"
] | 6 | 2019-11-07T17:17:13.000Z | 2021-02-28T00:48:30.000Z | import setuptools
with open("README.md", "r") as f:
readme = f.read()
setuptools.setup(
name="tess_cpm",
version="0.0.1",
author="Soichiro Hattori",
author_email="[email protected]",
url="https://github.com/soichiro-hattori/tess_cpm",
license="MIT",
description="An implementation of the Causal Pixel Model (CPM) for TESS data",
long_description="readme",
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=[
"numpy",
"matplotlib",
"scipy",
"astropy",
"astroquery",
"scikit-learn",
"lightkurve"
]
)
| 24.740741 | 82 | 0.628743 |
4a236f8a2fa862b812a8a32b59b18663e87a4c96 | 9,637 | py | Python | pieces.py | ps1899/Chess-AI | c77dbece8383a297cc654c5ca23fe2817076de79 | [
"Apache-2.0"
] | 1 | 2020-08-25T13:28:37.000Z | 2020-08-25T13:28:37.000Z | pieces.py | ps1899/Chess-AI | c77dbece8383a297cc654c5ca23fe2817076de79 | [
"Apache-2.0"
] | null | null | null | pieces.py | ps1899/Chess-AI | c77dbece8383a297cc654c5ca23fe2817076de79 | [
"Apache-2.0"
] | null | null | null | import board, ai
class Piece():
WHITE = "W"
BLACK = "B"
def __init__(self, x, y, color, piece_type, value):
self.x = x
self.y = y
self.color = color
self.piece_type = piece_type
self.value = value
# Returns all diagonal moves for this piece. This should therefore only
# be used by the Bishop and Queen since they are the only pieces that can
# move diagonally.
def get_possible_diagonal_moves(self, board):
moves = []
for i in range(1, board.Board.WIDTH):
if (not board.in_bounds(self.x+i, self.y+i)):
break
piece = board.get_piece(self.x+i, self.y+i)
moves.append(self.get_move(board, self.x+i, self.y+i))
if (piece != 0):
break
for i in range(1, board.Board.WIDTH):
if (not board.in_bounds(self.x+i, self.y-i)):
break
piece = board.get_piece(self.x+i, self.y-i)
moves.append(self.get_move(board, self.x+i, self.y-i))
if (piece != 0):
break
for i in range(1, board.Board.WIDTH):
if (not board.in_bounds(self.x-i, self.y-i)):
break
piece = board.get_piece(self.x-i, self.y-i)
moves.append(self.get_move(board, self.x-i, self.y-i))
if (piece != 0):
break
for i in range(1, board.Board.WIDTH):
if (not board.in_bounds(self.x-i, self.y+i)):
break
piece = board.get_piece(self.x-i, self.y+i)
moves.append(self.get_move(board, self.x-i, self.y+i))
if (piece != 0):
break
return self.remove_null_from_list(moves)
# Returns all horizontal moves for this piece. This should therefore only
# be used by the Rooks and Queen since they are the only pieces that can
# move horizontally.
def get_possible_horizontal_moves(self, board):
moves = []
# Moves to the right of the piece.
for i in range(1, board.Board.WIDTH - self.x):
piece = board.get_piece(self.x + i, self.y)
moves.append(self.get_move(board, self.x+i, self.y))
if (piece != 0):
break
# Moves to the left of the piece.
for i in range(1, self.x + 1):
piece = board.get_piece(self.x - i, self.y)
moves.append(self.get_move(board, self.x-i, self.y))
if (piece != 0):
break
# Downward moves.
for i in range(1, board.Board.HEIGHT - self.y):
piece = board.get_piece(self.x, self.y + i)
moves.append(self.get_move(board, self.x, self.y+i))
if (piece != 0):
break
# Upward moves.
for i in range(1, self.y + 1):
piece = board.get_piece(self.x, self.y - i)
moves.append(self.get_move(board, self.x, self.y-i))
if (piece != 0):
break
return self.remove_null_from_list(moves)
# Returns a Move object with (xfrom, yfrom) set to the piece current position.
# (xto, yto) is set to the given position. If the move is not valid 0 is returned.
# A move is not valid if it is out of bounds, or a piece of the same color is
# being eaten.
def get_move(self, board, xto, yto):
move = 0
if (board.in_bounds(xto, yto)):
piece = board.get_piece(xto, yto)
if (piece != 0):
if (piece.color != self.color):
move = ai.Move(self.x, self.y, xto, yto, False)
else:
move = ai.Move(self.x, self.y, xto, yto, False)
return move
# Returns the list of moves cleared of all the 0's.
def remove_null_from_list(self, l):
return [move for move in l if move != 0]
def to_string(self):
return self.color + self.piece_type + " "
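# Sketch of how the helpers above compose (hypothetical objects; assumes a
# populated board.Board instance `b`):
#   rook = Rook(0, 0, Piece.WHITE)        # Rook is defined below
#   moves = rook.get_possible_moves(b)    # horizontal/vertical moves only
#   maybe = rook.get_move(b, 0, 5)        # 0 if out of bounds or occupied by the same colour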
class Rook(Piece):
PIECE_TYPE = "R"
VALUE = 500
def __init__(self, x, y, color):
super(Rook, self).__init__(x, y, color, Rook.PIECE_TYPE, Rook.VALUE)
def get_possible_moves(self, board):
return self.get_possible_horizontal_moves(board)
def clone(self):
return Rook(self.x, self.y, self.color)
class Knight(Piece):
PIECE_TYPE = "N"
VALUE = 320
def __init__(self, x, y, color):
super(Knight, self).__init__(x, y, color, Knight.PIECE_TYPE, Knight.VALUE)
def get_possible_moves(self, board):
moves = []
moves.append(self.get_move(board, self.x+2, self.y+1))
moves.append(self.get_move(board, self.x-1, self.y+2))
moves.append(self.get_move(board, self.x-2, self.y+1))
moves.append(self.get_move(board, self.x+1, self.y-2))
moves.append(self.get_move(board, self.x+2, self.y-1))
moves.append(self.get_move(board, self.x+1, self.y+2))
moves.append(self.get_move(board, self.x-2, self.y-1))
moves.append(self.get_move(board, self.x-1, self.y-2))
return self.remove_null_from_list(moves)
def clone(self):
return Knight(self.x, self.y, self.color)
class Bishop(Piece):
PIECE_TYPE = "B"
VALUE = 330
def __init__(self, x, y, color):
super(Bishop, self).__init__(x, y, color, Bishop.PIECE_TYPE, Bishop.VALUE)
def get_possible_moves(self, board):
return self.get_possible_diagonal_moves(board)
def clone(self):
return Bishop(self.x, self.y, self.color)
class Queen(Piece):
PIECE_TYPE = "Q"
VALUE = 900
def __init__(self, x, y, color):
super(Queen, self).__init__(x, y, color, Queen.PIECE_TYPE, Queen.VALUE)
def get_possible_moves(self, board):
diagonal = self.get_possible_diagonal_moves(board)
horizontal = self.get_possible_horizontal_moves(board)
return horizontal + diagonal
def clone(self):
return Queen(self.x, self.y, self.color)
class King(Piece):
PIECE_TYPE = "K"
VALUE = 20000
def __init__(self, x, y, color):
super(King, self).__init__(x, y, color, King.PIECE_TYPE, King.VALUE)
def get_possible_moves(self, board):
moves = []
moves.append(self.get_move(board, self.x+1, self.y))
moves.append(self.get_move(board, self.x+1, self.y+1))
moves.append(self.get_move(board, self.x, self.y+1))
moves.append(self.get_move(board, self.x-1, self.y+1))
moves.append(self.get_move(board, self.x-1, self.y))
moves.append(self.get_move(board, self.x-1, self.y-1))
moves.append(self.get_move(board, self.x, self.y-1))
moves.append(self.get_move(board, self.x+1, self.y-1))
moves.append(self.get_top_castling_move(board))
moves.append(self.get_bottom_castling_move(board))
return self.remove_null_from_list(moves)
def get_top_castling_move(self, board):
if (self.color == Piece.WHITE and board.white_king_moved):
return 0
if (self.color == Piece.BLACK and board.black_king_moved):
return 0
piece = board.get_piece(self.x, self.y-3)
if (piece != 0):
if (piece.color == self.color and piece.piece_type == Rook.PIECE_TYPE):
if (board.get_piece(self.x, self.y-1) == 0 and board.get_piece(self.x, self.y-2) == 0):
return ai.Move(self.x, self.y, self.x, self.y-2, True)
return 0
def get_bottom_castling_move(self, board):
if (self.color == Piece.WHITE and board.white_king_moved):
return 0
if (self.color == Piece.BLACK and board.black_king_moved):
return 0
piece = board.get_piece(self.x, self.y+4)
if (piece != 0):
if (piece.color == self.color and piece.piece_type == Rook.PIECE_TYPE):
if (board.get_piece(self.x, self.y+1) == 0 and board.get_piece(self.x, self.y+2) == 0 and board.get_piece(self.x, self.y+3) == 0):
return ai.Move(self.x, self.y, self.x, self.y+2, True)
return 0
def clone(self):
return King(self.x, self.y, self.color)
class Pawn(Piece):
PIECE_TYPE = "P"
VALUE = 100
def __init__(self, x, y, color):
super(Pawn, self).__init__(x, y, color, Pawn.PIECE_TYPE, Pawn.VALUE)
def is_starting_position(self):
if (self.color == Piece.BLACK):
return self.x == board.Board.WIDTH - 2
else:
return self.x == 1
def get_possible_moves(self, board):
moves = []
# Direction the pawn can move in.
direction = 1
if (self.color == Piece.BLACK):
direction = -1
# The general 1 step forward move.
if (board.get_piece(self.x+direction, self.y) == 0):
moves.append(self.get_move(board, self.x + direction, self.y))
# The Pawn can take 2 steps as the first move.
if (self.is_starting_position() and board.get_piece(self.x + direction, self.y) == 0 and board.get_piece(self.x + direction*2, self.y) == 0):
moves.append(self.get_move(board, self.x + direction * 2, self.y))
# Eating pieces.
piece = board.get_piece(self.x + direction, self.y + 1)
if (piece != 0):
moves.append(self.get_move(board, self.x + direction, self.y + 1))
piece = board.get_piece(self.x + direction, self.y - 1)
if (piece != 0):
moves.append(self.get_move(board, self.x + direction, self.y - 1))
return self.remove_null_from_list(moves)
def clone(self):
return Pawn(self.x, self.y, self.color)
| 32.778912 | 149 | 0.584414 |
4a236fbe72bc52479b8ce71677b5a288b6f70db0 | 6,997 | py | Python | ZenPacks/daviswr/ZoneMinder/dsplugins/Storage.py | daviswr/ZenPacks.daviswr.ZoneMinder | 7baf10fd0f27b533ab8d1eb8c8b41a3680997b98 | [
"MIT"
] | 2 | 2021-04-15T09:05:33.000Z | 2022-01-07T14:36:16.000Z | ZenPacks/daviswr/ZoneMinder/dsplugins/Storage.py | daviswr/ZenPacks.daviswr.ZoneMinder | 7baf10fd0f27b533ab8d1eb8c8b41a3680997b98 | [
"MIT"
] | 5 | 2019-11-25T05:02:36.000Z | 2021-06-30T12:53:39.000Z | ZenPacks/daviswr/ZoneMinder/dsplugins/Storage.py | daviswr/ZenPacks.daviswr.ZoneMinder | 7baf10fd0f27b533ab8d1eb8c8b41a3680997b98 | [
"MIT"
] | 2 | 2020-12-22T02:50:40.000Z | 2020-12-22T16:11:43.000Z | """Monitors ZoneMinder storage volumes using the JSON API"""
import logging
LOG = logging.getLogger('zen.ZoneMinder')
import json
import re
import urllib
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import getPage
from ZenPacks.zenoss.PythonCollector.datasources.PythonDataSource import (
PythonDataSourcePlugin
)
from ZenPacks.daviswr.ZoneMinder.lib import zmUtil
class Storage(PythonDataSourcePlugin):
"""ZoneMinder storage data source plugin"""
@classmethod
def config_key(cls, datasource, context):
return(
context.device().id,
datasource.getCycleTime(context),
context.id,
'zoneminder-storage',
)
@classmethod
def params(cls, datasource, context):
return {
'username': context.zZoneMinderUsername,
'password': context.zZoneMinderPassword,
'hostname': context.zZoneMinderHostname,
'port': context.zZoneMinderPort,
'path': context.zZoneMinderPath,
'ssl': context.zZoneMinderSSL,
'base_url': context.zZoneMinderURL,
}
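    # Framework contract (summarised): config_key() controls how datasources are
    # grouped into collection tasks, while params() runs where model access is
    # available; its return value is what collect() later sees as
    # datasource.params on the collector.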
@inlineCallbacks
def collect(self, config):
data = self.new_data()
for datasource in config.datasources:
# LOG.debug('%s: parameters\n%s', config.id, datasource.params)
username = datasource.params['username']
password = datasource.params['password']
hostname = datasource.params['hostname']
port = datasource.params['port']
path = datasource.params['path']
ssl = datasource.params['ssl']
base_url = datasource.params['base_url']
comp_id = datasource.component.replace('zmStorage_', '')
if not username or not password:
LOG.error(
'%s: zZoneMinderUsername or zZoneMinderPassword not set',
config.id
)
returnValue(None)
base_url = zmUtil.generate_zm_url(
hostname=hostname or config.id,
port=port or 443,
path=path or '/zm/',
ssl=ssl or True,
url=base_url
)
if re.match(zmUtil.url_regex, base_url) is None:
LOG.error('%s: %s is not a valid URL', config.id, base_url)
returnValue(None)
else:
LOG.debug(
'%s: using base ZoneMinder URL %s',
config.id,
base_url
)
login_params = urllib.urlencode({
'action': 'login',
'view': 'login',
'username': username,
'password': password,
# 1.34+ requires OPT_USE_LEGACY_API_AUTH
'stateful': 1,
})
login_url = '{0}index.php?{1}'.format(base_url, login_params)
api_url = '{0}api/'.format(base_url)
cookies = dict()
try:
# Attempt login
login_response = yield getPage(
login_url,
method='POST',
cookies=cookies
)
if 'Invalid username or password' in login_response:
LOG.error(
'%s: ZoneMinder login credentials invalid',
config.id,
)
returnValue(None)
elif not cookies:
LOG.error('%s: No cookies received', config.id)
returnValue(None)
# Console
# Session cookies on 1.34 require view=login on action=login
# This returns a 302 to the console page
# rather than just the console
response = yield getPage(
'{0}index.php?view=console'.format(base_url),
method='GET',
cookies=cookies
)
# Scrape storage info from HTML
volumes = zmUtil.scrape_console_volumes(response)
if comp_id not in volumes:
LOG.warn(
'%s: %s not found in ZM web console',
config.id,
datasource.component
)
# Versions
response = yield getPage(
api_url + 'host/getVersion.json',
method='GET',
cookies=cookies
)
versions = zmUtil.dissect_versions(json.loads(response))
storage = list()
# 1.32+ required for storage.json
if (versions['daemon']['major'] >= 1
and versions['daemon']['minor'] >= 32):
# Storage
response = yield getPage(
api_url + 'storage.json',
method='GET',
cookies=cookies
)
storage = json.loads(response).get('storage', list())
# API logout
yield getPage(
api_url + 'host/logout.json',
method='GET',
cookies=cookies
)
else:
# Browser-style log out
# Doesn't work with 1.34.21
yield getPage(
base_url + 'index.php?action=logout',
method='POST',
cookies=cookies
)
except Exception:
LOG.exception('%s: failed to get store data', config.id)
continue
# Combine storage info from API with that scraped from Console
for item in storage:
store = item['Storage']
if store['Name'] in volumes:
volumes[store['Name']].update(store)
# Scraping failed
else:
volumes[store['Name']] = store
if 'DiskSpace' in volumes[store['Name']]:
volumes[store['Name']]['events'] = int(
volumes[store['Name']]['DiskSpace']
)
LOG.debug('%s: ZM storage output:\n%s', config.id, volumes)
stats = volumes.get(comp_id, dict())
for datapoint_id in (x.id for x in datasource.points):
if datapoint_id not in stats:
continue
value = stats.get(datapoint_id)
dpname = '_'.join((datasource.datasource, datapoint_id))
data['values'][datasource.component][dpname] = (value, 'N')
returnValue(data)
| 34.985 | 77 | 0.477919 |
4a236fcb3c72ef06dcf52214d0310fc83d05bdae | 43,050 | py | Python | Networks/EfficientNet_hl.py | XLPRUtils/XLPR_Classification | c8a3a574013858e23d4722dd03c1d9fa59a41c0e | [
"MIT"
] | 2 | 2020-11-17T08:57:01.000Z | 2021-06-30T00:42:53.000Z | Networks/EfficientNet_hl.py | XLPRUtils/XLPR_Classification | c8a3a574013858e23d4722dd03c1d9fa59a41c0e | [
"MIT"
] | null | null | null | Networks/EfficientNet_hl.py | XLPRUtils/XLPR_Classification | c8a3a574013858e23d4722dd03c1d9fa59a41c0e | [
"MIT"
] | null | null | null | """model.py - Model and module class for EfficientNet.
They are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import torch
from torch import nn
from torch.nn import functional as F
"""utils.py - Helper functions for building the model and for loading model parameters.
These helper functions are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
import os
################################################################################
### Help functions for model architecture
################################################################################
# GlobalParams and BlockArgs: Two namedtuples
# Swish and MemoryEfficientSwish: Two implementations of the method
# round_filters and round_repeats:
# Functions to calculate params for scaling model width and depth ! ! !
# get_width_and_height_from_size and calculate_output_image_size
# drop_connect: A structural design
# get_same_padding_conv2d:
# Conv2dDynamicSamePadding
# Conv2dStaticSamePadding
# get_same_padding_maxPool2d:
# MaxPool2dDynamicSamePadding
# MaxPool2dStaticSamePadding
# It's an additional function, not used in EfficientNet,
# but can be used in other model (such as EfficientDet).
# Identity: An implementation of identical mapping
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
'width_coefficient', 'depth_coefficient', 'image_size', 'dropout_rate',
'num_classes', 'batch_norm_momentum', 'batch_norm_epsilon',
'drop_connect_rate', 'depth_divisor', 'min_depth'])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
'num_repeat', 'kernel_size', 'stride', 'expand_ratio',
'input_filters', 'output_filters', 'se_ratio', 'id_skip'])
# Set GlobalParams and BlockArgs's defaults
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
# An ordinary implementation of Swish function
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
# A memory-efficient implementation of Swish function
class SwishImplementation(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
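# The hand-written backward above uses d/dx [x * sigmoid(x)]
#     = sigmoid(x) * (1 + x * (1 - sigmoid(x))),
# which is why only the input tensor `i` needs to be saved for the backward pass.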
def round_filters(filters, global_params):
"""Calculate and round number of filters based on width multiplier.
Use width_coefficient, depth_divisor and min_depth of global_params.
Args:
filters (int): Filters number to be calculated.
global_params (namedtuple): Global params of the model.
Returns:
new_filters: New filters number after calculating.
"""
multiplier = global_params.width_coefficient
if not multiplier:
return filters
# TODO: modify the params names.
# maybe the names (width_divisor,min_width)
# are more suitable than (depth_divisor,min_depth).
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= multiplier
min_depth = min_depth or divisor # pay attention to this line when using min_depth
# follow the formula transferred from official TensorFlow implementation
new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters: # prevent rounding by more than 10%
new_filters += divisor
return int(new_filters)
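# Worked example (illustrative values, not from the paper tables): with
# width_coefficient=1.1 and depth_divisor=8, round_filters(32, gp) computes
# 32 * 1.1 = 35.2, rounds to the nearest multiple of 8 (32), and since
# 32 >= 0.9 * 35.2 no bump is applied, so 32 is returned.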
def round_repeats(repeats, global_params):
"""Calculate module's repeat number of a block based on depth multiplier.
Use depth_coefficient of global_params.
Args:
repeats (int): num_repeat to be calculated.
global_params (namedtuple): Global params of the model.
Returns:
new repeat: New repeat number after calculating.
"""
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
# follow the formula transferred from official TensorFlow implementation
return int(math.ceil(multiplier * repeats))
def drop_connect(inputs, p, training):
"""Drop connect.
Args:
input (tensor: BCWH): Input of this structure.
p (float: 0.0~1.0): Probability of drop connection.
training (bool): The running mode.
Returns:
output: Output after drop connection.
"""
assert p >= 0 and p <= 1, 'p must be in range of [0,1]'
if not training:
return inputs
batch_size = inputs.shape[0]
keep_prob = 1 - p
# generate binary_tensor mask according to probability (p for 0, 1-p for 1)
random_tensor = keep_prob
random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
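# Behavioural note: with p=0.2 each sample's branch output is zeroed with
# probability 0.2 and the survivors are rescaled by 1/0.8, so the expected
# value of the output equals the input (standard drop-connect behaviour).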
def get_width_and_height_from_size(x):
"""Obtain height and width from x.
Args:
x (int, tuple or list): Data size.
Returns:
size: A tuple or list (H,W).
"""
if isinstance(x, int):
return x, x
if isinstance(x, list) or isinstance(x, tuple):
return x
else:
raise TypeError()
def calculate_output_image_size(input_image_size, stride):
"""Calculates the output image size when using Conv2dSamePadding with a stride.
Necessary for static padding. Thanks to mannatsingh for pointing this out.
Args:
input_image_size (int, tuple or list): Size of input image.
stride (int, tuple or list): Conv2d operation's stride.
Returns:
output_image_size: A list [H,W].
"""
if input_image_size is None:
return None
image_height, image_width = get_width_and_height_from_size(input_image_size)
stride = stride if isinstance(stride, int) else stride[0]
image_height = int(math.ceil(image_height / stride))
image_width = int(math.ceil(image_width / stride))
return [image_height, image_width]
# Note:
# The following 'SamePadding' functions make output size equal ceil(input size/stride).
# Only when stride equals 1, can the output size be the same as input size.
# Don't be confused by their function names ! ! !
def get_same_padding_conv2d(image_size=None):
"""Chooses static padding if you have specified an image size, and dynamic padding otherwise.
Static padding is necessary for ONNX exporting of models.
Args:
image_size (int or tuple): Size of the image.
Returns:
Conv2dDynamicSamePadding or Conv2dStaticSamePadding.
"""
if image_size is None:
return Conv2dDynamicSamePadding
else:
return partial(Conv2dStaticSamePadding, image_size=image_size)
class Conv2dDynamicSamePadding(nn.Conv2d):
"""2D Convolutions like TensorFlow, for a dynamic image size.
The padding is operated in forward function by calculating dynamically.
"""
# Tips for 'SAME' mode padding.
# Given the following:
# i: width or height
# s: stride
# k: kernel size
# d: dilation
# p: padding
# Output after Conv2d:
# o = floor((i+p-((k-1)*d+1))/s+1)
# If o equals i, i = floor((i+p-((k-1)*d+1))/s+1),
# => p = (i-1)*s+((k-1)*d+1)-i
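    # Worked example (illustrative numbers) of the padding computed in forward():
    # i=112, k=3, s=2, d=1 gives o=ceil(112/2)=56 and
    # pad = (56-1)*2 + ((3-1)*1+1) - 112 = 1, split as 0 (left/top) and 1 (right/bottom).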
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
def forward(self, x):
ih, iw = x.size()[-2:]
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) # change the output size according to stride ! ! !
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
"""2D Convolutions like TensorFlow's 'SAME' mode, with the given input image size.
    The padding module is calculated in the constructor, then used in forward.
"""
# With the same calculation as Conv2dDynamicSamePadding
def __init__(self, in_channels, out_channels, kernel_size, stride=1, image_size=None, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, stride, **kwargs)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
# Calculate padding based on image size and save it
assert image_size is not None
ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
else:
self.static_padding = Identity()
def forward(self, x):
x = self.static_padding(x)
x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
def get_same_padding_maxPool2d(image_size=None):
"""Chooses static padding if you have specified an image size, and dynamic padding otherwise.
Static padding is necessary for ONNX exporting of models.
Args:
image_size (int or tuple): Size of the image.
Returns:
MaxPool2dDynamicSamePadding or MaxPool2dStaticSamePadding.
"""
if image_size is None:
return MaxPool2dDynamicSamePadding
else:
return partial(MaxPool2dStaticSamePadding, image_size=image_size)
class MaxPool2dDynamicSamePadding(nn.MaxPool2d):
"""2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size.
The padding is operated in forward function by calculating dynamically.
"""
def __init__(self, kernel_size, stride, padding=0, dilation=1, return_indices=False, ceil_mode=False):
super().__init__(kernel_size, stride, padding, dilation, return_indices, ceil_mode)
self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size
self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation
def forward(self, x):
ih, iw = x.size()[-2:]
kh, kw = self.kernel_size
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
return F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
self.dilation, self.ceil_mode, self.return_indices)
class MaxPool2dStaticSamePadding(nn.MaxPool2d):
"""2D MaxPooling like TensorFlow's 'SAME' mode, with the given input image size.
    The padding module is calculated in the constructor, then used in forward.
"""
def __init__(self, kernel_size, stride, image_size=None, **kwargs):
super().__init__(kernel_size, stride, **kwargs)
self.stride = [self.stride] * 2 if isinstance(self.stride, int) else self.stride
self.kernel_size = [self.kernel_size] * 2 if isinstance(self.kernel_size, int) else self.kernel_size
self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int) else self.dilation
# Calculate padding based on image size and save it
assert image_size is not None
ih, iw = (image_size, image_size) if isinstance(image_size, int) else image_size
kh, kw = self.kernel_size
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
else:
self.static_padding = Identity()
def forward(self, x):
x = self.static_padding(x)
x = F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
self.dilation, self.ceil_mode, self.return_indices)
return x
class Identity(nn.Module):
"""Identity mapping.
Send input to output directly.
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, input):
return input
################################################################################
### Helper functions for loading model params
################################################################################
# BlockDecoder: A Class for encoding and decoding BlockArgs
# efficientnet_params: A function to query compound coefficient
# get_model_params and efficientnet:
# Functions to get BlockArgs and GlobalParams for efficientnet
# url_map and url_map_advprop: Dicts of url_map for pretrained weights
# load_pretrained_weights: A function to load pretrained weights
class BlockDecoder(object):
"""Block Decoder for readability,
straight from the official TensorFlow repository.
"""
@staticmethod
def _decode_block_string(block_string):
"""Get a block through a string notation of arguments.
Args:
block_string (str): A string notation of arguments.
Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'.
Returns:
BlockArgs: The namedtuple defined at the top of this file.
"""
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# Check stride
assert (('s' in options and len(options['s']) == 1) or
(len(options['s']) == 2 and options['s'][0] == options['s'][1]))
return BlockArgs(
num_repeat=int(options['r']),
kernel_size=int(options['k']),
stride=[int(options['s'][0])],
expand_ratio=int(options['e']),
input_filters=int(options['i']),
output_filters=int(options['o']),
se_ratio=float(options['se']) if 'se' in options else None,
id_skip=('noskip' not in block_string))
@staticmethod
def _encode_block_string(block):
"""Encode a block to a string.
Args:
block (namedtuple): A BlockArgs type argument.
Returns:
block_string: A String form of BlockArgs.
"""
args = [
'r%d' % block.num_repeat,
'k%d' % block.kernel_size,
's%d%d' % (block.strides[0], block.strides[1]),
'e%s' % block.expand_ratio,
'i%d' % block.input_filters,
'o%d' % block.output_filters
]
if 0 < block.se_ratio <= 1:
args.append('se%s' % block.se_ratio)
if block.id_skip is False:
args.append('noskip')
return '_'.join(args)
@staticmethod
def decode(string_list):
"""Decode a list of string notations to specify blocks inside the network.
Args:
string_list (list[str]): A list of strings, each string is a notation of block.
Returns:
blocks_args: A list of BlockArgs namedtuples of block args.
"""
assert isinstance(string_list, list)
blocks_args = []
for block_string in string_list:
blocks_args.append(BlockDecoder._decode_block_string(block_string))
return blocks_args
@staticmethod
def encode(blocks_args):
"""Encode a list of BlockArgs to a list of strings.
Args:
blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args.
Returns:
block_strings: A list of strings, each string is a notation of block.
"""
block_strings = []
for block in blocks_args:
block_strings.append(BlockDecoder._encode_block_string(block))
return block_strings
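# Decoding sketch (mirrors the docstring example above):
#   BlockDecoder.decode(['r1_k3_s11_e1_i32_o16_se0.25'])[0]
#   -> BlockArgs(num_repeat=1, kernel_size=3, stride=[1], expand_ratio=1,
#                input_filters=32, output_filters=16, se_ratio=0.25, id_skip=True)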
def efficientnet_params(model_name):
"""Map EfficientNet model name to parameter coefficients.
Args:
model_name (str): Model name to be queried.
Returns:
params_dict[model_name]: A (width,depth,res,dropout) tuple.
"""
params_dict = {
# Coefficients: width,depth,res,dropout
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
'efficientnet-b8': (2.2, 3.6, 672, 0.5),
'efficientnet-l2': (4.3, 5.3, 800, 0.5),
}
return params_dict[model_name]
def efficientnet(width_coefficient=None, depth_coefficient=None, image_size=None,
dropout_rate=0.2, drop_connect_rate=0.2, num_classes=1000):
"""Create BlockArgs and GlobalParams for efficientnet model.
Args:
width_coefficient (float)
depth_coefficient (float)
image_size (int)
dropout_rate (float)
drop_connect_rate (float)
num_classes (int)
Meaning as the name suggests.
Returns:
blocks_args, global_params.
"""
# Blocks args for the whole model(efficientnet-b0 by default)
# It will be modified in the construction of EfficientNet Class according to model
blocks_args = [
'r1_k3_s11_e1_i32_o16_se0.25',
'r2_k3_s22_e6_i16_o24_se0.25',
'r2_k5_s22_e6_i24_o40_se0.25',
'r3_k3_s22_e6_i40_o80_se0.25',
'r3_k5_s11_e6_i80_o112_se0.25',
'r4_k5_s22_e6_i112_o192_se0.25',
'r1_k3_s11_e6_i192_o320_se0.25',
]
blocks_args = BlockDecoder.decode(blocks_args)
global_params = GlobalParams(
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
image_size=image_size,
dropout_rate=dropout_rate,
num_classes=num_classes,
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
drop_connect_rate=drop_connect_rate,
depth_divisor=8,
min_depth=None,
)
return blocks_args, global_params
def get_model_params(model_name, override_params):
"""Get the block args and global params for a given model name.
Args:
model_name (str): Model's name.
override_params (dict): A dict to modify global_params.
Returns:
blocks_args, global_params
"""
if model_name.startswith('efficientnet'):
w, d, s, p = efficientnet_params(model_name)
# note: all models have drop connect rate = 0.2
blocks_args, global_params = efficientnet(
width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
else:
raise NotImplementedError('model name is not pre-defined: %s' % model_name)
if override_params:
# ValueError will be raised here if override_params has fields not included in global_params.
global_params = global_params._replace(**override_params)
return blocks_args, global_params
# train with Standard methods
# check more details in paper(EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks)
url_map = {
'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth',
'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth',
'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth',
'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth',
'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth',
'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth',
'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth',
'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth',
}
# train with Adversarial Examples(AdvProp)
# check more details in paper(Adversarial Examples Improve Image Recognition)
url_map_advprop = {
'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth',
'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth',
'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth',
'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth',
'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth',
'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth',
'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth',
'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth',
'efficientnet-b8': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth',
}
# TODO: add the pretrained weights url map of 'efficientnet-l2'
def load_pretrained_weights(model, model_name, weights_path=None, load_fc=True, advprop=False):
"""Loads pretrained weights from weights path or download using url.
Args:
model (Module): The whole model of efficientnet.
model_name (str): Model name of efficientnet.
weights_path (None or str):
str: path to pretrained weights file on the local disk.
None: use pretrained weights downloaded from the Internet.
load_fc (bool): Whether to load pretrained weights for fc layer at the end of the model.
advprop (bool): Whether to load pretrained weights
trained with advprop (valid when weights_path is None).
"""
if isinstance(weights_path,str):
state_dict = torch.load(weights_path)
else:
# AutoAugment or Advprop (different preprocessing)
url_map_ = url_map_advprop if advprop else url_map
state_dict = model_zoo.load_url(url_map_[model_name])
if load_fc:
ret = model.load_state_dict(state_dict, strict=False)
assert not ret.missing_keys, f'Missing keys when loading pretrained weights: {ret.missing_keys}'
else:
state_dict.pop('_fc.weight')
state_dict.pop('_fc.bias')
ret = model.load_state_dict(state_dict, strict=False)
assert set(ret.missing_keys) == set(
['_fc.weight', '_fc.bias']), f'Missing keys when loading pretrained weights: {ret.missing_keys}'
assert not ret.unexpected_keys, f'Missing keys when loading pretrained weights: {ret.unexpected_keys}'
print('Loaded pretrained weights for {}'.format(model_name))
class MBConvBlock(nn.Module):
"""Mobile Inverted Residual Bottleneck Block.
Args:
block_args (namedtuple): BlockArgs, defined in utils.py.
global_params (namedtuple): GlobalParam, defined in utils.py.
image_size (tuple or list): [image_height, image_width].
References:
[1] https://arxiv.org/abs/1704.04861 (MobileNet v1)
[2] https://arxiv.org/abs/1801.04381 (MobileNet v2)
[3] https://arxiv.org/abs/1905.02244 (MobileNet v3)
"""
def __init__(self, block_args, global_params, image_size=None):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum # pytorch's difference from tensorflow
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # whether to use skip connection and drop connect
# Expansion phase (Inverted Bottleneck)
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
if self._block_args.expand_ratio != 1:
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# image_size = calculate_output_image_size(image_size, 1) <-- this wouldn't modify image_size
# Depthwise convolution phase
k = self._block_args.kernel_size
s = self._block_args.stride
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._depthwise_conv = Conv2d(
in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
kernel_size=k, stride=s, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
image_size = calculate_output_image_size(image_size, s)
# Squeeze and Excitation layer, if desired
if self.has_se:
Conv2d = get_same_padding_conv2d(image_size=(1,1))
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
# Pointwise convolution phase
final_oup = self._block_args.output_filters
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self._swish = MemoryEfficientSwish()
def forward(self, inputs, drop_connect_rate=None):
"""MBConvBlock's forward function.
Args:
inputs (tensor): Input tensor.
drop_connect_rate (bool): Drop connect rate (float, between 0 and 1).
Returns:
Output of this block after processing.
"""
# Expansion and Depthwise Convolution
x = inputs
if self._block_args.expand_ratio != 1:
x = self._expand_conv(inputs)
x = self._bn0(x)
x = self._swish(x)
x = self._depthwise_conv(x)
x = self._bn1(x)
x = self._swish(x)
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_reduce(x_squeezed)
x_squeezed = self._swish(x_squeezed)
x_squeezed = self._se_expand(x_squeezed)
x = torch.sigmoid(x_squeezed) * x
# Pointwise Convolution
x = self._project_conv(x)
x = self._bn2(x)
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
# The combination of skip connection and drop connect brings about stochastic depth.
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export).
Args:
memory_efficient (bool): Whether to use memory-efficient version of swish.
"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class EfficientNet(nn.Module):
"""EfficientNet model.
Most easily loaded with the .from_name or .from_pretrained methods.
Args:
blocks_args (list[namedtuple]): A list of BlockArgs to construct blocks.
global_params (namedtuple): A set of GlobalParams shared between blocks.
References:
[1] https://arxiv.org/abs/1905.11946 (EfficientNet)
Example:
>>> import torch
>>> from efficientnet.model import EfficientNet
>>> inputs = torch.rand(1, 3, 224, 224)
>>> model = EfficientNet.from_pretrained('efficientnet-b0')
>>> model.eval()
>>> outputs = model(inputs)
"""
def __init__(self, blocks_args=None, global_params=None):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert len(blocks_args) > 0, 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
# Batch norm parameters
bn_mom = 1 - self._global_params.batch_norm_momentum
bn_eps = self._global_params.batch_norm_epsilon
# Get stem static or dynamic convolution depending on image size
image_size = global_params.image_size
Conv2d = get_same_padding_conv2d(image_size=image_size)
# Stem
        in_channels = 1  # single-channel (e.g. grayscale) stem in this variant; the reference implementation uses 3 for RGB
out_channels = round_filters(32, self._global_params) # number of output channels
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
image_size = calculate_output_image_size(image_size, 2)
# Build blocks
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, self._global_params),
output_filters=round_filters(block_args.output_filters, self._global_params),
num_repeat=round_repeats(block_args.num_repeat, self._global_params)
)
# The first block needs to take care of stride and filter size increase.
self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
image_size = calculate_output_image_size(image_size, block_args.stride)
if block_args.num_repeat > 1: # modify block_args to keep same output size
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for _ in range(block_args.num_repeat - 1):
self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size))
# image_size = calculate_output_image_size(image_size, block_args.stride) # stride = 1
# Head
in_channels = block_args.output_filters # output of final block
out_channels = round_filters(1280, self._global_params)
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# Final linear layer
self._avg_pooling = nn.AdaptiveAvgPool2d(1)
self._dropout = nn.Dropout(self._global_params.dropout_rate)
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
self._swish = MemoryEfficientSwish()
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export).
Args:
memory_efficient (bool): Whether to use memory-efficient version of swish.
"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
for block in self._blocks:
block.set_swish(memory_efficient)
def extract_endpoints(self, inputs):
"""Use convolution layer to extract features
from reduction levels i in [1, 2, 3, 4, 5].
Args:
inputs (tensor): Input tensor.
Returns:
Dictionary of last intermediate features
with reduction levels i in [1, 2, 3, 4, 5].
Example:
>>> import torch
>>> from efficientnet.model import EfficientNet
>>> inputs = torch.rand(1, 3, 224, 224)
>>> model = EfficientNet.from_pretrained('efficientnet-b0')
>>> endpoints = model.extract_features(inputs)
>>> print(endpoints['reduction_1'].shape) # torch.Size([1, 16, 112, 112])
>>> print(endpoints['reduction_2'].shape) # torch.Size([1, 24, 56, 56])
>>> print(endpoints['reduction_3'].shape) # torch.Size([1, 40, 28, 28])
>>> print(endpoints['reduction_4'].shape) # torch.Size([1, 112, 14, 14])
>>> print(endpoints['reduction_5'].shape) # torch.Size([1, 1280, 7, 7])
"""
endpoints = dict()
# Stem
x = self._swish(self._bn0(self._conv_stem(inputs)))
prev_x = x
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
x = block(x, drop_connect_rate=drop_connect_rate)
if prev_x.size(2) > x.size(2):
endpoints[f'reduction_{len(endpoints)+1}'] = prev_x
prev_x = x
# Head
x = self._swish(self._bn1(self._conv_head(x)))
endpoints[f'reduction_{len(endpoints)+1}'] = x
return endpoints
def extract_features(self, inputs):
"""use convolution layer to extract feature .
Args:
inputs (tensor): Input tensor.
Returns:
Output of the final convolution
layer in the efficientnet model.
"""
# Stem
x = self._swish(self._bn0(self._conv_stem(inputs)))
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
x = block(x, drop_connect_rate=drop_connect_rate)
# Head
x = self._swish(self._bn1(self._conv_head(x)))
return x
def forward(self, inputs):
"""EfficientNet's forward function.
Calls extract_features to extract features, applies final linear layer, and returns logits.
Args:
inputs (tensor): Input tensor.
Returns:
Output of this model after processing.
"""
# Convolution layers
x = self.extract_features(inputs)
# Pooling and final linear layer
x = self._avg_pooling(x)
x = x.flatten(start_dim=1)
x = self._dropout(x)
x = self._fc(x)
return x
@classmethod
def from_name(cls, model_name, in_channels=3, **override_params):
"""create an efficientnet model according to name.
Args:
model_name (str): Name for efficientnet.
in_channels (int): Input data's channel number.
override_params (other key word params):
Params to override model's global_params.
Optional key:
'width_coefficient', 'depth_coefficient',
'image_size', 'dropout_rate',
'num_classes', 'batch_norm_momentum',
'batch_norm_epsilon', 'drop_connect_rate',
'depth_divisor', 'min_depth'
Returns:
An efficientnet model.
"""
cls._check_model_name_is_valid(model_name)
blocks_args, global_params = get_model_params(model_name, override_params)
model = cls(blocks_args, global_params)
model._change_in_channels(in_channels)
return model
@classmethod
def from_pretrained(cls, model_name, weights_path=None, advprop=False,
in_channels=3, num_classes=1000, **override_params):
"""create an efficientnet model according to name.
Args:
model_name (str): Name for efficientnet.
weights_path (None or str):
str: path to pretrained weights file on the local disk.
None: use pretrained weights downloaded from the Internet.
advprop (bool):
Whether to load pretrained weights
trained with advprop (valid when weights_path is None).
in_channels (int): Input data's channel number.
num_classes (int):
Number of categories for classification.
It controls the output size for final linear layer.
override_params (other key word params):
Params to override model's global_params.
Optional key:
'width_coefficient', 'depth_coefficient',
'image_size', 'dropout_rate',
'num_classes', 'batch_norm_momentum',
'batch_norm_epsilon', 'drop_connect_rate',
'depth_divisor', 'min_depth'
Returns:
A pretrained efficientnet model.
"""
model = cls.from_name(model_name, num_classes = num_classes, **override_params)
load_pretrained_weights(model, model_name, weights_path=weights_path, load_fc=(num_classes == 1000), advprop=advprop)
model._change_in_channels(in_channels)
return model
@classmethod
def get_image_size(cls, model_name):
"""Get the input image size for a given efficientnet model.
Args:
model_name (str): Name for efficientnet.
Returns:
Input image size (resolution).
"""
cls._check_model_name_is_valid(model_name)
_, _, res, _ = efficientnet_params(model_name)
return res
@classmethod
def _check_model_name_is_valid(cls, model_name):
"""Validates model name.
Args:
model_name (str): Name for efficientnet.
Returns:
bool: Is a valid name or not.
"""
valid_models = ['efficientnet-b'+str(i) for i in range(9)]
# Support the construction of 'efficientnet-l2' without pretrained weights
valid_models += ['efficientnet-l2']
if model_name not in valid_models:
raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
def _change_in_channels(self, in_channels):
"""Adjust model's first convolution layer to in_channels, if in_channels not equals 3.
Args:
in_channels (int): Input data's channel number.
"""
if in_channels != 3:
Conv2d = get_same_padding_conv2d(image_size = self._global_params.image_size)
out_channels = round_filters(32, self._global_params)
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
if __name__ == '__main__':
model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=2)
use_gpu = torch.cuda.is_available()
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model).cuda()
if torch.cuda.is_available():
model.cuda()
test_data = torch.randn(5,3,168,168).cuda()
test_out = model(test_data)
print(test_data.size(), test_out.size()) | 43.883792 | 131 | 0.63561 |
4a2370b74cc24716eba8b5980899eb66c5a105c5 | 5,767 | py | Python | cam_analysis/optimized_expression.py | cabrittin/volumetric_analysis | 82004378abae963ef02858bf4711786dad76f133 | [
"MIT"
] | null | null | null | cam_analysis/optimized_expression.py | cabrittin/volumetric_analysis | 82004378abae963ef02858bf4711786dad76f133 | [
"MIT"
] | null | null | null | cam_analysis/optimized_expression.py | cabrittin/volumetric_analysis | 82004378abae963ef02858bf4711786dad76f133 | [
"MIT"
] | null | null | null | """
optimized_expression.py
Finds the optimal expression pattern that maximizes coverage of synaptic partners
while minimizing coverage of neighbors.
@author Christopher Brittin
@date 2019 April 14
"""
import sys
sys.path.append('./volumetric_analysis')
sys.path.append('.')
import argparse
import numpy as np
from random import sample
from tqdm import tqdm
from random import random
import matplotlib.pyplot as plt
from mat_loader import MatLoader
from cam.expression import Matrix
import aux
class Optimize(Matrix):
def __init__(self,fgenes,fexp):
Matrix.__init__(self,fgenes,fexp)
def reduced_expression(self,cells):
idx = [self.cells[c] for c in cells]
E = self.E[idx,:]
jdx = np.where(E.any(axis=0))[0]
E = E[:,jdx]
genes = [self.gene_idx[j] for j in jdx]
return genes,E.T
def build_synapse_matices(cells,synapses,synpartners=set([])):
n = len(cells)
m = len(synapses)
cells_idx = {cells[i]:i for i in range(n)}
P = np.zeros((n,m))
A = np.zeros((n,m))
jdx = 0
for cont in synapses:
partners = set(synapses[cont]['partners'])
neighbors = set(synapses[cont]['neighbors'])
nonsyn = neighbors - partners - synpartners
for c in partners: P[cells_idx[c],jdx] = 1
for c in nonsyn: A[cells_idx[c],jdx] = 1
jdx += 1
return P,A
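# Note (added comment): anneal() below is a generic simulated-annealing skeleton
# that expects user-supplied neighbor() and cost() functions, which are not
# defined in this module; the actual optimization used later is run_optimization().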
def anneal(sol):
old_cost = cost(sol)
T = 1.0
T_min = 0.00001
alpha = 0.9
while T > T_min:
i = 1
while i <= 100:
new_sol = neighbor(sol)
new_cost = cost(new_sol)
ap = acceptance_probability(old_cost, new_cost, T)
if ap > random():
sol = new_sol
old_cost = new_cost
i += 1
T = T*alpha
return sol, cost
def syncost(E,S,A,gamma = [0.5,0.4,0.1]):
syn = np.sum(np.dot(E,S))
adj = np.sum(np.dot(E,A))
sparse = np.sum(E != 0)
J = gamma[0] * syn - gamma[1] * adj - gamma[2] * sparse
return J
def adjcost(E,S,A,gamma = [0.5,0.4,0.1]):
syn = np.sum(np.dot(E,S))
adj = np.sum(np.dot(E,A))
sparse = np.sum(E != 0)
J = -gamma[0] * syn + gamma[1] * adj - gamma[2] * sparse
return J
def acceptance_probability(new_cost,old_cost,T):
p = np.exp((new_cost - old_cost) / T)
return min(p,1)
def perturb(coord,E):
rdx = np.random.randint(0,len(coord))
(a,b) = coord[rdx]
E[a,b] = int(not E[a,b])
return E
def run_optimization(E0,S,A,funcCost,iters=100):
(n,m) = E0.shape
E = np.zeros(E0.shape)
coord = np.nonzero(E0)
coord = list(zip(coord[0],coord[1]))
k = int(len(coord) / 2)
for (a,b) in sample(coord,k): E[a,b] = 1
if not iters: iters = 10*len(coord)
old_cost = funcCost(E,S,A)
T = 1.0
T_min = 0.00001
alpha = 0.9
cost_rec = [old_cost]
while T > T_min:
i = 1
while i <= iters:
#Perturb E
rdx = np.random.randint(0,len(coord))
(a,b) = coord[rdx]
E[a,b] = int(not E[a,b])
#rdx = np.random.randint(0,n)
#old_row = E[rdx,:]
#nonzero = np.nonzero(old_row)
#E[rdx,nonzero] = 0
#Compute new cost
new_cost = funcCost(E,S,A)
#Decide to accept perturbation
ap = acceptance_probability(new_cost,old_cost,T)
if ap > random():
old_cost = new_cost
cost_rec.append(new_cost)
else:
E[a,b] = int(not(E[a,b]))
#E[rdx,nonzero] = 1
i += 1
T *= alpha
return E,cost_rec
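# Minimal sketch (added for illustration) of exercising run_optimization on
# synthetic data; the shapes and densities below are assumptions, not values
# taken from the original analysis:
#
#   E0 = (np.random.rand(10, 40) < 0.3).astype(float)   # genes x cells expression
#   S  = (np.random.rand(40, 5) < 0.2).astype(float)    # cells x synapses (partners)
#   A  = (np.random.rand(40, 5) < 0.2).astype(float)    # cells x synapses (non-synaptic)
#   Eopt, rec = run_optimization(E0, S, A, syncost, iters=50)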
cam = 'mat/cam_isoforms.txt'
FOUT = 'cam_analysis/results/cam_optimize_%d_v2.csv'
if __name__=='__main__':
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('matrix',
action = 'store',
help = 'Path to matrix file')
parser.add_argument('deg',
action = 'store',
type= int,
help = 'Conserved degree')
params = parser.parse_args()
_iter = 100
M = MatLoader()
M.load_left()
C = M.load_consensus_graphs(params.deg)
S = M.load_consensus_chemical_synapse(params.deg)
e = Optimize(cam,params.matrix)
e.load_genes()
e.load_cells(sorted(C.A.nodes()))
e.assign_expression()
e.binarize()
gene_count = {g:[0]*_iter for g in e.genes}
for j in range(_iter):
print('Iter: %d/%d'%(j+1,_iter))
for cell in tqdm(M.left,desc='Optimize'):
if not C.A.has_node(cell): continue
if not C.C.has_node(cell): continue
neigh = sorted(C.A.neighbors(cell))
genes,E = e.reduced_expression(neigh)
P,A = build_synapse_matices(neigh,S[cell],set(C.C.neighbors(cell)))
#P,A = build_synapse_matices(neigh,S[cell])
Eopt,cost_rec = run_optimization(E,P,A,syncost)
idx = set(np.nonzero(Eopt)[0])
_genes = [genes[i] for i in idx]
for g in _genes: gene_count[g][j] += 1
aux.write.from_dict(FOUT%params.deg,gene_count)
#sorted_genes = sorted(gene_count.items(), key=lambda kv: kv[1])
#for g in sorted_genes: print(g)
"""
print(np.sum(E),np.sum(Eopt))
print(np.sum(np.dot(E,P)),np.sum(np.dot(E,A)))
print(np.sum(np.dot(Eopt,P)),np.sum(np.dot(Eopt,A)))
idx = set(np.nonzero(Eopt)[0])
_genes = [genes[i] for i in idx]
print(len(genes),len(_genes))
print(sorted(_genes))
plt.figure()
plt.plot(cost_rec,'b-')
plt.show()
"""
| 28.408867 | 90 | 0.561991 |
4a23716cf1f8e7f927163d0e39218da95e16a82c | 457 | py | Python | 2018/01.23/API/jya.api.py | mksweetlife/study | 0786a4bd7901ac0d1aa5efdae5b755693eee5cd3 | ["MIT"] | 1 | 2017-10-24T08:19:15.000Z | 2017-10-24T08:19:15.000Z | 2018/01.23/API/jya.api.py | mksweetlife/study | 0786a4bd7901ac0d1aa5efdae5b755693eee5cd3 | ["MIT"] | 31 | 2017-10-31T11:09:44.000Z | 2018-12-04T07:47:46.000Z | 2018/01.23/API/jya.api.py | mksweetlife/study | 0786a4bd7901ac0d1aa5efdae5b755693eee5cd3 | ["MIT"] | 5 | 2017-10-26T02:13:08.000Z | 2018-07-05T04:58:47.000Z |
import requests
def getPrice(coin):
A = ['btc','bch','btg','bcd','ubtc','eth','etc','ada','qtum','xlm','neo','gas','rpx','hsr','knc','tsl','tron','omg','wtc','mco','storm','gto','pxs','chat','ink','hlc','ent','qbt','spc','put']
if coin not in A:
return 'false'
r = requests.get('https://api.coinnest.co.kr/api/pub/ticker/?coin={0}'.format(coin))
j = r.json()
last = j['last']
return last
# 확인해보기
a = getPrice('btc')
b = getPrice('dbc')
print(a, b)
| 30.466667 | 192 | 0.588621 |
4a23721ac54d2501994e0a002e5f88bff6bf6521 | 12,746 | py | Python | analysis/webservice/algorithms/doms/ResultsStorage.py | ngachung/incubator-sdap-nexus | 38e768694fcc142e2d88283cb1e44e05f88da847 | ["Apache-2.0"] | 17 | 2017-11-16T07:36:33.000Z | 2021-11-07T00:02:20.000Z | analysis/webservice/algorithms/doms/ResultsStorage.py | ngachung/incubator-sdap-nexus | 38e768694fcc142e2d88283cb1e44e05f88da847 | ["Apache-2.0"] | 35 | 2018-01-11T00:50:20.000Z | 2022-03-17T23:08:07.000Z | analysis/webservice/algorithms/doms/ResultsStorage.py | ngachung/incubator-sdap-nexus | 38e768694fcc142e2d88283cb1e44e05f88da847 | ["Apache-2.0"] | 25 | 2017-11-16T07:36:38.000Z | 2022-02-03T20:48:46.000Z |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import logging
import uuid
from datetime import datetime
import pkg_resources
from cassandra.cluster import Cluster
from cassandra.policies import TokenAwarePolicy, DCAwareRoundRobinPolicy
from cassandra.query import BatchStatement
from cassandra.auth import PlainTextAuthProvider
from pytz import UTC
class AbstractResultsContainer:
def __init__(self, config=None):
self._log = logging.getLogger(__name__)
self._log.info("Creating DOMS Results Storage Instance")
self._session = None
self._config = configparser.RawConfigParser()
self._config.read(AbstractResultsContainer._get_config_files('domsconfig.ini'))
if config:
self.override_config(config)
else:
print('Config NOT provided from params...')
def __enter__(self):
cassHost = self._config.get("cassandra", "host")
cassKeyspace = self._config.get("cassandra", "keyspace")
cassDatacenter = self._config.get("cassandra", "local_datacenter")
cassVersion = int(self._config.get("cassandra", "protocol_version"))
cassUsername = self._config.get("cassandra", "username")
cassPassword = self._config.get("cassandra", "password")
auth_provider = PlainTextAuthProvider(username=cassUsername, password=cassPassword)
dc_policy = DCAwareRoundRobinPolicy(cassDatacenter)
token_policy = TokenAwarePolicy(dc_policy)
self._cluster = Cluster([host for host in cassHost.split(',')], load_balancing_policy=token_policy,
protocol_version=cassVersion, auth_provider=auth_provider)
self._session = self._cluster.connect(cassKeyspace)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._cluster.shutdown()
def _parseDatetime(self, dtString):
dt = datetime.strptime(dtString, "%Y-%m-%dT%H:%M:%SZ")
epoch = datetime.utcfromtimestamp(0)
time = (dt - epoch).total_seconds() * 1000.0
return int(time)
def override_config(self, config):
for section in config.sections():
if self._config.has_section(section):
for option in config.options(section):
if config.get(section, option) is not None:
self._config.set(section, option, config.get(section, option))
@staticmethod
def _get_config_files(filename):
log = logging.getLogger(__name__)
candidates = []
extensions = ['.default', '']
for extension in extensions:
try:
candidate = pkg_resources.resource_filename(__name__, filename + extension)
log.info('use config file {}'.format(filename + extension))
candidates.append(candidate)
except KeyError as ke:
log.warning('configuration file {} not found'.format(filename + extension))
return candidates
class ResultsStorage(AbstractResultsContainer):
def __init__(self, config=None):
AbstractResultsContainer.__init__(self, config)
def insertResults(self, results, params, stats, startTime, completeTime, userEmail, execution_id=None):
if isinstance(execution_id, str):
execution_id = uuid.UUID(execution_id)
execution_id = self.insertExecution(execution_id, startTime, completeTime, userEmail)
self.__insertParams(execution_id, params)
self.__insertStats(execution_id, stats)
self.__insertResults(execution_id, results)
return execution_id
def insertExecution(self, execution_id, startTime, completeTime, userEmail):
if execution_id is None:
execution_id = uuid.uuid4()
cql = "INSERT INTO doms_executions (id, time_started, time_completed, user_email) VALUES (%s, %s, %s, %s)"
self._session.execute(cql, (execution_id, startTime, completeTime, userEmail))
return execution_id
def __insertParams(self, execution_id, params):
cql = """INSERT INTO doms_params
(execution_id, primary_dataset, matchup_datasets, depth_min, depth_max, time_tolerance, radius_tolerance, start_time, end_time, platforms, bounding_box, parameter)
VALUES
(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
"""
self._session.execute(cql, (execution_id,
params["primary"],
",".join(params["matchup"]) if type(params["matchup"]) == list else params[
"matchup"],
params["depthMin"] if "depthMin" in list(params.keys()) else None,
params["depthMax"] if "depthMax" in list(params.keys()) else None,
int(params["timeTolerance"]),
params["radiusTolerance"],
params["startTime"],
params["endTime"],
params["platforms"],
params["bbox"],
params["parameter"]
))
def __insertStats(self, execution_id, stats):
cql = """
INSERT INTO doms_execution_stats
(execution_id, num_gridded_matched, num_gridded_checked, num_insitu_matched, num_insitu_checked, time_to_complete)
VALUES
(%s, %s, %s, %s, %s, %s)
"""
self._session.execute(cql, (
execution_id,
stats["numGriddedMatched"],
stats["numGriddedChecked"],
stats["numInSituMatched"],
stats["numInSituRecords"],
stats["timeToComplete"]
))
def __insertResults(self, execution_id, results):
cql = """
INSERT INTO doms_data
(id, execution_id, value_id, primary_value_id, x, y, source_dataset, measurement_time, platform, device, measurement_values, is_primary)
VALUES
(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
"""
insertStatement = self._session.prepare(cql)
batch = BatchStatement()
for result in results:
self.__insertResult(execution_id, None, result, batch, insertStatement)
self._session.execute(batch)
def __insertResult(self, execution_id, primaryId, result, batch, insertStatement):
dataMap = self.__buildDataMap(result)
result_id = uuid.uuid4()
batch.add(insertStatement, (
result_id,
execution_id,
result["id"],
primaryId,
result["x"],
result["y"],
result["source"],
result["time"],
result["platform"] if "platform" in result else None,
result["device"] if "device" in result else None,
dataMap,
1 if primaryId is None else 0
)
)
n = 0
if "matches" in result:
for match in result["matches"]:
self.__insertResult(execution_id, result["id"], match, batch, insertStatement)
n += 1
if n >= 20:
if primaryId is None:
self.__commitBatch(batch)
n = 0
if primaryId is None:
self.__commitBatch(batch)
def __commitBatch(self, batch):
self._session.execute(batch)
batch.clear()
def __buildDataMap(self, result):
dataMap = {}
for name in result:
value = result[name]
if name not in ["id", "x", "y", "source", "time", "platform", "device", "point", "matches"] and type(
value) in [float, int]:
dataMap[name] = value
return dataMap
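# Illustrative (non-original) usage sketch: the Cassandra session and config
# handling come from AbstractResultsContainer's context manager, so the class
# is normally used in a "with" block; variable names here are assumptions:
#
#   with ResultsStorage() as storage:
#       execution_id = storage.insertResults(results, params, stats,
#                                            start_time, complete_time, user_email)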
class ResultsRetrieval(AbstractResultsContainer):
def __init__(self, config=None):
AbstractResultsContainer.__init__(self, config)
def retrieveResults(self, execution_id, trim_data=False):
if isinstance(execution_id, str):
execution_id = uuid.UUID(execution_id)
params = self.__retrieveParams(execution_id)
stats = self.__retrieveStats(execution_id)
data = self.__retrieveData(execution_id, trim_data=trim_data)
return params, stats, data
def __retrieveData(self, id, trim_data=False):
dataMap = self.__retrievePrimaryData(id, trim_data=trim_data)
self.__enrichPrimaryDataWithMatches(id, dataMap, trim_data=trim_data)
data = [dataMap[name] for name in dataMap]
return data
def __enrichPrimaryDataWithMatches(self, id, dataMap, trim_data=False):
cql = "SELECT * FROM doms_data where execution_id = %s and is_primary = false"
rows = self._session.execute(cql, (id,))
for row in rows:
entry = self.__rowToDataEntry(row, trim_data=trim_data)
if row.primary_value_id in dataMap:
if not "matches" in dataMap[row.primary_value_id]:
dataMap[row.primary_value_id]["matches"] = []
dataMap[row.primary_value_id]["matches"].append(entry)
else:
print(row)
def __retrievePrimaryData(self, id, trim_data=False):
cql = "SELECT * FROM doms_data where execution_id = %s and is_primary = true"
rows = self._session.execute(cql, (id,))
dataMap = {}
for row in rows:
entry = self.__rowToDataEntry(row, trim_data=trim_data)
dataMap[row.value_id] = entry
return dataMap
def __rowToDataEntry(self, row, trim_data=False):
if trim_data:
entry = {
"x": float(row.x),
"y": float(row.y),
"source": row.source_dataset,
"time": row.measurement_time.replace(tzinfo=UTC)
}
else:
entry = {
"id": row.value_id,
"x": float(row.x),
"y": float(row.y),
"source": row.source_dataset,
"device": row.device,
"platform": row.platform,
"time": row.measurement_time.replace(tzinfo=UTC)
}
for key in row.measurement_values:
value = float(row.measurement_values[key])
entry[key] = value
return entry
def __retrieveStats(self, id):
cql = "SELECT * FROM doms_execution_stats where execution_id = %s limit 1"
rows = self._session.execute(cql, (id,))
for row in rows:
stats = {
"numGriddedMatched": row.num_gridded_matched,
"numGriddedChecked": row.num_gridded_checked,
"numInSituMatched": row.num_insitu_matched,
"numInSituChecked": row.num_insitu_checked,
"timeToComplete": row.time_to_complete
}
return stats
raise Exception("Execution not found with id '%s'" % id)
def __retrieveParams(self, id):
cql = "SELECT * FROM doms_params where execution_id = %s limit 1"
rows = self._session.execute(cql, (id,))
for row in rows:
params = {
"primary": row.primary_dataset,
"matchup": row.matchup_datasets.split(","),
"depthMin": row.depth_min,
"depthMax": row.depth_max,
"timeTolerance": row.time_tolerance,
"radiusTolerance": row.radius_tolerance,
"startTime": row.start_time.replace(tzinfo=UTC),
"endTime": row.end_time.replace(tzinfo=UTC),
"platforms": row.platforms,
"bbox": row.bounding_box,
"parameter": row.parameter
}
return params
raise Exception("Execution not found with id '%s'" % id)
| 40.208202 | 183 | 0.593127 |
4a23724dde84b494a7c47c17dcf1d75ad57fdefe | 130 | py | Python | example_config.py | baofeng-dong/facilities-survey-dashboard | b5eb4a8a84299f76bfebc45fb501cfb95fd0af43 | ["MIT"] | 1 | 2017-04-27T09:03:57.000Z | 2017-04-27T09:03:57.000Z | example_config.py | baofeng-dong/facilities-survey-dashboard | b5eb4a8a84299f76bfebc45fb501cfb95fd0af43 | ["MIT"] | null | null | null | example_config.py | baofeng-dong/facilities-survey-dashboard | b5eb4a8a84299f76bfebc45fb501cfb95fd0af43 | ["MIT"] | null | null | null |
DB_CONFIG = "postgresql://user:password@localhost/db"
ROOT_DIR = "/path/to/project"
ADMIN_USER = "username"
ADMIN_PW = "password"
| 26 | 53 | 0.746154 |
4a2373574a8c20f2e658dd78ede53ea3d56378cc | 1,196 | py | Python | eclipse-mosquitto/test/broker/07-will-no-flag.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 00468923fcf70eefdf2c707b6ba9bdd4f859faf2 | [
"Unlicense"
] | 2 | 2021-04-20T14:28:59.000Z | 2021-05-06T07:46:53.000Z | eclipse-mosquitto/test/broker/07-will-no-flag.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 00468923fcf70eefdf2c707b6ba9bdd4f859faf2 | [
"Unlicense"
] | null | null | null | eclipse-mosquitto/test/broker/07-will-no-flag.py | HenriqueBuzin/mosquitto-eclipse-mqtt | 00468923fcf70eefdf2c707b6ba9bdd4f859faf2 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# Test whether a connection is disconnected if it sets the will flag but does
# not provide a will payload.
from mosq_test_helper import *
def do_test(proto_ver):
rc = 1
keepalive = 10
connect_packet = mosq_test.gen_connect("will-no-payload", keepalive=keepalive, will_topic="will/topic", will_qos=1, will_retain=True, proto_ver=proto_ver)
b = list(struct.unpack("B"*len(connect_packet), connect_packet))
bmod = b[0:len(b)-2]
bmod[1] = bmod[1] - 2 # Reduce remaining length by two to remove final two payload length values
connect_packet = struct.pack("B"*len(bmod), *bmod)
port = mosq_test.get_port()
broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port)
try:
sock = mosq_test.do_client_connect(connect_packet, b"", port=port)
sock.close()
rc = 0
except mosq_test.TestError:
pass
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde.decode('utf-8'))
print("proto_ver=%d" % (proto_ver))
exit(rc)
do_test(proto_ver=4)
do_test(proto_ver=5)
exit(0)
| 28.47619 | 158 | 0.655518 |
4a23741db7c41a09c38c65c61a5e8c809d685ee1 | 3,723 | py | Python | blog/admin.py | whitexiong/DjangoBlog | 61b3f4a9ec805949d0a21487888f422f40592871 | [
"MIT"
] | 1 | 2020-07-11T16:45:13.000Z | 2020-07-11T16:45:13.000Z | blog/admin.py | whitexiong/DjangoBlog | 61b3f4a9ec805949d0a21487888f422f40592871 | [
"MIT"
] | null | null | null | blog/admin.py | whitexiong/DjangoBlog | 61b3f4a9ec805949d0a21487888f422f40592871 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import Article, Category, Tag, Links, SideBar, BlogSettings
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse
from django.utils.html import format_html
class ArticleListFilter(admin.SimpleListFilter):
title = _("作者")
parameter_name = 'author'
def lookups(self, request, model_admin):
authors = list(set(map(lambda x: x.author, Article.objects.all())))
for author in authors:
yield (author.id, _(author.username))
def queryset(self, request, queryset):
id = self.value()
if id:
return queryset.filter(author__id__exact=id)
else:
return queryset
class ArticleForm(forms.ModelForm):
# body = forms.CharField(widget=AdminPagedownWidget())
class Meta:
model = Article
fields = '__all__'
def makr_article_publish(modeladmin, request, queryset):
queryset.update(status='p')
def draft_article(modeladmin, request, queryset):
queryset.update(status='d')
def close_article_commentstatus(modeladmin, request, queryset):
queryset.update(comment_status='c')
def open_article_commentstatus(modeladmin, request, queryset):
queryset.update(comment_status='o')
makr_article_publish.short_description = '发布选中文章'
draft_article.short_description = '选中文章设置为草稿'
close_article_commentstatus.short_description = '关闭文章评论'
open_article_commentstatus.short_description = '打开文章评论'
class ArticlelAdmin(admin.ModelAdmin):
list_per_page = 5
search_fields = ('body', 'title')
form = ArticleForm
list_display = (
'id',
'title',
'author',
'link_to_category',
'created_time',
'views',
'status',
'type',
'article_order')
list_display_links = ('id', 'title')
list_filter = (ArticleListFilter, 'status', 'type', 'category', 'tags')
filter_horizontal = ('tags',)
exclude = ('created_time', 'last_mod_time')
view_on_site = True
actions = [
makr_article_publish,
draft_article,
close_article_commentstatus,
open_article_commentstatus]
def link_to_category(self, obj):
info = (obj.category._meta.app_label, obj.category._meta.model_name)
link = reverse('admin:%s_%s_change' % info, args=(obj.category.id,))
return format_html(u'<a href="%s">%s</a>' % (link, obj.category.name))
link_to_category.short_description = '分类目录'
def get_form(self, request, obj=None, **kwargs):
form = super(ArticlelAdmin, self).get_form(request, obj, **kwargs)
form.base_fields['author'].queryset = get_user_model(
).objects.filter(is_superuser=True)
return form
def save_model(self, request, obj, form, change):
super(ArticlelAdmin, self).save_model(request, obj, form, change)
def get_view_on_site_url(self, obj=None):
if obj:
url = obj.get_full_url()
return url
else:
from DjangoBlog.utils import get_current_site
site = get_current_site().domain
return site
class TagAdmin(admin.ModelAdmin):
exclude = ('slug', 'last_mod_time', 'created_time')
class CategoryAdmin(admin.ModelAdmin):
exclude = ('slug', 'last_mod_time', 'created_time')
class LinksAdmin(admin.ModelAdmin):
exclude = ('last_mod_time', 'created_time')
class SideBarAdmin(admin.ModelAdmin):
list_display = ('name', 'content', 'is_enable', 'sequence')
exclude = ('last_mod_time', 'created_time')
class BlogSettingsAdmin(admin.ModelAdmin):
pass
| 29.085938 | 78 | 0.677142 |
4a2374f8b5bb24a1179462a2ddd5bc186e771ece | 696 | py | Python | examples/greet0.2.py | aisola/python-cli | 520b512ae3c01da2675816ad7a7b9500128b2499 | [
"MIT"
] | null | null | null | examples/greet0.2.py | aisola/python-cli | 520b512ae3c01da2675816ad7a7b9500128b2499 | [
"MIT"
] | null | null | null | examples/greet0.2.py | aisola/python-cli | 520b512ae3c01da2675816ad7a7b9500128b2499 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
sys.path.append('../')
import cli
# the action of the program
def main(context):
if context.flag("lang") == "english":
print "Hello, friend!"
elif context.flag("lang") == "spanish":
print "Hola, amigo!"
elif context.flag("lang") == "french":
print "Salut, mon ami!"
else:
context.error("unknown language '%s'" % context.flag("lang"))
if __name__ == "__main__":
APP = cli.CLI("greet")
APP.version = "0.2"
APP.usage = "fight the loneliness!"
APP.flags = [
cli.Flag(cli.STRING, "lang,l", "english", "set the language for the greeting"),
]
APP.action = main
APP.run(sys.argv)
| 26.769231 | 87 | 0.587644 |
4a2375fa549af2af9303b7a4de4fcd84e89cc6f0 | 612 | py | Python | astropy/io/misc/asdf/tags/coordinates/earthlocation.py | emirkmo/astropy | d96cd45b25ae55117d1bcc9c40e83a82037fc815 | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:26:49.000Z | 2019-03-11T12:26:49.000Z | astropy/io/misc/asdf/tags/coordinates/earthlocation.py | emirkmo/astropy | d96cd45b25ae55117d1bcc9c40e83a82037fc815 | [
"BSD-3-Clause"
] | 1 | 2019-10-09T18:54:27.000Z | 2019-10-09T18:54:27.000Z | astropy/io/misc/asdf/tags/coordinates/earthlocation.py | emirkmo/astropy | d96cd45b25ae55117d1bcc9c40e83a82037fc815 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from astropy.coordinates import EarthLocation
from astropy.io.misc.asdf.types import AstropyType
class EarthLocationType(AstropyType):
name = 'coordinates/earthlocation'
types = [EarthLocation]
version = '1.0.0'
@classmethod
def to_tree(cls, obj, ctx):
return obj.info._represent_as_dict()
@classmethod
def from_tree(cls, node, ctx):
return EarthLocation.info._construct_from_dict(node)
@classmethod
def assert_equal(cls, old, new):
return (old == new).all()
| 26.608696 | 63 | 0.691176 |
4a2376e933dd5d9afabbad743f076b3b633b20ad | 20,925 | py | Python | cloudify_docker/ansible.py | jrzeszutek/cloudify-docker-plugin | 7c1416424ef6aad08bde7891d9eca920a34bc26a | [
"Apache-2.0"
] | null | null | null | cloudify_docker/ansible.py | jrzeszutek/cloudify-docker-plugin | 7c1416424ef6aad08bde7891d9eca920a34bc26a | [
"Apache-2.0"
] | null | null | null | cloudify_docker/ansible.py | jrzeszutek/cloudify-docker-plugin | 7c1416424ef6aad08bde7891d9eca920a34bc26a | [
"Apache-2.0"
] | null | null | null | ########
# Copyright (c) 2014-2020 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import json
import errno
import shutil
import getpass
import tempfile
from uuid import uuid1
from fabric.api import put, sudo
from .tasks import get_lan_ip
from .tasks import get_fabric_settings
from .tasks import get_docker_machine_from_ctx
from cloudify.manager import get_rest_client
from cloudify.decorators import operation
from cloudify.exceptions import (NonRecoverableError, HttpException)
from cloudify_common_sdk.resource_downloader import unzip_archive
from cloudify_common_sdk.resource_downloader import untar_archive
from cloudify_common_sdk.resource_downloader import get_shared_resource
from cloudify_common_sdk.resource_downloader import TAR_FILE_EXTENSTIONS
from .tasks import HOSTS
from .tasks import LOCAL_HOST_ADDRESSES
WORKSPACE = 'workspace'
LIST_TYPES = ['skip-tags', 'tags']
BP_INCLUDES_PATH = '/opt/manager/resources/blueprints/' \
'{tenant}/{blueprint}/{relative_path}'
@operation
def set_playbook_config(ctx, **kwargs):
"""
Set all playbook node instance configuration as runtime properties
:param _ctx: Cloudify node instance which is instance of CloudifyContext
:param config: Playbook node configurations
"""
def _get_secure_values(data, sensitive_keys, parent_hide=False):
"""
::param data : dict to check againt sensitive_keys
::param sensitive_keys : a list of keys we want to hide the values for
::param parent_hide : boolean flag to pass if the parent key is
in sensitive_keys
"""
for key in data:
# check if key or its parent {dict value} in sensitive_keys
hide = parent_hide or (key in sensitive_keys)
value = data[key]
# handle dict value incase sensitive_keys was inside another key
if isinstance(value, dict):
# call _get_secure_value function recusivly
# to handle the dict value
inner_dict = _get_secure_values(value, sensitive_keys, hide)
data[key] = inner_dict
else:
data[key] = '*'*len(value) if hide else value
return data
if kwargs and isinstance(kwargs, dict):
kwargs = _get_secure_values(kwargs, kwargs.get("sensitive_keys", {}))
for key, value in kwargs.items():
ctx.instance.runtime_properties[key] = value
ctx.instance.update()
@operation
def create_ansible_playbook(ctx, **kwargs):
def handle_file_path(file_path, additional_playbook_files, _ctx):
"""Get the path to a file.
I do this for two reasons:
1. The Download Resource only downloads an individual file.
Ansible Playbooks are often many files.
2. I have not figured out how to pass a file as an in
memory object to the PlaybookExecutor class.
:param file_path: The `site_yaml_path` from `run`.
:param additional_playbook_files: additional files
adjacent to the playbook path.
:param _ctx: The Cloudify Context.
:return: The absolute path on the manager to the file.
"""
def _get_deployment_blueprint(deployment_id):
new_blueprint = ""
try:
# get the latest deployment update to get the new blueprint id
client = get_rest_client()
dep_upd = \
client.deployment_updates.list(deployment_id=deployment_id,
sort='created_at')[-1]
new_blueprint = \
client.deployment_updates.get(dep_upd.id)[
"new_blueprint_id"]
except KeyError:
raise NonRecoverableError(
"can't get blueprint for deployment {0}".format(
deployment_id))
return new_blueprint
def download_nested_file_to_new_nested_temp_file(file_path, new_root,
_ctx):
""" Download file to a similar folder system with a new
root directory.
:param file_path: the resource path for download resource source.
:param new_root: Like a temporary directory
:param _ctx:
:return:
"""
dirname, file_name = os.path.split(file_path)
# Create the new directory path including the new root.
new_dir = os.path.join(new_root, dirname)
new_full_path = os.path.join(new_dir, file_name)
try:
os.makedirs(new_dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(new_dir):
pass
else:
raise
return _ctx.download_resource(file_path, new_full_path)
if not isinstance(file_path, basestring):
raise NonRecoverableError(
'The variable file_path {0} is a {1},'
'expected a string.'.format(file_path, type(file_path)))
if not getattr(_ctx, '_local', False):
if additional_playbook_files:
# This section is intended to handle scenario where we want
# to download the resource instead of use absolute path.
# Perhaps this should replace the old way entirely.
# For now, the important thing here is that we are
# enabling downloading the playbook to a remote host.
playbook_file_dir = tempfile.mkdtemp()
new_file_path = download_nested_file_to_new_nested_temp_file(
file_path,
playbook_file_dir,
_ctx
)
for additional_file in additional_playbook_files:
download_nested_file_to_new_nested_temp_file(
additional_file,
playbook_file_dir,
_ctx
)
return new_file_path
else:
# handle update deployment different blueprint playbook name
deployment_blueprint = _ctx.blueprint.id
if _ctx.workflow_id == 'update':
deployment_blueprint = \
_get_deployment_blueprint(_ctx.deployment.id)
file_path = \
BP_INCLUDES_PATH.format(
tenant=_ctx.tenant_name,
blueprint=deployment_blueprint,
relative_path=file_path)
if os.path.exists(file_path):
return file_path
raise NonRecoverableError(
'File path {0} does not exist.'.format(file_path))
def handle_site_yaml(site_yaml_path, additional_playbook_files, _ctx):
""" Create an absolute local path to the site.yaml.
:param site_yaml_path: Relative to the blueprint.
:param additional_playbook_files: additional playbook files relative to
the playbook.
:param _ctx: The Cloudify context.
:return: The final absolute path on the system to the site.yaml.
"""
site_yaml_real_path = os.path.abspath(
handle_file_path(site_yaml_path, additional_playbook_files, _ctx))
site_yaml_real_dir = os.path.dirname(site_yaml_real_path)
site_yaml_real_name = os.path.basename(site_yaml_real_path)
site_yaml_new_dir = os.path.join(
_ctx.instance.runtime_properties[WORKSPACE], 'playbook')
shutil.copytree(site_yaml_real_dir, site_yaml_new_dir)
site_yaml_final_path = os.path.join(site_yaml_new_dir,
site_yaml_real_name)
return site_yaml_final_path
def get_inventory_file(filepath, _ctx, new_inventory_path):
"""
This method will get the location for inventory file.
The file location could be locally with relative to the blueprint
resources or it could be remotely on the remote machine
:return:
:param filepath: File path to do check for
:param _ctx: The Cloudify context.
:param new_inventory_path: New path which holds the file inventory path
when "filepath" is a local resource
:return: File location for inventory file
"""
if os.path.isfile(filepath):
# The file already exists on the system, then return the file url
return filepath
else:
# Check to see if the file does not exit, then try to lookup the
# file from the Cloudify blueprint resources
try:
_ctx.download_resource(filepath, new_inventory_path)
except HttpException:
_ctx.logger.error(
'Error when trying to download {0}'.format(filepath))
return None
return new_inventory_path
def handle_source_from_string(filepath, _ctx, new_inventory_path):
inventory_file = get_inventory_file(filepath, _ctx, new_inventory_path)
if inventory_file:
return inventory_file
else:
with open(new_inventory_path, 'w') as outfile:
_ctx.logger.info(
'Writing this data to temp file: {0}'.format(
new_inventory_path))
outfile.write(filepath)
return new_inventory_path
def handle_key_data(_data, workspace_dir, container_volume):
"""Take Key Data from ansible_ssh_private_key_file and
replace with a temp file.
:param _data: The hosts dict (from YAML).
:param workspace_dir: The temp dir where we are putting everything.
:return: The hosts dict with a path to a temp file.
"""
def recurse_dictionary(existing_dict,
key='ansible_ssh_private_key_file'):
if key not in existing_dict:
for k, v in existing_dict.items():
if isinstance(v, dict):
existing_dict[k] = recurse_dictionary(v)
elif key in existing_dict:
# If is_file_path is True, this has already been done.
try:
is_file_path = os.path.exists(existing_dict[key])
except TypeError:
is_file_path = False
if not is_file_path:
private_key_file = \
os.path.join(workspace_dir, str(uuid1()))
with open(private_key_file, 'w') as outfile:
outfile.write(existing_dict[key])
os.chmod(private_key_file, 0o600)
private_key_file = \
private_key_file.replace(workspace_dir,
container_volume)
existing_dict[key] = private_key_file
return existing_dict
return recurse_dictionary(_data)
def handle_sources(data, site_yaml_abspath, _ctx, container_volume):
"""Allow users to provide a path to a hosts file
or to generate hosts dynamically,
which is more comfortable for Cloudify users.
:param data: Either a dict (from YAML)
or a path to a conventional Ansible file.
:param site_yaml_abspath: This is the path to the site yaml folder.
:param _ctx: The Cloudify context.
:return: The final path of the hosts file that
was either provided or generated.
"""
hosts_abspath = os.path.join(os.path.dirname(site_yaml_abspath), HOSTS)
if isinstance(data, dict):
data = handle_key_data(
data, os.path.dirname(site_yaml_abspath), container_volume)
if os.path.exists(hosts_abspath):
_ctx.logger.error(
'Hosts data was provided but {0} already exists. '
'Overwriting existing file.'.format(hosts_abspath))
with open(hosts_abspath, 'w') as outfile:
yaml.safe_dump(data, outfile, default_flow_style=False)
elif isinstance(data, basestring):
hosts_abspath = handle_source_from_string(data, _ctx,
hosts_abspath)
return hosts_abspath
def prepare_options_config(options_config, run_data, destination, ctx):
options_list = []
if 'extra_vars' not in options_config:
options_config['extra_vars'] = {}
options_config['extra_vars'].update(run_data)
for key, value in options_config.items():
if key == 'extra_vars':
f = tempfile.NamedTemporaryFile(delete=False, dir=destination)
with open(f.name, 'w') as outfile:
json.dump(value, outfile)
value = '@{filepath}'.format(filepath=f.name)
elif key == 'verbosity':
ctx.logger.error('No such option verbosity')
del key
continue
key = key.replace("_", "-")
if isinstance(value, basestring):
value = value.encode('utf-8')
elif isinstance(value, dict):
value = json.dumps(value)
elif isinstance(value, list) and key not in LIST_TYPES:
value = [i.encode('utf-8') for i in value]
elif isinstance(value, list):
value = ",".join(value).encode('utf-8')
options_list.append(
'--{key}={value}'.format(key=key, value=repr(value)))
return ' '.join(options_list)
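    # Illustrative note (added, not original code): prepare_options_config renders
    # each key as "--<key>=<repr(value)>", writes extra_vars to a temp JSON file
    # referenced as "@<tempfile>", and joins everything into one string, roughly
    #   --tags='setup,deploy' --extra-vars='@/tmp/tmpXXXX'
    # (example flag values are assumptions); the string is later embedded in the
    # ansible-playbook command argument built below.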
def prepare_playbook_args(ctx):
playbook_source_path = \
ctx.instance.runtime_properties.get('playbook_source_path', None)
playbook_path = \
ctx.instance.runtime_properties.get('playbook_path', None) \
or ctx.instance.runtime_properties.get('site_yaml_path', None)
sources = \
ctx.instance.runtime_properties.get('sources', {})
debug_level = \
ctx.instance.runtime_properties.get('debug_level', 2)
additional_args = \
ctx.instance.runtime_properties.get('additional_args', '')
additional_playbook_files = \
ctx.instance.runtime_properties.get(
'additional_playbook_files', None) or []
ansible_env_vars = \
ctx.instance.runtime_properties.get('ansible_env_vars', None) \
or {'ANSIBLE_HOST_KEY_CHECKING': "False"}
ctx.instance.runtime_properties[WORKSPACE] = tempfile.mkdtemp()
# check if source path is provided [full path/URL]
if playbook_source_path:
# here we will combine playbook_source_path with playbook_path
playbook_tmp_path = get_shared_resource(playbook_source_path)
if playbook_tmp_path == playbook_source_path:
# didn't download anything so check the provided path
# if file and absolute path
if os.path.isfile(playbook_tmp_path) and \
os.path.isabs(playbook_tmp_path):
# check file type if archived
file_name = playbook_tmp_path.rsplit('/', 1)[1]
file_type = file_name.rsplit('.', 1)[1]
if file_type == 'zip':
playbook_tmp_path = \
unzip_archive(playbook_tmp_path)
elif file_type in TAR_FILE_EXTENSTIONS:
playbook_tmp_path = \
untar_archive(playbook_tmp_path)
playbook_path = "{0}/{1}".format(playbook_tmp_path,
playbook_path)
else:
# here will handle the bundled ansible files
playbook_path = handle_site_yaml(
playbook_path, additional_playbook_files, ctx)
playbook_args = {
'playbook_path': playbook_path,
'sources': handle_sources(sources, playbook_path,
ctx,
ctx.node.properties.get(
'docker_machine', {}).get(
'container_volume', "")),
'verbosity': debug_level,
'additional_args': additional_args or '',
}
options_config = \
ctx.instance.runtime_properties.get('options_config', {})
run_data = \
ctx.instance.runtime_properties.get('run_data', {})
return playbook_args, ansible_env_vars, options_config, run_data
playbook_args, ansible_env_vars, options_config, run_data = \
prepare_playbook_args(ctx)
docker_ip, docker_user, docker_key, container_volume = \
get_docker_machine_from_ctx(ctx)
# The decorators will take care of creating the playbook workspace
# which will package everything in a directory for our usages
# it will be in the kwargs [playbook_args.playbook_path]
playbook_path = playbook_args.get("playbook_path", "")
debug_level = playbook_args.get("debug_level", 2)
destination = os.path.dirname(playbook_path)
verbosity = '-v'
for i in range(1, debug_level):
verbosity += 'v'
command_options = \
prepare_options_config(options_config, run_data, destination, ctx)
additional_args = playbook_args.get("additional_args", "")
if not destination:
raise NonRecoverableError(
"something is wrong with the playbook provided")
return
else:
ctx.logger.info("playbook is ready at {0}".format(destination))
playbook_path = playbook_path.replace(destination, container_volume)
command_options = command_options.replace(destination,
container_volume)
ctx.instance.runtime_properties['destination'] = destination
ctx.instance.runtime_properties['docker_host'] = docker_ip
ctx.instance.runtime_properties['ansible_env_vars'] = ansible_env_vars
ctx.instance.runtime_properties['ansible_container_command_arg'] = \
"ansible-playbook {0} -i hosts {1} {2} {3} ".format(
verbosity,
command_options,
additional_args,
playbook_path)
# copy these files to docker machine if needed at that destination
if not docker_ip:
raise NonRecoverableError("no docker_ip was provided")
return
if docker_ip not in LOCAL_HOST_ADDRESSES and not docker_ip == get_lan_ip():
with get_fabric_settings(ctx, docker_ip,
docker_user,
docker_key) as s:
with s:
destination_parent = destination.rsplit('/', 1)[0]
if destination_parent != '/tmp':
sudo('mkdir -p {0}'.format(destination_parent))
sudo("chown -R {0}:{0} {1}".format(docker_user,
destination_parent))
put(destination, destination_parent, mirror_local_mode=True)
@operation
def remove_ansible_playbook(ctx, **kwargs):
docker_ip, docker_user, docker_key, _ = get_docker_machine_from_ctx(ctx)
destination = ctx.instance.runtime_properties.get('destination', "")
if not destination:
raise NonRecoverableError("destination was not assigned due to error")
return
ctx.logger.info("removing file from destination {0}".format(destination))
if os.path.exists(destination):
os.system("sudo chown -R {0} {1}".format(getpass.getuser(),
destination))
shutil.rmtree(destination)
ctx.instance.runtime_properties.pop('destination', None)
if not docker_ip:
raise NonRecoverableError("no docker_ip was provided")
return
if docker_ip not in LOCAL_HOST_ADDRESSES and not docker_ip == get_lan_ip():
with get_fabric_settings(ctx, docker_ip, docker_user, docker_key) as s:
with s:
sudo("rm -rf {0}".format(destination))
| 45.292208 | 79 | 0.602724 |
4a2376ee2f6b3c88fd6d6ad488d4b15b967f74da | 8,753 | py | Python | NeuralStyleTransfer/main.py | ThreeSRR/Chinese-Painting-Generator | 56d7cc1e0254912b0515ae2c1b9679d9b16e04dc | ["Apache-2.0"] | 6 | 2021-01-06T04:27:32.000Z | 2022-03-13T04:53:03.000Z | NeuralStyleTransfer/main.py | ThreeSRR/Chinese-Painting-Generator | 56d7cc1e0254912b0515ae2c1b9679d9b16e04dc | ["Apache-2.0"] | null | null | null | NeuralStyleTransfer/main.py | ThreeSRR/Chinese-Painting-Generator | 56d7cc1e0254912b0515ae2c1b9679d9b16e04dc | ["Apache-2.0"] | 4 | 2021-02-26T06:51:38.000Z | 2022-03-13T07:23:59.000Z |
import copy
import argparse
import os
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.models as models
from LossFunction import ContentLoss, StyleLoss
plt.switch_backend('agg')
def image_loader(image_name, transform, device):
image = Image.open(image_name)
image = transform(image).unsqueeze(0)
return image.to(device, torch.float)
class Normalization(nn.Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
self.mean = torch.tensor(mean).view(-1, 1, 1)
self.std = torch.tensor(std).view(-1, 1, 1)
def forward(self, img):
return (img - self.mean) / self.std
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
style_img, content_img,
content_layers, style_layers, device):
'''
to add content loss and style loss layers after convolution layer by creating a new Sequential module
'''
cnn = copy.deepcopy(cnn)
content_loss_list = []
style_loss_list = []
normalization = Normalization(normalization_mean, normalization_std).to(device)
model = nn.Sequential(normalization)
i = 0
for layer in cnn.children():
if isinstance(layer, nn.Conv2d):
i += 1
name = 'conv_{}'.format(i)
elif isinstance(layer, nn.ReLU):
name = 'relu_{}'.format(i)
layer = nn.ReLU(inplace=False)
elif isinstance(layer, nn.MaxPool2d):
name = 'pool_{}'.format(i)
elif isinstance(layer, nn.BatchNorm2d):
name = 'bn_{}'.format(i)
else:
raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))
model.add_module(name, layer)
if name in content_layers:
# add content loss
target = model(content_img).detach()
content_loss = ContentLoss(target)
model.add_module("content_loss_{}".format(i), content_loss)
content_loss_list.append(content_loss)
if name in style_layers:
# add style loss
target_feature = model(style_img).detach()
style_loss = StyleLoss(target_feature)
model.add_module("style_loss_{}".format(i), style_loss)
style_loss_list.append(style_loss)
for i in range(len(model) - 1, -1, -1):
if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
break
model = model[:(i + 1)]
return model, style_loss_list, content_loss_list
def get_input_optimizer(input_img):
'''
L-BFGS algorithm to run our gradient descent
to train the input image in order to minimise the content/style losses
'''
optimizer = optim.LBFGS([input_img.requires_grad_()])
return optimizer
def run(cnn, content_layers_default, style_layers_default, content_img, style_img, input_img, device,
num_steps=300, style_weight=10000, content_weight=1):
"""
the function to perform neural transfer
"""
style_loss_list = []
content_loss_list = []
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)
model, style_losses, content_losses = get_style_model_and_losses(
cnn, cnn_normalization_mean, cnn_normalization_std, style_img, content_img,
content_layers_default, style_layers_default, device)
optimizer = get_input_optimizer(input_img)
epoch = [0]
while epoch[0] <= num_steps:
def closure():
input_img.data.clamp_(0, 1)
optimizer.zero_grad()
model(input_img)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.loss
for cl in content_losses:
content_score += cl.loss
style_score *= style_weight
content_score *= content_weight
loss = style_score + content_score
loss.backward()
epoch[0] += 1
if epoch[0] % 10 == 0:
style_loss_list.append(style_score.item())
content_loss_list.append(content_score.item())
if epoch[0] % 50 == 0:
print("epoch {}: Style Loss : {:4f} Content Loss: {:4f}".format(epoch[0], style_score.item(), content_score.item()))
return style_score + content_score
optimizer.step(closure)
input_img.data.clamp_(0, 1)
return input_img, style_loss_list, content_loss_list
def style_transfer(style_img, content_img, outputpath='./result.png', num_steps=500, style_weight=100000, content_weight=1, name='test', loss_dir='losses'):
'''
the main function of neural style transfer
:param style_img: the image with target style you want to transfer to
:param content_img: the original image, to transfer its style while reserve its content
:param outputpath: the path to save image with transferred style
:param num_steps: number of steps to update parameters
:param style_weight: weight of style
:param content_weight: weight of loss
'''
imsize = 512
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
transform = transforms.Compose([
transforms.Resize(imsize),
transforms.CenterCrop(imsize),
transforms.ToTensor()
])
style_img = image_loader(style_img, transform, device)
content_img = image_loader(content_img, transform, device)
# use the features module of pretrained vgg19
# need the output of the individual convolution layers to measure content and style loss.
cnn = models.vgg19(pretrained=True).features.to(device).eval()
# desired depth layers to compute style/content losses :
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']
input_img = content_img.clone()
output, style_loss, content_loss = run(cnn, content_layers_default, style_layers_default, content_img, style_img, input_img, device,
num_steps=num_steps, style_weight=style_weight, content_weight=content_weight)
output = output.detach().cpu().numpy().squeeze(0).transpose([1,2,0])
plt.imsave(outputpath, output)
plt.clf()
x = [i*10 for i in range(len(style_loss))]
plt.plot(x, style_loss, label='style_loss')
plt.plot(x, content_loss, label='content_loss')
plt.xlabel('steps')
plt.ylabel('loss')
plt.legend()
plt.savefig(os.path.join(loss_dir, "loss" + name))
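# Minimal direct-call sketch (added for illustration; file names and existing
# output directories are assumptions — the __main__ block below shows the
# argparse-driven equivalent):
#
#   style_transfer('./data/style/style3.jpg', './data/content/photo.jpg',
#                  outputpath='./results/out.png', num_steps=300,
#                  style_weight=100000, content_weight=1,
#                  name='photo.jpg', loss_dir='./results/losses')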
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--style_img_path',
default='./data/style/style3.jpg',
help='path of style image',
type=str)
parser.add_argument('--content_img_dir',
default='./data/content',
help='directory of content images',
type=str)
parser.add_argument('--result_dir',
default='./results',
help='directory to save results',
type=str)
parser.add_argument('--num_steps',
default=500,
help='number of steps to update',
type=int)
parser.add_argument('--style_weight',
default=100000,
help='weight of style',
type=int)
parser.add_argument('--content_weight',
default=1,
help='weight of content',
type=int)
args = parser.parse_args()
style_img_path = args.style_img_path
content_img_dir = args.content_img_dir
result_dir = args.result_dir
num_steps = args.num_steps
style_weight = args.style_weight
content_weight = args.content_weight
if not os.path.isdir(result_dir):
os.mkdir(result_dir)
loss_dir = os.path.join(result_dir, 'losses')
if not os.path.isdir(loss_dir):
os.mkdir(loss_dir)
for img in os.listdir(content_img_dir):
content_img_path = os.path.join(content_img_dir, img)
outputpath = os.path.join(result_dir, 'result-' + img)
style_transfer(style_img_path, content_img_path, outputpath=outputpath, num_steps=num_steps, style_weight=style_weight, content_weight=content_weight, name=img, loss_dir=loss_dir)
| 34.191406 | 187 | 0.629384 |
4a237a1c3fcc6d9cebaa17f698552a2fcb2fbaa2 | 3,772 | py | Python | mml2music/song.py | CuteFwan/mml2music | e9a0a7a93f2b16ab5a197d5b8364bd0ae83eac4b | [
"MIT"
] | 6 | 2019-08-09T08:23:28.000Z | 2021-12-31T20:26:44.000Z | mml2music/song.py | CuteFwan/mml2music | e9a0a7a93f2b16ab5a197d5b8364bd0ae83eac4b | [
"MIT"
] | null | null | null | mml2music/song.py | CuteFwan/mml2music | e9a0a7a93f2b16ab5a197d5b8364bd0ae83eac4b | [
"MIT"
] | 1 | 2019-09-07T01:54:57.000Z | 2019-09-07T01:54:57.000Z | from .errors import *
class Note:
__slots__ = ('position', 'frequency', 'length', 'volume')
def __init__(self, position: float, frequency: float, length: float, volume: float):
self.position = position
self.frequency = frequency
self.length = length
self.volume = volume
class Track:
def __init__(self, *, max_length : int = None, max_notes : int = None):
self.max_length = max_length if (max_length and max_length > 0) else None
self.max_notes = max_notes if (max_notes and max_notes > 0) else None
self.notes = list()
self.position = 0
print(self.max_length, self.max_notes)
def check_length(self, new_length : int):
"""Returns True if new_length exceeds old length"""
return (self.max_length and new_length > self.max_length) or new_length <= 0
def add_note(self, note: Note):
"""Add a new note at the end of the track."""
if self.check_length(self.position + note.length):
raise ExceededLength("Adding note would exceed length limit.")
pass
elif self.max_notes and len(self.notes) >= self.max_notes:
raise ExceededNotes("Adding note would exceed note limit.")
pass
else:
self.notes.append(note)
self.position += note.length
def extend_last(self, length: float):
"""Extend the last note in the track."""
if self.check_length(self.position + length):
raise ExceededLength("Extending last note would exceed length limit.")
else:
self.notes[-1].length += length
self.position += length
def rest(self, length: float):
"""Add a rest of certain length to the current position of the track."""
if self.check_length(self.position + length):
raise ExceededLength("Adding rest would exceed length limit.")
pass
else:
self.position += length
def tempo(self, mult: float):
"""Modify the tempo of the track independant of the pitch."""
if mult <= 0:
# mult should not be negative or zero
return
elif self.check_length(self.position / mult):
raise ExceededLength("Modifying tempo would exceed length limit.")
pass
else:
for note in self.notes:
note.position /= mult
note.length /= mult
self.position /= mult
def pitch(self, mult: float):
"""Modify the pitch of the track independant of the tempo."""
if mult <= 0:
# mult should not be negative or zero
return
for note in self.notes:
note.frequency *= mult
def speed(self, mult: float):
"""Modify the tempo and pitch of the track at the same time."""
if mult <= 0:
# mult should not be negative or zero
return
elif self.check_length(self.position / mult):
raise ExceededLength("Modifying speed would exceed length limit.")
pass
else:
for note in self.notes:
note.position /= mult
note.length /= mult
note.frequency *= mult
self.position /= mult
def constrain(self, *, low: float = 0, high: float = 44000):
"""Constrain the note frequencies of the track fo a certain range."""
self.notes = [note for note in self.notes if low <= note.frequency <= high]
def reverse(self):
new_notes = []
for note in self.notes[::-1]:
note.position = self.position - note.position - note.length
new_notes.append(note)
del self.notes[:]
self.notes = new_notes | 36.980392 | 88 | 0.586957 |
4a237a4010df8596fb438931b8bd24c348b7728e | 1,708 | py | Python | rnn_play.py | wrannaman/tensorflow-pickup-lines | 484b71cb610bb581062ab02e90e30d689bc01ed6 | [
"Apache-2.0"
] | 15 | 2017-06-26T02:15:10.000Z | 2020-07-30T15:26:17.000Z | rnn_play.py | wrannaman/tensorflow-pickup-lines | 484b71cb610bb581062ab02e90e30d689bc01ed6 | [
"Apache-2.0"
] | null | null | null | rnn_play.py | wrannaman/tensorflow-pickup-lines | 484b71cb610bb581062ab02e90e30d689bc01ed6 | [
"Apache-2.0"
] | 2 | 2020-09-10T20:50:13.000Z | 2021-08-04T14:54:59.000Z | import tensorflow as tf
import numpy as np
import my_txtutils
target = open("outputs.txt", 'w')
# these must match what was saved !
ALPHASIZE = my_txtutils.ALPHASIZE
NLAYERS = 3
INTERNALSIZE = 512
##################################### REPLACE THESE WITH YOUR TRAINED MODEL CHECKPOINT FILES #############################
author = "twitter2_checkpoints/rnn_train_1498315225-15000000"
ncnt = 0
with tf.Session() as sess:
new_saver = tf.train.import_meta_graph('twitter2_checkpoints/rnn_train_1498315225-15000000.meta')
new_saver.restore(sess, author)
x = my_txtutils.convert_from_alphabet(ord("L"))
x = np.array([[x]]) # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
# initial values
y = x
h = np.zeros([1, INTERNALSIZE * NLAYERS], dtype=np.float32) # [ BATCHSIZE, INTERNALSIZE * NLAYERS]
for i in range(1000000000):
yo, h = sess.run(['Yo:0', 'H:0'], feed_dict={'X:0': y, 'pkeep:0': 1., 'Hin:0': h, 'batchsize:0': 1})
# If sampling is be done from the topn most likely characters, the generated text
# is more credible and more "english". If topn is not set, it defaults to the full
# distribution (ALPHASIZE)
# Recommended: topn = 10 for intermediate checkpoints, topn=2 or 3 for fully trained checkpoints
c = my_txtutils.sample_from_probabilities(yo, topn=2)
y = np.array([[c]]) # shape [BATCHSIZE, SEQLEN] with BATCHSIZE=1 and SEQLEN=1
c = chr(my_txtutils.convert_to_alphabet(c))
print(c, end="")
target.write(c)
if c == '\n':
ncnt = 0
else:
ncnt += 1
if ncnt == 100:
print("")
ncnt = 0
target.close()
| 35.583333 | 122 | 0.619438 |
4a237a854535205f287a9834509f8a17f08a657b | 4,073 | py | Python | mtl_rl/environment.py | snehanyk05/RL_CNN_Lateral_Control | 589e98ad895cdb27c9b39fb0bee3d05d9e48c416 | [
"MIT"
] | null | null | null | mtl_rl/environment.py | snehanyk05/RL_CNN_Lateral_Control | 589e98ad895cdb27c9b39fb0bee3d05d9e48c416 | [
"MIT"
] | null | null | null | mtl_rl/environment.py | snehanyk05/RL_CNN_Lateral_Control | 589e98ad895cdb27c9b39fb0bee3d05d9e48c416 | [
"MIT"
] | null | null | null | import time
import sysv_ipc
from utils import *
class Env(object):
def __init__(self, shm_sz=640*480*3+4*1+54*4, vision=False):
self.shm_sz = shm_sz
self.vision = vision
self.first_launch = True
os.system("pkill torcs")
time.sleep(0.5)
os.system("torcs &")
time.sleep(0.5)
os.system("sh autostart.sh")
time.sleep(0.5)
self.shared_memory = sysv_ipc.SharedMemory(934)
# check if launch successfully
self.check_launch()
self.steps_ = 0
def step(self, a):
while True:
shared = self.shared_memory.read(self.shm_sz)
written = read_written(shared)
if written == 1:
ob, r, term, info = self.read_(shared)
self.write_(a)
self.steps_ += 1
return ob, r, term, info
def read_(self, shared):
speed = read_speed(shared) # [0, 50]
to_track_middle = read_to_track_middle(shared) # [-1, 1]
angle_ = read_angle(shared)
angle = angle_/3.1416 # [-1, 1]
to_track_middle_m = read_dist_raced(shared)
# steer = read_steer(shared)
info = {"speed": speed, "to_track_middle": to_track_middle,
"angle": angle_, "to_middle_m": to_track_middle_m}
if self.vision:
ob = read_img(shared)
else:
ob = [angle, to_track_middle, speed*3.6/70]
# img = read_img(shared)
# print "dist to middle:", to_track_middle
if abs(to_track_middle) <= 1.0:
term = False
else:
term = True
reward = np.cos(angle) - np.sin(np.abs(angle)) - np.abs(to_track_middle)
if term:
reward = -1.0
# print "reward: %.4f\tdist: %.3f\tangle: %.3f | %s" % (reward, to_track_middle, angle, case)
return ob, reward, term, info
def write_(self, a):
write_steer(self.shared_memory, a)
write_written(self.shared_memory)
def reset(self, relaunch = False):
if not self.first_launch:
if not relaunch:
write_restart(self.shared_memory)
write_written(self.shared_memory)
# Loop till restart complete
while True:
shared = self.shared_memory.read(self.shm_sz)
written = read_written(shared)
if written == 1:
ob, r, term, info = self.read_(shared)
write_written(self.shared_memory)
if not term:
break
else:
# relaunch here
write_relaunch(self.shared_memory)
write_written(self.shared_memory)
time.sleep(0.5)
os.system("pkill torcs")
time.sleep(0.5)
os.system("torcs &")
time.sleep(0.5)
os.system("sh autostart.sh")
time.sleep(0.5)
self.check_launch()
while True:
shared = self.shared_memory.read(self.shm_sz)
written = read_written(shared)
if written == 1:
ob, r, term, info = self.read_(shared)
break
self.first_launch = False
return ob
def end(self):
os.system("pkill torcs")
def check_launch(self):
written = 0
while True:
for i in range(5):
time.sleep(1)
shared = self.shared_memory.read(self.shm_sz)
written = read_written(shared)
print('written = ', written)
if written != 1:
print("Count down:", (3 - i))
else:
break
if written == 1:
break
os.system("pkill torcs")
time.sleep(0.5)
os.system("torcs &")
time.sleep(0.5)
os.system("sh autostart.sh")
time.sleep(0.5)
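# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a patched TORCS build that exposes the shared-memory segment (key 934)
# and the autostart.sh script referenced above.
if __name__ == "__main__":
    env = Env(vision=False)
    ob = env.reset()
    for _ in range(100):
        action = 0.0                      # steer straight; a real agent predicts this
        ob, reward, done, info = env.step(action)
        if done:
            ob = env.reset(relaunch=False)
    env.end()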
| 33.661157 | 101 | 0.503315 |
4a237b36fa569f810fd20fb71b6fe2dbef85d298 | 401 | py | Python | ITE-428/basicproject2/MyLibrary.py | richeyphu/ITE-428-LAB | c68fdddb7bbfa8b74a58a0b26bdcb4565af3b08b | [
"MIT"
] | 1 | 2021-07-14T11:12:19.000Z | 2021-07-14T11:12:19.000Z | ITE-428/basicproject2/MyLibrary.py | richeyphu/ITE-428-LAB | c68fdddb7bbfa8b74a58a0b26bdcb4565af3b08b | [
"MIT"
] | null | null | null | ITE-428/basicproject2/MyLibrary.py | richeyphu/ITE-428-LAB | c68fdddb7bbfa8b74a58a0b26bdcb4565af3b08b | [
"MIT"
] | null | null | null | # function declaration
import math
def line1():
print("-" * 35)
def line2(ch): # line2('#')
# print(ch * 35)
print("{}".format(ch) * 35)
def line3(ch, num):
print("{}".format(ch) * num)
def calBMI(w, h):
bmi = w / math.pow(h, 2)
return bmi
def create_email(name, last):
return "{}.{}[email protected]".format(last[0:2].lower(), name.lower())
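# Illustrative usage sketch (not part of the original file); only functions defined
# above are used, and the argument values are made up.
if __name__ == "__main__":
    line1()
    line3('*', 20)                                   # prints 20 asterisks
    print("BMI: {:.2f}".format(calBMI(70, 1.75)))    # 70 / 1.75**2 ~ 22.86
    print(create_email("Somchai", "Jaidee"))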
| 16.04 | 72 | 0.523691 |
4a237bd18d3291d79be0411ca59e81f0f826fbb9 | 10,156 | py | Python | qiskit/circuit/library/arithmetic/piecewise_linear_pauli_rotations.py | maddy-tod/qiskit-terra | f11740cf0375c725880fc5feea749fbb64011f11 | [
"Apache-2.0"
] | 1 | 2020-04-21T04:32:08.000Z | 2020-04-21T04:32:08.000Z | qiskit/circuit/library/arithmetic/piecewise_linear_pauli_rotations.py | maddy-tod/qiskit-terra | f11740cf0375c725880fc5feea749fbb64011f11 | [
"Apache-2.0"
] | null | null | null | qiskit/circuit/library/arithmetic/piecewise_linear_pauli_rotations.py | maddy-tod/qiskit-terra | f11740cf0375c725880fc5feea749fbb64011f11 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=no-member
"""Piecewise-linearly-controlled rotation."""
from typing import List, Optional
import numpy as np
from qiskit.circuit import QuantumRegister
from qiskit.circuit.exceptions import CircuitError
from .functional_pauli_rotations import FunctionalPauliRotations
from .linear_pauli_rotations import LinearPauliRotations
from .integer_comparator import IntegerComparator
class PiecewiseLinearPauliRotations(FunctionalPauliRotations):
r"""Piecewise-linearly-controlled Pauli rotations.
For a piecewise linear (not necessarily continuous) function :math:`f(x)`, which is defined
through breakpoints, slopes and offsets as follows.
Suppose the breakpoints :math:`(x_0, ..., x_J)` are a subset of :math:`[0, 2^n-1]`, where
:math:`n` is the number of state qubits. Further on, denote the corresponding slopes and
offsets by :math:`a_j` and :math:`b_j` respectively.
Then f(x) is defined as:
.. math::
f(x) = \begin{cases}
0, x < x_0 \\
a_j (x - x_j) + b_j, x_j \leq x < x_{j+1}
\end{cases}
where we implicitly assume :math:`x_{J+1} = 2^n`.
"""
def __init__(self,
num_state_qubits: Optional[int] = None,
breakpoints: Optional[List[int]] = None,
slopes: Optional[List[float]] = None,
offsets: Optional[List[float]] = None,
basis: str = 'Y',
name: str = 'pw_lin') -> None:
"""Construct piecewise-linearly-controlled Pauli rotations.
Args:
num_state_qubits: The number of qubits representing the state.
breakpoints: The breakpoints to define the piecewise-linear function.
Defaults to ``[0]``.
slopes: The slopes for different segments of the piecewise-linear function.
Defaults to ``[1]``.
offsets: The offsets for different segments of the piecewise-linear function.
Defaults to ``[0]``.
basis: The type of Pauli rotation (``'X'``, ``'Y'``, ``'Z'``).
name: The name of the circuit.
"""
# store parameters
self._breakpoints = breakpoints if breakpoints is not None else [0]
self._slopes = slopes if slopes is not None else [1]
self._offsets = offsets if offsets is not None else [0]
super().__init__(num_state_qubits=num_state_qubits, basis=basis, name=name)
@property
def breakpoints(self) -> List[int]:
"""The breakpoints of the piecewise linear function.
The function is linear in the intervals ``[point_i, point_{i+1}]`` where the last
        point implicitly is ``2**(num_state_qubits + 1)``.
"""
return self._breakpoints
@breakpoints.setter
def breakpoints(self, breakpoints: List[int]) -> None:
"""Set the breakpoints.
Args:
breakpoints: The new breakpoints.
"""
self._invalidate()
self._breakpoints = breakpoints
if self.num_state_qubits and breakpoints:
self._reset_registers(self.num_state_qubits)
@property
    def slopes(self) -> List[float]:
        """The slopes of the piecewise linear function.
        Each slope applies to the interval ``[point_i, point_{i+1}]`` where the last
        point implicitly is ``2**(num_state_qubits + 1)``.
"""
return self._slopes
@slopes.setter
def slopes(self, slopes: List[float]) -> None:
"""Set the slopes.
Args:
slopes: The new slopes.
"""
self._invalidate()
self._slopes = slopes
@property
    def offsets(self) -> List[float]:
        """The offsets of the piecewise linear function.
        Each offset applies to the interval ``[point_i, point_{i+1}]`` where the last
        point implicitly is ``2**(num_state_qubits + 1)``.
"""
return self._offsets
@offsets.setter
def offsets(self, offsets: List[float]) -> None:
"""Set the offsets.
Args:
offsets: The new offsets.
"""
self._invalidate()
self._offsets = offsets
@property
def mapped_slopes(self) -> List[float]:
"""The slopes mapped to the internal representation.
Returns:
The mapped slopes.
"""
mapped_slopes = np.zeros_like(self.slopes)
for i, slope in enumerate(self.slopes):
mapped_slopes[i] = slope - sum(mapped_slopes[:i])
return mapped_slopes
@property
def mapped_offsets(self) -> List[float]:
"""The offsets mapped to the internal representation.
Returns:
The mapped offsets.
"""
mapped_offsets = np.zeros_like(self.offsets)
for i, (offset, slope, point) in enumerate(zip(self.offsets,
self.slopes,
self.breakpoints)):
mapped_offsets[i] = offset - slope * point - sum(mapped_offsets[:i])
return mapped_offsets
@property
def contains_zero_breakpoint(self) -> bool:
"""Whether 0 is the first breakpoint.
Returns:
True, if 0 is the first breakpoint, otherwise False.
"""
return np.isclose(0, self.breakpoints[0])
def evaluate(self, x: float) -> float:
"""Classically evaluate the piecewise linear rotation.
Args:
x: Value to be evaluated at.
Returns:
Value of piecewise linear function at x.
"""
y = (x >= self.breakpoints[0]) * (x * self.mapped_slopes[0] + self.mapped_offsets[0])
for i in range(1, len(self.breakpoints)):
y = y + (x >= self.breakpoints[i]) * (x * self.mapped_slopes[i] +
self.mapped_offsets[i])
return y
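        # Worked example (illustrative values): with breakpoints=[0, 2], slopes=[1, -1]
        # and offsets=[0, 2], the mapped slopes are [1, -2] and the mapped offsets [0, 4],
        # so evaluate(3) = (3 >= 0)*(3*1 + 0) + (3 >= 2)*(3*(-2) + 4) = 3 - 2 = 1,
        # matching f(3) = -1*(3 - 2) + 2 = 1 from the definition in the class docstring.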
@property
def num_ancilla_qubits(self) -> int:
"""The number of ancilla qubits.
Returns:
The number of ancilla qubits in the circuit.
"""
num_ancilla_qubits = self.num_state_qubits - 1 + len(self.breakpoints)
if self.contains_zero_breakpoint:
num_ancilla_qubits -= 1
return num_ancilla_qubits
def _configuration_is_valid(self, raise_on_failure: bool = True) -> bool:
valid = True
if self.num_state_qubits is None:
valid = False
if raise_on_failure:
raise AttributeError('The number of qubits has not been set.')
if self.num_qubits < self.num_state_qubits + 1:
valid = False
if raise_on_failure:
raise CircuitError('Not enough qubits in the circuit, need at least '
'{}.'.format(self.num_state_qubits + 1))
if len(self.breakpoints) != len(self.slopes) or len(self.breakpoints) != len(self.offsets):
valid = False
if raise_on_failure:
raise ValueError('Mismatching sizes of breakpoints, slopes and offsets.')
return valid
def _reset_registers(self, num_state_qubits: Optional[int]) -> None:
if num_state_qubits:
qr_state = QuantumRegister(num_state_qubits)
qr_target = QuantumRegister(1)
self.qregs = [qr_state, qr_target]
if self.num_ancilla_qubits > 0:
qr_ancilla = QuantumRegister(self.num_ancilla_qubits)
self.qregs += [qr_ancilla]
else:
self.qregs = []
def _build(self):
super()._build()
qr_state = self.qubits[:self.num_state_qubits]
qr_target = [self.qubits[self.num_state_qubits]]
qr_ancilla = self.qubits[self.num_state_qubits + 1:]
# apply comparators and controlled linear rotations
for i, point in enumerate(self.breakpoints):
if i == 0 and self.contains_zero_breakpoint:
# apply rotation
lin_r = LinearPauliRotations(num_state_qubits=self.num_state_qubits,
slope=self.mapped_slopes[i],
offset=self.mapped_offsets[i],
basis=self.basis)
self.append(lin_r.to_gate(), qr_state[:] + qr_target)
else:
if self.contains_zero_breakpoint:
i_compare = i - 1
else:
i_compare = i
# apply Comparator
comp = IntegerComparator(num_state_qubits=self.num_state_qubits, value=point)
qr = qr_state[:] + [qr_ancilla[i_compare]] # add ancilla as compare qubit
qr_remaining_ancilla = qr_ancilla[i_compare + 1:] # take remaining ancillas
self.append(comp.to_gate(),
qr[:] + qr_remaining_ancilla[:comp.num_ancilla_qubits])
# apply controlled rotation
lin_r = LinearPauliRotations(num_state_qubits=self.num_state_qubits,
slope=self.mapped_slopes[i],
offset=self.mapped_offsets[i],
basis=self.basis)
self.append(lin_r.to_gate().control(),
[qr_ancilla[i_compare]] + qr_state[:] + qr_target)
# uncompute comparator
self.append(comp.to_gate().inverse(),
qr[:] + qr_remaining_ancilla[:comp.num_ancilla_qubits])
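# Illustrative usage sketch (not part of the original module): construct the circuit
# defined above and compare against its classical evaluation. The numbers are made up.
if __name__ == '__main__':
    pw = PiecewiseLinearPauliRotations(num_state_qubits=2,
                                       breakpoints=[0, 2],
                                       slopes=[1, -1],
                                       offsets=[0, 2])
    print(pw.num_ancilla_qubits)   # ancillas needed for the integer comparators
    print(pw.evaluate(3))          # classical reference value, 1.0 for this setup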
| 36.532374 | 99 | 0.584088 |
4a237c9b2ca671187f36e8d23c528fe8c881207b | 5,238 | py | Python | cogs/currency.py | nzwl702/A-Lonely-Soul | d9ddf6173e3c8795ae7d3db6dff755db3945c894 | [
"MIT"
] | null | null | null | cogs/currency.py | nzwl702/A-Lonely-Soul | d9ddf6173e3c8795ae7d3db6dff755db3945c894 | [
"MIT"
] | null | null | null | cogs/currency.py | nzwl702/A-Lonely-Soul | d9ddf6173e3c8795ae7d3db6dff755db3945c894 | [
"MIT"
] | null | null | null | from datetime import datetime
from random import choice
from random import randint
import discord
from discord.ext import commands
from .userdata.resources.files import Files
from .userdata.user import User
class Currency(commands.Cog):
def __init__(self, client):
self.client = client
def help(self, command):
if command is None:
embed = discord.Embed(title="CURRENCY COMMANDS")
embed.description = "`use`, `inv`, `coins`, `money`, `work`, `search`"
else:
embed = discord.Embed(title=command)
if command == 'use':
embed.description = '`reap use <item>`: use an item that you have'
elif command == 'inv':
embed.description = '`reap inv`: shows your inventory'
elif command == 'coins' or command == 'money':
embed.description = '`reap coins/money [<user>]`: gets the amount of coins you or someone has'
elif command == 'work':
embed.description = '`reap work`: work for some coins!'
elif command == 'search':
embed.description = '`reap search`: find some items/coins'
return embed
@commands.Cog.listener()
async def on_ready(self):
print('Currency cog is ready.')
# Commands
@commands.command()
async def use(self, ctx, item):
rstr = User.USER_LIST[ctx.author.id].use(item)
await ctx.send(rstr)
@commands.command()
async def inv(self, ctx):
rstr = User.USER_LIST[ctx.author.id].getinv()
await ctx.send(rstr)
# Later replace with some way of getting a pen and paper -> mix
@commands.command()
async def get_map(self, ctx):
User.USER_LIST[ctx.author.id].add_inv('map')
@commands.command(aliases=['money'])
async def coins(self, ctx, other=None):
if other is None:
coins = User.USER_LIST[ctx.author.id].inv.coins
await ctx.send(f"You have {coins} coins")
else:
coins = User.USER_LIST[int(other[3:-1])].inv.coins
await ctx.send(f"They have {coins} coins")
@commands.command()
async def work(self, ctx):
now_time = datetime.now().replace(microsecond=0)
diff = 0
ok = False
if 'work' not in User.USER_LIST[ctx.author.id].wait.keys():
ok = True
else:
last_time = datetime.strptime(User.USER_LIST[ctx.author.id].wait['work'], '%Y-%m-%d/%H:%M:%S')
diff = (now_time - last_time).total_seconds()
ok = diff >= 600
if ok:
User.USER_LIST[ctx.author.id].wait['work'] = str(now_time.strftime("%Y-%m-%d/%H:%M:%S"))
lines = Files.read('sentences.txt')
ans = lines[randint(0, len(lines) - 1)].rstrip('\n')
line = 'Retype the following line: `' + ans + '`'
await ctx.send(line)
def check(m):
return m.author == ctx.author
try:
answer = await self.client.wait_for('message', check=check, timeout=20.0)
except:
await ctx.send('You are too slow!')
return
if answer.content[1:] == ans[1:]:
msg = 'Nice job! You earned 500 coins.'
User.USER_LIST[ctx.author.id].inv.coins += 500
else:
msg = 'Bruh do you know how to type? Go back to middle school.'
await ctx.send(msg)
else:
msg = "You have to wait a little before working.\n" + \
                  str((600 - int(diff)) // 60) + " minutes " + str(int(600 - diff) % 60) + \
" seconds until you can work again"
await ctx.send(msg)
@commands.command()
async def search(self, ctx):
now_time = datetime.now().replace(microsecond=0)
diff = 0
ok = False
if 'search' not in User.USER_LIST[ctx.author.id].wait.keys():
ok = True
else:
last_time = datetime.strptime(User.USER_LIST[ctx.author.id].wait['search'], '%Y-%m-%d/%H:%M:%S')
diff = (now_time - last_time).total_seconds()
ok = diff >= 10
if ok:
User.USER_LIST[ctx.author.id].wait['search'] = str(now_time.strftime("%Y-%m-%d/%H:%M:%S"))
rint = randint(1, 12)
if rint < 3:
msg = 'Rip. You didnt find anything.'
else:
searchable_items = ['pen', 'cloth', 'woodenchest', 'paper']
if rint < 9:
amt = randint(40, 60)
item = str(amt) + ' coins'
User.USER_LIST[ctx.author.id].inv.coins += amt
else:
item = choice(searchable_items)
User.USER_LIST[ctx.author.id].inv.add_inv(item)
item = '1 ' + item
msg = 'Wow. You have received ' + item
await ctx.send(msg)
else:
msg = "You have to wait a little before searching again.\n" + \
str(int(10 - diff)) + " seconds until you can search again"
await ctx.send(msg)
def setup(client):
client.add_cog(Currency(client))
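# Illustrative refactoring sketch (not part of the original cog): `work` and `search`
# repeat the same cooldown bookkeeping, which a helper like this could centralise.
# It assumes `user` exposes the same `wait` dict of '%Y-%m-%d/%H:%M:%S' strings used above.
def _cooldown_remaining(user, key, seconds):
    """Return how many whole seconds remain before `key` may be used again (0 if ready)."""
    if key not in user.wait:
        return 0
    last_time = datetime.strptime(user.wait[key], '%Y-%m-%d/%H:%M:%S')
    elapsed = (datetime.now().replace(microsecond=0) - last_time).total_seconds()
    return max(0, int(seconds - elapsed))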
| 35.632653 | 110 | 0.538946 |
4a237cb2a851c50fe071b57920003df1b635378d | 24,580 | py | Python | nuitka/tree/TreeHelpers.py | Pihu1998/Nuitka | b84034fbc516afb1d501a9eb84a58552efa247c2 | [
"Apache-2.0"
] | null | null | null | nuitka/tree/TreeHelpers.py | Pihu1998/Nuitka | b84034fbc516afb1d501a9eb84a58552efa247c2 | [
"Apache-2.0"
] | 1 | 2019-03-01T11:33:40.000Z | 2019-03-01T11:33:40.000Z | nuitka/tree/TreeHelpers.py | riya-17/Nuitka | 69183f6567befd62bc1d768dd070fb99475b8cb4 | [
"Apache-2.0"
] | 1 | 2019-03-26T16:56:21.000Z | 2019-03-26T16:56:21.000Z | # Copyright 2019, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Helper functions for parsing the AST nodes and building the Nuitka node tree.
"""
import ast
from logging import warning
from nuitka import Constants, Options, Tracing
from nuitka.nodes.CallNodes import makeExpressionCall
from nuitka.nodes.CodeObjectSpecs import CodeObjectSpec
from nuitka.nodes.ConstantRefNodes import makeConstantRefNode
from nuitka.nodes.ContainerMakingNodes import (
ExpressionMakeList,
ExpressionMakeSetLiteral,
ExpressionMakeTuple,
)
from nuitka.nodes.DictionaryNodes import ExpressionKeyValuePair, ExpressionMakeDict
from nuitka.nodes.ExceptionNodes import StatementReraiseException
from nuitka.nodes.FrameNodes import (
StatementsFrameAsyncgen,
StatementsFrameCoroutine,
StatementsFrameFunction,
StatementsFrameGenerator,
StatementsFrameModule,
)
from nuitka.nodes.ImportNodes import ExpressionBuiltinImport
from nuitka.nodes.NodeBases import NodeBase
from nuitka.nodes.NodeMakingHelpers import mergeStatements
from nuitka.nodes.StatementNodes import StatementsSequence
from nuitka.PythonVersions import needsSetLiteralReverseInsertion, python_version
def dump(node):
Tracing.printLine(ast.dump(node))
def getKind(node):
return node.__class__.__name__.split(".")[-1]
def extractDocFromBody(node):
body = node.body
doc = None
# Work around ast.get_docstring breakage.
if (
node.body
and getKind(node.body[0]) == "Expr"
and getKind(node.body[0].value) == "Str"
):
if "no_docstrings" not in Options.getPythonFlags():
doc = body[0].value.s
body = body[1:]
return body, doc
def parseSourceCodeToAst(source_code, filename, line_offset):
# Workaround: ast.parse cannot cope with some situations where a file is not
# terminated by a new line.
if not source_code.endswith("\n"):
source_code = source_code + "\n"
body = ast.parse(source_code, filename)
assert getKind(body) == "Module"
if line_offset > 0:
ast.increment_lineno(body, line_offset)
return body
def detectFunctionBodyKind(nodes, start_value=None):
# This is a complex mess, following the scope means a lot of checks need
# to be done. pylint: disable=too-many-branches,too-many-statements
indications = set()
if start_value is not None:
indications.add(start_value)
flags = set()
def _checkCoroutine(field):
""" Check only for co-routine nature of the field and only update that.
"""
# TODO: This is clumsy code, trying to achieve what non-local does for
# Python2 as well.
old = set(indications)
indications.clear()
_check(field)
if "Coroutine" in indications:
old.add("Coroutine")
indications.clear()
indications.update(old)
del old
def _check(node):
node_class = node.__class__
if node_class is ast.Yield:
indications.add("Generator")
elif (
python_version >= 300 and node_class is ast.YieldFrom
): # @UndefinedVariable
indications.add("Generator")
elif python_version >= 350 and node_class in (
ast.Await,
ast.AsyncWith,
): # @UndefinedVariable
indications.add("Coroutine")
# Recurse to children, but do not cross scope boundary doing so.
if node_class is ast.ClassDef:
for name, field in ast.iter_fields(node):
if name in ("name", "body"):
pass
elif name in ("bases", "decorator_list", "keywords"):
for child in field:
_check(child)
elif name == "starargs":
if field is not None:
_check(field)
elif name == "kwargs":
if field is not None:
_check(field)
else:
assert False, (name, field, ast.dump(node))
elif node_class in (ast.FunctionDef, ast.Lambda) or (
python_version >= 350 and node_class is ast.AsyncFunctionDef
): # @UndefinedVariable
for name, field in ast.iter_fields(node):
if name in ("name", "body"):
pass
elif name in ("bases", "decorator_list"):
for child in field:
_check(child)
elif name == "args":
for child in field.defaults:
_check(child)
if python_version >= 300:
for child in node.args.kw_defaults:
if child is not None:
_check(child)
for child in node.args.args:
if child.annotation is not None:
_check(child.annotation)
elif name == "returns":
if field is not None:
_check(field)
else:
assert False, (name, field, ast.dump(node))
elif node_class is ast.GeneratorExp:
for name, field in ast.iter_fields(node):
if name == "name":
pass
elif name in ("body", "comparators", "elt"):
if python_version >= 370:
_checkCoroutine(field)
elif name == "generators":
_check(field[0].iter)
# New syntax in 3.7 allows these to be present in functions not
# declared with "async def", so we need to check them, but
# only if top level.
if python_version >= 370 and node in nodes:
for gen in field:
if gen.is_async:
indications.add("Coroutine")
break
elif _checkCoroutine(gen):
break
else:
assert False, (name, field, ast.dump(node))
elif node_class is ast.ListComp and python_version >= 300:
for name, field in ast.iter_fields(node):
if name in ("name", "body", "comparators"):
pass
elif name == "generators":
_check(field[0].iter)
elif name in ("body", "elt"):
_check(field)
else:
assert False, (name, field, ast.dump(node))
elif python_version >= 270 and node_class is ast.SetComp: # @UndefinedVariable
for name, field in ast.iter_fields(node):
if name in ("name", "body", "comparators", "elt"):
pass
elif name == "generators":
_check(field[0].iter)
else:
assert False, (name, field, ast.dump(node))
elif python_version >= 270 and node_class is ast.DictComp: # @UndefinedVariable
for name, field in ast.iter_fields(node):
if name in ("name", "body", "comparators", "key", "value"):
pass
elif name == "generators":
_check(field[0].iter)
else:
assert False, (name, field, ast.dump(node))
elif node_class is ast.Name:
if python_version >= 300 and node.id == "super":
flags.add("has_super")
elif python_version < 300 and node_class is ast.Exec:
flags.add("has_exec")
if node.globals is None:
flags.add("has_unqualified_exec")
for child in ast.iter_child_nodes(node):
_check(child)
elif python_version < 300 and node_class is ast.ImportFrom:
for import_desc in node.names:
if import_desc.name[0] == "*":
flags.add("has_exec")
for child in ast.iter_child_nodes(node):
_check(child)
else:
for child in ast.iter_child_nodes(node):
_check(child)
for node in nodes:
_check(node)
if indications:
if "Coroutine" in indications and "Generator" in indications:
function_kind = "Asyncgen"
else:
# If we found something, make sure we agree on all clues.
assert len(indications) == 1, indications
function_kind = indications.pop()
else:
function_kind = "Function"
return function_kind, flags
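# Illustrative behaviour (assumption, inferred from the checks above): a body that
# contains `yield` is reported as ("Generator", flags), one that contains `await` or
# `async with` as ("Coroutine", flags), and one that mixes the two as ("Asyncgen", flags);
# plain bodies come back as ("Function", flags).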
build_nodes_args3 = None
build_nodes_args2 = None
build_nodes_args1 = None
def setBuildingDispatchers(path_args3, path_args2, path_args1):
# Using global here, as this is really a singleton, in the form of a module,
# and this is to break the cyclic dependency it has, pylint: disable=global-statement
global build_nodes_args3, build_nodes_args2, build_nodes_args1
build_nodes_args3 = path_args3
build_nodes_args2 = path_args2
build_nodes_args1 = path_args1
def buildNode(provider, node, source_ref, allow_none=False):
if node is None and allow_none:
return None
try:
kind = getKind(node)
if hasattr(node, "lineno"):
source_ref = source_ref.atLineNumber(node.lineno)
else:
source_ref = source_ref
if kind in build_nodes_args3:
result = build_nodes_args3[kind](
provider=provider, node=node, source_ref=source_ref
)
elif kind in build_nodes_args2:
result = build_nodes_args2[kind](node=node, source_ref=source_ref)
elif kind in build_nodes_args1:
result = build_nodes_args1[kind](source_ref=source_ref)
elif kind == "Pass":
result = None
else:
assert False, ast.dump(node)
if result is None and allow_none:
return None
assert isinstance(result, NodeBase), result
return result
except SyntaxError:
raise
except RuntimeError:
# Very likely the stack overflow, which we will turn into too complex
# code exception, don't warn about it with a code dump then.
raise
except:
warning("Problem at '%s' with %s." % (source_ref, ast.dump(node)))
raise
def buildNodeList(provider, nodes, source_ref, allow_none=False):
if nodes is not None:
result = []
for node in nodes:
if hasattr(node, "lineno"):
node_source_ref = source_ref.atLineNumber(node.lineno)
else:
node_source_ref = source_ref
entry = buildNode(provider, node, node_source_ref, allow_none)
if entry is not None:
result.append(entry)
return result
else:
return []
_host_node = None
def buildAnnotationNode(provider, node, source_ref):
if (
python_version >= 370
and provider.getParentModule().getFutureSpec().isFutureAnnotations()
):
# Using global value for cache, to avoid creating it over and over,
# avoiding the pylint: disable=global-statement
global _host_node
if _host_node is None:
_host_node = ast.parse("x:1")
_host_node.body[0].annotation = node
r = compile(_host_node, "<annotations>", "exec", 1048576, dont_inherit=True)
# Using exec here, to compile the ast node tree back to string,
# there is no accessible "ast.unparse", and this works as a hack
# to convert our node to a string annotation, pylint: disable=exec-used
m = {}
exec(r, m) # @UndefinedVariable
return makeConstantRefNode(
constant=m["__annotations__"]["x"], source_ref=source_ref
)
return buildNode(provider, node, source_ref)
def makeModuleFrame(module, statements, source_ref):
assert module.isCompiledPythonModule()
if Options.isFullCompat():
code_name = "<module>"
else:
if module.isMainModule():
code_name = "<module>"
else:
code_name = "<module %s>" % module.getFullName()
return StatementsFrameModule(
statements=statements,
code_object=CodeObjectSpec(
co_name=code_name,
co_kind="Module",
co_varnames=(),
co_argcount=0,
co_kwonlyargcount=0,
co_has_starlist=False,
co_has_stardict=False,
co_filename=module.getRunTimeFilename(),
co_lineno=source_ref.getLineNumber(),
future_spec=module.getFutureSpec(),
),
source_ref=source_ref,
)
def buildStatementsNode(provider, nodes, source_ref):
# We are not creating empty statement sequences.
if nodes is None:
return None
# Build as list of statements, throw away empty ones, and remove useless
# nesting.
statements = buildNodeList(provider, nodes, source_ref, allow_none=True)
statements = mergeStatements(statements)
# We are not creating empty statement sequences. Might be empty, because
# e.g. a global node generates not really a statement, or pass statements.
if not statements:
return None
else:
return StatementsSequence(statements=statements, source_ref=source_ref)
def buildFrameNode(provider, nodes, code_object, source_ref):
# We are not creating empty statement sequences.
if nodes is None:
return None
# Build as list of statements, throw away empty ones, and remove useless
# nesting.
statements = buildNodeList(provider, nodes, source_ref, allow_none=True)
statements = mergeStatements(statements)
# We are not creating empty statement sequences. Might be empty, because
# e.g. a global node generates not really a statement, or pass statements.
if not statements:
return None
if provider.isExpressionOutlineFunction():
provider = provider.getParentVariableProvider()
if provider.isExpressionFunctionBody() or provider.isExpressionClassBody():
result = StatementsFrameFunction(
statements=statements, code_object=code_object, source_ref=source_ref
)
elif provider.isExpressionGeneratorObjectBody():
result = StatementsFrameGenerator(
statements=statements, code_object=code_object, source_ref=source_ref
)
elif provider.isExpressionCoroutineObjectBody():
result = StatementsFrameCoroutine(
statements=statements, code_object=code_object, source_ref=source_ref
)
elif provider.isExpressionAsyncgenObjectBody():
result = StatementsFrameAsyncgen(
statements=statements, code_object=code_object, source_ref=source_ref
)
else:
assert False, provider
return result
def makeStatementsSequenceOrStatement(statements, source_ref):
""" Make a statement sequence, but only if more than one statement
Useful for when we can unroll constructs already here, but are not sure if
we actually did that. This avoids the branch or the pollution of doing it
always.
"""
if len(statements) > 1:
return StatementsSequence(
statements=mergeStatements(statements), source_ref=source_ref
)
else:
return statements[0]
def makeStatementsSequence(statements, allow_none, source_ref):
if allow_none:
statements = tuple(
statement for statement in statements if statement is not None
)
if statements:
return StatementsSequence(
statements=mergeStatements(statements), source_ref=source_ref
)
else:
return None
def makeStatementsSequenceFromStatement(statement):
return StatementsSequence(
statements=mergeStatements((statement,)),
source_ref=statement.getSourceReference(),
)
def makeStatementsSequenceFromStatements(*statements):
assert statements
assert None not in statements
statements = mergeStatements(statements, allow_none=False)
return StatementsSequence(
statements=statements, source_ref=statements[0].getSourceReference()
)
def makeSequenceCreationOrConstant(sequence_kind, elements, source_ref):
# Sequence creation. Tries to avoid creations with only constant
# elements. Would be caught by optimization, but would be useless churn. For
# mutable constants we cannot do it though.
# Due to the many sequence types, there is a lot of cases here
# pylint: disable=too-many-branches
for element in elements:
if not element.isExpressionConstantRef():
constant = False
break
else:
constant = True
sequence_kind = sequence_kind.lower()
# Note: This would happen in optimization instead, but lets just do it
# immediately to save some time.
if constant:
if sequence_kind == "tuple":
const_type = tuple
elif sequence_kind == "list":
const_type = list
elif sequence_kind == "set":
const_type = set
if needsSetLiteralReverseInsertion():
elements = tuple(reversed(elements))
else:
assert False, sequence_kind
result = makeConstantRefNode(
constant=const_type(element.getConstant() for element in elements),
source_ref=source_ref,
user_provided=True,
)
else:
if sequence_kind == "tuple":
result = ExpressionMakeTuple(elements=elements, source_ref=source_ref)
elif sequence_kind == "list":
result = ExpressionMakeList(elements=elements, source_ref=source_ref)
elif sequence_kind == "set":
result = ExpressionMakeSetLiteral(elements=elements, source_ref=source_ref)
else:
assert False, sequence_kind
if elements:
result.setCompatibleSourceReference(
source_ref=elements[-1].getCompatibleSourceReference()
)
return result
def makeDictCreationOrConstant(keys, values, source_ref):
# Create dictionary node. Tries to avoid it for constant values that are not
# mutable.
assert len(keys) == len(values)
for key, value in zip(keys, values):
if not key.isExpressionConstantRef() or not key.isKnownToBeHashable():
constant = False
break
if not value.isExpressionConstantRef():
constant = False
break
else:
constant = True
# Note: This would happen in optimization instead, but lets just do it
# immediately to save some time.
if constant:
# Unless told otherwise, create the dictionary in its full size, so
# that no growing occurs and the constant becomes as similar as possible
# before being marshaled.
result = makeConstantRefNode(
constant=Constants.createConstantDict(
keys=[key.getConstant() for key in keys],
values=[value.getConstant() for value in values],
),
user_provided=True,
source_ref=source_ref,
)
else:
result = ExpressionMakeDict(
pairs=[
ExpressionKeyValuePair(
key=key, value=value, source_ref=key.getSourceReference()
)
for key, value in zip(keys, values)
],
source_ref=source_ref,
)
if values:
result.setCompatibleSourceReference(
source_ref=values[-1].getCompatibleSourceReference()
)
return result
def makeDictCreationOrConstant2(keys, values, source_ref):
# Create dictionary node. Tries to avoid it for constant values that are not
# mutable. Keys are strings.
assert len(keys) == len(values)
for value in values:
if not value.isExpressionConstantRef():
constant = False
break
else:
constant = True
# Note: This would happen in optimization instead, but lets just do it
# immediately to save some time.
if constant:
# Unless told otherwise, create the dictionary in its full size, so
# that no growing occurs and the constant becomes as similar as possible
# before being marshaled.
result = makeConstantRefNode(
constant=Constants.createConstantDict(
keys=keys, values=[value.getConstant() for value in values]
),
user_provided=True,
source_ref=source_ref,
)
else:
result = ExpressionMakeDict(
pairs=[
ExpressionKeyValuePair(
key=makeConstantRefNode(
constant=key,
source_ref=value.getSourceReference(),
user_provided=True,
),
value=value,
source_ref=value.getSourceReference(),
)
for key, value in zip(keys, values)
],
source_ref=source_ref,
)
if values:
result.setCompatibleSourceReference(
source_ref=values[-1].getCompatibleSourceReference()
)
return result
def getStatementsAppended(statement_sequence, statements):
return makeStatementsSequence(
statements=(statement_sequence, statements),
allow_none=False,
source_ref=statement_sequence.getSourceReference(),
)
def getStatementsPrepended(statement_sequence, statements):
return makeStatementsSequence(
statements=(statements, statement_sequence),
allow_none=False,
source_ref=statement_sequence.getSourceReference(),
)
def makeReraiseExceptionStatement(source_ref):
# TODO: Remove the statement sequence packaging and have users do it themselves
# in factory functions instead.
return StatementsSequence(
statements=(StatementReraiseException(source_ref=source_ref),),
source_ref=source_ref,
)
def makeAbsoluteImportNode(module_name, source_ref):
return ExpressionBuiltinImport(
name=makeConstantRefNode(module_name, source_ref, True),
globals_arg=None,
locals_arg=None,
fromlist=None,
level=makeConstantRefNode(0, source_ref, True),
source_ref=source_ref,
)
def mangleName(variable_name, owner):
if not variable_name.startswith("__") or variable_name.endswith("__"):
return variable_name
else:
# The mangling of function variable names depends on being inside a
# class.
class_container = owner.getContainingClassDictCreation()
if class_container is None:
return variable_name
else:
return "_%s%s" % (class_container.getName().lstrip("_"), variable_name)
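# Worked example of the private-name mangling above: inside a class "Foo", the name
# "__x" becomes "_Foo__x", while "x", "__x__" and names used outside any class are
# returned unchanged.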
def makeCallNode(called, *args, **kwargs):
source_ref = args[-1]
if len(args) > 1:
args = makeSequenceCreationOrConstant(
sequence_kind="tuple", elements=args[:-1], source_ref=source_ref
)
else:
args = None
if kwargs:
kwargs = makeDictCreationOrConstant2(
keys=tuple(kwargs.keys()),
values=tuple(kwargs.values()),
source_ref=source_ref,
)
else:
kwargs = None
return makeExpressionCall(
called=called, args=args, kw=kwargs, source_ref=source_ref
)
build_contexts = [None]
def pushBuildContext(value):
build_contexts.append(value)
def popBuildContext():
del build_contexts[-1]
def getBuildContext():
return build_contexts[-1]
| 32.68617 | 89 | 0.615867 |
4a237e2f5468e056524fc90d5d8468e8dd2044df | 103 | py | Python | discriminative_loss.py | ForrestPi/SegDL | 56f2ff229dfa7540704d6de50292c724693aac75 | [
"MIT"
] | 1 | 2021-07-26T08:30:11.000Z | 2021-07-26T08:30:11.000Z | discriminative_loss.py | ForrestPi/SegDL | 56f2ff229dfa7540704d6de50292c724693aac75 | [
"MIT"
] | null | null | null | discriminative_loss.py | ForrestPi/SegDL | 56f2ff229dfa7540704d6de50292c724693aac75 | [
"MIT"
] | 1 | 2021-07-21T09:18:32.000Z | 2021-07-21T09:18:32.000Z | #https://github.com/Wizaron/instance-segmentation-pytorch/blob/master/code/lib/losses/discriminative.py | 103 | 103 | 0.84466 |
4a237e6ee99b26039431a0369ba42d9f7f317a55 | 11,106 | py | Python | njunmt/tests/data_iterator.py | whr94621/NJUNMT-tf | 29e0b0c577ea7c81acdc80e7a94a1c4dfb85c118 | [
"Apache-2.0"
] | 111 | 2017-12-29T12:48:02.000Z | 2022-03-15T00:47:13.000Z | njunmt/tests/data_iterator.py | whr94621/NJUNMT-tf | 29e0b0c577ea7c81acdc80e7a94a1c4dfb85c118 | [
"Apache-2.0"
] | 3 | 2018-01-27T13:54:42.000Z | 2020-03-02T03:07:19.000Z | njunmt/tests/data_iterator.py | whr94621/NJUNMT-tf | 29e0b0c577ea7c81acdc80e7a94a1c4dfb85c118 | [
"Apache-2.0"
] | 44 | 2017-12-29T12:49:57.000Z | 2022-02-02T13:25:28.000Z | # This file is deprecated
# Copyright 2017 ZhaoChengqi, [email protected], Natural Language Processing Group, Nanjing University (2015-2018).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy
import tensorflow as tf
from tensorflow import gfile
from njunmt.utils.misc import open_file, close_file
from njunmt.utils.misc import padding_batch_data
from njunmt.utils.misc import deprecated
def _encode_fun():
"""
    For the character-level Chinese target side: split a sentence into characters.
:param l_trg:
:return:
"""
if sys.version_info < (3, 0):
return lambda l_trg: [i.encode('utf-8') for i in list(l_trg)]
return lambda l_trg: list(l_trg)
encode_fun = _encode_fun()
def shuffle_data(from_binding, to_binding):
lines_list = []
fps = []
fws = []
for idx in range(len(from_binding)):
lines_list.append([])
fps.append(open(from_binding[idx], "r"))
fws.append(open(to_binding[idx], "w"))
for zip_lines in zip(*fps):
for idx in range(len(zip_lines)):
lines_list[idx].append(zip_lines[idx].strip())
for fp in fps:
fp.close()
rands = numpy.arange(len(lines_list[0]))
numpy.random.shuffle(rands)
for i in rands:
for idx in range(len(lines_list)):
fws[idx].write(lines_list[idx][i] + "\n")
for fw in fws:
fw.close()
@deprecated
class EvalTextIterator:
def __init__(self, source, target,
vocab_source, vocab_target,
batch_size=128,
n_words_src=-1,
n_words_trg=-1):
# read in batch datas
f_source = open_file(source)
if gfile.Exists(target):
f_target = open_file(target)
else:
f_target = open_file(target + "0")
ss_buf = []
tt_buf = []
for ss, tt in zip(f_source, f_target):
ss = vocab_source.convert_to_idlist(ss.strip().split(), n_words_src)
tt = vocab_target.convert_to_idlist(tt.strip().split(), n_words_trg)
ss_buf.append(ss)
tt_buf.append(tt)
f_source.close()
f_target.close()
tlen = numpy.array([len(t) for t in tt_buf])
tidx = tlen.argsort()
_ss_buf = [ss_buf[i] for i in tidx]
_tt_buf = [tt_buf[i] for i in tidx]
ss_buf = _ss_buf
tt_buf = _tt_buf
self.batch_source_buffer = []
self.batch_target_buffer = []
self.batch_data_idx = 0
self.batch_size = batch_size
while self.batch_data_idx < len(ss_buf):
self.batch_source_buffer.append(
padding_batch_data(ss_buf[self.batch_data_idx: self.batch_data_idx + batch_size], vocab_source.eos_id))
self.batch_target_buffer.append(
padding_batch_data(tt_buf[self.batch_data_idx: self.batch_data_idx + batch_size], vocab_target.eos_id))
self.batch_data_idx += batch_size
self.reset()
def __iter__(self):
return self
def reset(self):
self.batch_data_idx = 0
def __next__(self):
"""
        Compatibility wrapper so the iterator also works under Python 3.
:return:
"""
return self.next()
def next(self):
if self.batch_data_idx >= len(self.batch_source_buffer):
self.reset()
raise StopIteration
self.batch_data_idx += 1
return self.batch_source_buffer[self.batch_data_idx - 1], \
self.batch_target_buffer[self.batch_data_idx - 1]
@deprecated
class TrainTextIterator:
"""Simple Bitext iterator."""
def __init__(self, source, target,
vocab_source, vocab_target,
batch_size=80,
maxlen_src=50, maxlen_trg=100,
n_words_src=-1, n_words_trg=-1,
shuffle_every_epoch=None,
shuffle_before_train=None):
"""
:param source: `str`
:param target: `str`
:param vocab_source: `Vocab`
:param vocab_target: `Vocab`
:param batch_size: `int`
:param maxlen_src: `int`
:param maxlen_trg: `int`
:param n_words_src: `int`
:param n_words_trg: `int`
        :param shuffle_every_epoch: if not None, use it as the filename postfix for data reshuffled at each epoch
        :param shuffle_before_train: if not None, use it as the filename postfix for data shuffled once before training
:return:
"""
if shuffle_before_train:
tf.logging.info("shuffling data before training\n"
"\t%s ==> %s\n\t%s ==> %s"
% (source, "./source.shuf." + shuffle_before_train,
target, "./target.shuf." + shuffle_before_train))
shuffle_data([source, target],
["./source.shuf." + shuffle_before_train,
"./target.shuf." + shuffle_before_train])
source = "./source.shuf." + shuffle_before_train
target = "./target.shuf." + shuffle_before_train
self.source_file = source
self.target_file = target
self.source = open_file(source, encoding='utf-8')
self.target = open_file(target, encoding='utf-8')
self.vocab_source = vocab_source
self.vocab_target = vocab_target
self.batch_size = batch_size
self.maxlen_src = maxlen_src
self.maxlen_trg = maxlen_trg
self.n_words_src = n_words_src
self.n_words_trg = n_words_trg
self.source_buffer = []
self.target_buffer = []
self.k = batch_size * 128
self.end_of_data = False
self.shuffle_every_epoch = shuffle_every_epoch
def __iter__(self):
return self
def reset(self):
if self.shuffle_every_epoch:
close_file(self.source)
close_file(self.target)
tf.logging.info("shuffling data among epochs")
shuffle_data([self.source_file, self.target_file],
["./source.shuf." + self.shuffle_every_epoch,
"./target.shuf." + self.shuffle_every_epoch])
self.source = open_file("./source.shuf." + self.shuffle_every_epoch)
self.target = open_file("./target.shuf." + self.shuffle_every_epoch)
else:
self.source.seek(0)
self.target.seek(0)
def __next__(self):
"""
        Compatibility wrapper so the iterator also works under Python 3.
:return:
"""
return self.next()
def next(self):
if self.end_of_data:
self.end_of_data = False
self.reset()
raise StopIteration
source = []
target = []
assert len(self.source_buffer) == len(self.target_buffer), 'Buffer size mismatch'
if len(self.source_buffer) == 0:
cnt = 0
while cnt < self.k:
ss = self.source.readline()
if ss == "":
break
tt = self.target.readline()
if tt == "":
break
ss = ss.strip().split()
tt = tt.strip().split()
if len(ss) > self.maxlen_src or len(tt) > self.maxlen_trg:
continue
cnt += 1
self.source_buffer.append(ss)
self.target_buffer.append(tt)
# sort by target buffer
tlen = numpy.array([len(t) for t in self.target_buffer])
tidx = tlen.argsort()
_sbuf = [self.source_buffer[i] for i in tidx]
_tbuf = [self.target_buffer[i] for i in tidx]
self.source_buffer = _sbuf
self.target_buffer = _tbuf
if len(self.source_buffer) == 0 or len(self.target_buffer) == 0:
self.end_of_data = False
self.reset()
raise StopIteration
try:
while True:
# read source
try:
ss = self.source_buffer.pop(0)
except IndexError:
break
ss = self.vocab_source.convert_to_idlist(ss, self.n_words_src)
# read target
tt = self.target_buffer.pop(0)
tt = self.vocab_target.convert_to_idlist(tt, self.n_words_trg)
source.append(ss)
target.append(tt)
if len(source) >= self.batch_size or \
len(target) >= self.batch_size:
break
except IOError:
self.end_of_data = True
if len(source) <= 0 or len(target) <= 0:
self.end_of_data = False
self.reset()
raise StopIteration
return padding_batch_data(source, self.vocab_source.eos_id), \
padding_batch_data(target, self.vocab_target.eos_id)
@deprecated
class TestTextIterator:
def __init__(self, source,
vocab_source,
batch_size=1,
n_words_src=-1):
# read in batch datas
f_source = open_file(source)
ss_buf = []
ss_str_buf = []
for ss in f_source:
# ss_str_buf.append(ss.strip())
ss_str_buf.append(vocab_source.bpe_encode(ss.strip()))
ss = vocab_source.convert_to_idlist(ss.strip().split(), n_words_src)
ss_buf.append(ss)
f_source.close()
self.batch_source_buffer = []
self.batch_source_str_buffer = []
self.batch_data_idx = 0
self.batch_size = batch_size
while self.batch_data_idx < len(ss_buf):
self.batch_source_buffer.append(
padding_batch_data(ss_buf[self.batch_data_idx: self.batch_data_idx + batch_size], vocab_source.eos_id))
self.batch_source_str_buffer.append(
ss_str_buf[self.batch_data_idx: self.batch_data_idx + batch_size])
self.batch_data_idx += batch_size
self.reset()
def __iter__(self):
return self
def reset(self):
self.batch_data_idx = 0
def __next__(self):
"""
        Compatibility wrapper so the iterator also works under Python 3.
:return:
"""
return self.next()
def next(self):
if self.batch_data_idx >= len(self.batch_source_buffer):
self.reset()
raise StopIteration
self.batch_data_idx += 1
return self.batch_source_str_buffer[self.batch_data_idx - 1], \
self.batch_source_buffer[self.batch_data_idx - 1]
if __name__ == "__main__":
pass
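    # Illustrative usage sketch (assumption: vocab_source / vocab_target are Vocab
    # objects and the listed files exist):
    #
    #     train_iter = TrainTextIterator("train.src", "train.trg",
    #                                    vocab_source, vocab_target,
    #                                    batch_size=80, maxlen_src=50, maxlen_trg=100)
    #     for padded_src, padded_trg in train_iter:
    #         pass  # feed the padded source/target batches to the model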
| 32.005764 | 119 | 0.577256 |
4a237ffbbf3c02ec8284e38dc87c77962b0475c2 | 197 | py | Python | Task/Introspection/Python/introspection-3.py | LaudateCorpus1/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2018-11-09T22:08:38.000Z | 2018-11-09T22:08:38.000Z | Task/Introspection/Python/introspection-3.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/Introspection/Python/introspection-3.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2018-11-09T22:08:40.000Z | 2018-11-09T22:08:40.000Z | def sum_of_global_int_vars():
variables = vars(__builtins__).copy()
variables.update(globals())
print sum(v for v in variables.itervalues() if type(v) == int)
sum_of_global_int_vars()
| 28.142857 | 66 | 0.720812 |
4a238047033e5cbf36f7fcf8fd4f8278040c6210 | 769 | py | Python | sandbox/bad_flush.py | tacaswell/pyFAI | fd63c7d9ba35e687ef5c4ec717c01bf46564572a | [
"MIT"
] | 45 | 2016-07-16T19:43:47.000Z | 2022-03-12T16:53:47.000Z | sandbox/bad_flush.py | tacaswell/pyFAI | fd63c7d9ba35e687ef5c4ec717c01bf46564572a | [
"MIT"
] | 1,125 | 2016-06-09T07:47:57.000Z | 2022-03-31T20:34:00.000Z | sandbox/bad_flush.py | tacaswell/pyFAI | fd63c7d9ba35e687ef5c4ec717c01bf46564572a | [
"MIT"
] | 52 | 2016-06-09T07:30:46.000Z | 2022-02-14T08:25:11.000Z | #!/usr/bin/python3
import logging
logger = logging.basicConfig(level=logging.INFO)
import numpy, pyFAI, pyFAI.azimuthalIntegrator
method = ("no", "csr", "cython")
detector = pyFAI.detector_factory("Pilatus_100k")
ai = pyFAI.azimuthalIntegrator.AzimuthalIntegrator(detector=detector)
rm = max(detector.shape) * detector.pixel1
img = numpy.random.random(detector.shape)
print(ai.integrate1d(img, 5, unit="r_m", radial_range=[0, rm], method=method))
# print(ai.integrate1d(img, 5, unit="r_m", method=method))
for k, v in ai.engines.items():
print(k, v, id(v.engine))
print(ai.integrate1d(img, 5, unit="r_m", radial_range=[0, rm], method=method))
# print(ai.integrate1d(img, 5, unit="r_m", method=method))
for k, v in ai.engines.items():
print(k, v, id(v.engine))
| 40.473684 | 78 | 0.725618 |
4a2380f878e4af4063121bd5e7c6f9db2ca77183 | 1,022 | py | Python | label_studio/ml/migrations/0005_auto_20211010_1344.py | mehdibenamorr/label-studio | 213252d360b028eb031ca127969fb35a897dc2b6 | [
"Apache-2.0"
] | 8,264 | 2019-06-25T23:08:05.000Z | 2022-03-31T18:48:34.000Z | label_studio/ml/migrations/0005_auto_20211010_1344.py | mehdibenamorr/label-studio | 213252d360b028eb031ca127969fb35a897dc2b6 | [
"Apache-2.0"
] | 1,446 | 2019-08-02T13:46:25.000Z | 2022-03-31T23:09:32.000Z | label_studio/ml/migrations/0005_auto_20211010_1344.py | mehdibenamorr/label-studio | 213252d360b028eb031ca127969fb35a897dc2b6 | [
"Apache-2.0"
] | 1,092 | 2019-07-02T16:36:35.000Z | 2022-03-31T20:01:19.000Z | # Generated by Django 3.1.13 on 2021-10-10 13:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ml', '0004_auto_20210820_1610'),
]
operations = [
migrations.AlterField(
model_name='mlbackend',
name='is_interactive',
field=models.BooleanField(default=False, help_text='Used to interactively annotate tasks. If true, model returns one list with results', verbose_name='is_interactive'),
),
migrations.AlterField(
model_name='mlbackend',
name='timeout',
field=models.FloatField(blank=True, default=100.0, help_text='Response model timeout', verbose_name='timeout'),
),
migrations.AlterField(
model_name='mlbackendpredictionjob',
name='model_version',
field=models.TextField(blank=True, help_text='Model version this job is associated with', null=True, verbose_name='model version'),
),
]
| 35.241379 | 180 | 0.642857 |
4a23816fcbfdc2800f25d862feb86b49ebd3ebd8 | 4,233 | py | Python | scipy/sparse/linalg/isolve/tests/test_lsqr.py | smola/scipy | ff8b9d9e87a585a820846d7f459d6156ba621c4d | [
"BSD-3-Clause"
] | 1 | 2020-02-26T12:15:51.000Z | 2020-02-26T12:15:51.000Z | scipy/sparse/linalg/isolve/tests/test_lsqr.py | smola/scipy | ff8b9d9e87a585a820846d7f459d6156ba621c4d | [
"BSD-3-Clause"
] | null | null | null | scipy/sparse/linalg/isolve/tests/test_lsqr.py | smola/scipy | ff8b9d9e87a585a820846d7f459d6156ba621c4d | [
"BSD-3-Clause"
] | null | null | null | from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
assert_array_almost_equal)
import scipy.sparse
import scipy.sparse.linalg
from scipy.sparse.linalg import lsqr
from time import time
# Set up a test problem
n = 35
G = np.eye(n)
normal = np.random.normal
norm = np.linalg.norm
for jj in range(5):
gg = normal(size=n)
hh = gg * gg.T
G += (hh + hh.T) * 0.5
G += normal(size=n) * normal(size=n)
b = normal(size=n)
tol = 1e-10
show = False
maxit = None
def test_basic():
b_copy = b.copy()
X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
assert_(np.all(b_copy == b))
svx = np.linalg.solve(G, b)
xo = X[0]
assert_(norm(svx - xo) < 1e-5)
def test_gh_2466():
row = np.array([0, 0])
col = np.array([0, 1])
val = np.array([1, -1])
A = scipy.sparse.coo_matrix((val, (row, col)), shape=(1, 2))
b = np.asarray([4])
lsqr(A, b)
def test_well_conditioned_problems():
    # Test that the sparse lsqr solver returns the right solution
# on various problems with different random seeds.
# This is a non-regression test for a potential ZeroDivisionError
# raised when computing the `test2` & `test3` convergence conditions.
n = 10
A_sparse = scipy.sparse.eye(n, n)
A_dense = A_sparse.toarray()
with np.errstate(invalid='raise'):
for seed in range(30):
rng = np.random.RandomState(seed + 10)
beta = rng.rand(n)
beta[beta == 0] = 0.00001 # ensure that all the betas are not null
b = A_sparse * beta[:, np.newaxis]
output = lsqr(A_sparse, b, show=show)
# Check that the termination condition corresponds to an approximate
# solution to Ax = b
assert_equal(output[1], 1)
solution = output[0]
# Check that we recover the ground truth solution
assert_array_almost_equal(solution, beta)
# Sanity check: compare to the dense array solver
reference_solution = np.linalg.solve(A_dense, b).ravel()
assert_array_almost_equal(solution, reference_solution)
def test_b_shapes():
# Test b being a scalar.
A = np.array([[1.0, 2.0]])
b = 3.0
x = lsqr(A, b)[0]
assert_almost_equal(norm(A.dot(x) - b), 0)
# Test b being a column vector.
A = np.eye(10)
b = np.ones((10, 1))
x = lsqr(A, b)[0]
assert_almost_equal(norm(A.dot(x) - b.ravel()), 0)
def test_initialization():
# Test the default setting is the same as zeros
b_copy = b.copy()
x_ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
x0 = np.zeros(x_ref[0].shape)
x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
assert_(np.all(b_copy == b))
assert_array_almost_equal(x_ref[0], x[0])
# Test warm-start with single iteration
x0 = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0]
x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
assert_array_almost_equal(x_ref[0], x[0])
assert_(np.all(b_copy == b))
if __name__ == "__main__":
svx = np.linalg.solve(G, b)
tic = time()
X = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
xo = X[0]
phio = X[3]
psio = X[7]
k = X[2]
chio = X[8]
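    # lsqr returns (x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var),
    # so the indices above select the iteration count and the ||x||, ||r|| and ||A'r||
    # norms that are printed below.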
mg = np.amax(G - G.T)
if mg > 1e-14:
sym = 'No'
else:
sym = 'Yes'
print('LSQR')
print("Is linear operator symmetric? " + sym)
print("n: %3g iterations: %3g" % (n, k))
print("Norms computed in %.2fs by LSQR" % (time() - tic))
print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e " % (chio, phio, psio))
print("Residual norms computed directly:")
print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e" % (norm(xo),
norm(G*xo - b),
norm(G.T*(G*xo-b))))
print("Direct solution norms:")
print(" ||x|| %9.4e ||r|| %9.4e " % (norm(svx), norm(G*svx - b)))
print("")
print(" || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo))
print("")
| 30.453237 | 80 | 0.575006 |
4a2381c05606236e1b05e5a78bf22879119a5ca5 | 1,161 | py | Python | WEEKS/wk17/d2/more-examples/binaryToASCII.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/wk17/d2/more-examples/binaryToASCII.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/wk17/d2/more-examples/binaryToASCII.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | # Given a binary string (ASCII encoded), write a function that returns the equivalent decoded text.
# Every eight bits in the binary string represents one character on the ASCII table.
# Examples:
# csBinaryToASCII("011011000110000101101101011000100110010001100001") -> "lambda"
# 01101100 -> 108 -> "l"
# 01100001 -> 97 -> "a"
# 01101101 -> 109 -> "m"
# 01100010 -> 98 -> "b"
# 01100100 -> 100 -> "d"
# 01100001 -> 97 -> "a"
# csBinaryToASCII("") -> ""
# Notes:
# The input string will always be a valid binary string.
# Characters can be in the range from "00000000" to "11111111" (inclusive).
# In the case of an empty input string, your function should return an empty string.
# [execution time limit] 4 seconds (py3)
# [input] string binary
# [output] string
def csBinaryToASCII(binary):
binary_letters = []
letters = ""
if binary == "":
return ""
for index in range(0, len(binary), 8):
binary_letters.append(binary[index : index + 8])
print(binary_letters)
for string in binary_letters:
        binary_int = chr(int(string, 2))
print(binary_int)
letters += binary_int
return letters
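# Illustrative check (not part of the original solution), using the example from the
# problem statement above:
if __name__ == "__main__":
    assert csBinaryToASCII(
        "011011000110000101101101011000100110010001100001") == "lambda"
    assert csBinaryToASCII("") == ""
    print("examples OK")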
| 29.025 | 99 | 0.664944 |
4a23824ce4a8e7a6b3249833270527a275560155 | 403 | py | Python | covid_project/covid_project/asgi.py | Valentin-Rault/Covid-project | cd895d5317dcb920724db2a94e523e1f18efc43b | [
"MIT"
] | null | null | null | covid_project/covid_project/asgi.py | Valentin-Rault/Covid-project | cd895d5317dcb920724db2a94e523e1f18efc43b | [
"MIT"
] | 2 | 2020-11-15T14:43:53.000Z | 2021-07-03T08:34:15.000Z | covid_project/covid_project/asgi.py | Valentin-Rault/Covid-project | cd895d5317dcb920724db2a94e523e1f18efc43b | [
"MIT"
] | null | null | null | """
ASGI config for covid_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'covid_project.settings')
application = get_asgi_application()
| 23.705882 | 78 | 0.791563 |
4a2382cc23d7efb47678a84935487c85f4641865 | 498 | py | Python | data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_seinar_enhanced_level1.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_seinar_enhanced_level1.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/ship/components/reactor/shared_rct_seinar_enhanced_level1.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/reactor/shared_rct_seinar_enhanced_level1.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","rct_seinar_enhanced_level1_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 29.294118 | 98 | 0.751004 |
4a2383af70b55861d4b8740249558e5d82c4735c | 2,811 | py | Python | lottery/branch/prune_early.py | xhchrn/open_lth | 6b3d04a12a2f868ce851bd09b330ea57957c1de6 | [
"MIT"
] | 9 | 2021-03-30T20:43:26.000Z | 2021-12-28T06:25:17.000Z | lottery/branch/prune_early.py | xhchrn/open_lth | 6b3d04a12a2f868ce851bd09b330ea57957c1de6 | [
"MIT"
] | null | null | null | lottery/branch/prune_early.py | xhchrn/open_lth | 6b3d04a12a2f868ce851bd09b330ea57957c1de6 | [
"MIT"
] | 2 | 2021-03-31T01:19:48.000Z | 2021-08-02T13:41:32.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
from lottery.branch import base
import models.registry
import pruning.registry
from pruning.mask import Mask
from pruning.pruned_model import PrunedModel
from training import train
from platforms.platform import get_platform
from utils.tensor_utils import vectorize, unvectorize, shuffle_tensor, shuffle_state_dict
class Branch(base.Branch):
def branch_function(self,
seed: int,
strategy: str = 'sparse_global',
start_at: str = 'rewind',
layers_to_ignore: str = ''):
# Determine the start step.
if start_at == 'init':
start_step = self.lottery_desc.str_to_step('0ep')
state_step = start_step
elif start_at == 'end':
start_step = self.lottery_desc.str_to_step('0ep')
state_step = self.lottery_desc.train_end_step
elif start_at == 'rewind':
start_step = self.lottery_desc.train_start_step
state_step = start_step
else:
raise ValueError(f'Invalid starting point {start_at}')
# Train the model with the new mask.
model = models.registry.load(self.pretrain_root, state_step, self.lottery_desc.model_hparams)
# Get the current level mask and get the target pruning ratio
mask = Mask.load(self.level_root)
sparsity_ratio = mask.get_sparsity_ratio()
target_pruning_fraction = 1.0 - sparsity_ratio
# Run pruning
pruning_hparams = copy.deepcopy(self.lottery_desc.pruning_hparams)
pruning_hparams.pruning_strategy = strategy
pruning_hparams.pruning_fraction = target_pruning_fraction
new_mask = pruning.registry.get(pruning_hparams)(
model, Mask.ones_like(model),
self.lottery_desc.training_hparams,
self.lottery_desc.dataset_hparams, seed
)
new_mask.save(self.branch_root)
repruned_model = PrunedModel(model.to(device=get_platform().cpu_device), new_mask)
# Run training
train.standard_train(repruned_model, self.branch_root, self.lottery_desc.dataset_hparams,
self.lottery_desc.training_hparams, start_step=start_step, verbose=self.verbose)
@staticmethod
def description():
return "Reprune the model using early pruning methods."
@staticmethod
def name():
return 'prune_early'
| 37.48 | 109 | 0.668801 |
4a2384b7eb26dfa0cfff764f86aa6eaab170ed33 | 559 | py | Python | Python-For-Everyone-Horstmann/Chapter11-Recursion/nose_triangle_area.py | islayy/Books-solutions | 5fe05deb4e9f65875284d8af43bd383bf9ae145b | [
"MIT"
] | null | null | null | Python-For-Everyone-Horstmann/Chapter11-Recursion/nose_triangle_area.py | islayy/Books-solutions | 5fe05deb4e9f65875284d8af43bd383bf9ae145b | [
"MIT"
] | null | null | null | Python-For-Everyone-Horstmann/Chapter11-Recursion/nose_triangle_area.py | islayy/Books-solutions | 5fe05deb4e9f65875284d8af43bd383bf9ae145b | [
"MIT"
] | 1 | 2021-01-30T22:19:07.000Z | 2021-01-30T22:19:07.000Z | # Unit tests for triangle_area.py
# IMPORTS
from triangle_area import triangle_area
from nose.tools import eq_
class TestTriangleArea():
def test_triangle_area_side_length_0(self):
eq_(0, triangle_area(0))
def test_triangle_area_side_length_1(self):
eq_(1, triangle_area(1))
def test_triangle_area_side_length2(self):
eq_(3, triangle_area(2))
def test_triangle_area_side_length10(self):
eq_(55, triangle_area(10))
def test_triangle_area_side_length_negative(self):
eq_(0, triangle_area(-5))
| 23.291667 | 54 | 0.726297 |
4a2384f61b3ba0b16c77766c44234ae764686626 | 1,969 | py | Python | airflow/api/common/experimental/__init__.py | lukeplus/docker-airflow | dbdba3ec537913601541f99d9ed6b13dfe5f3619 | [
"Apache-2.0"
] | 2 | 2021-01-27T09:27:21.000Z | 2021-05-14T05:49:23.000Z | airflow/api/common/experimental/__init__.py | lukeplus/docker-airflow | dbdba3ec537913601541f99d9ed6b13dfe5f3619 | [
"Apache-2.0"
] | 22 | 2019-12-09T23:22:07.000Z | 2021-05-12T23:15:40.000Z | airflow/api/common/experimental/__init__.py | lukeplus/docker-airflow | dbdba3ec537913601541f99d9ed6b13dfe5f3619 | [
"Apache-2.0"
] | 5 | 2019-11-18T13:19:29.000Z | 2020-03-25T13:20:29.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Experimental APIs."""
from datetime import datetime
from typing import Optional
from airflow.exceptions import DagNotFound, TaskNotFound, DagRunNotFound
from airflow.models import DagBag, DagModel, DagRun
def check_and_get_dag(dag_id, task_id=None): # type: (str, Optional[str]) -> DagModel
"""Checks that DAG exists and in case it is specified that Task exist"""
dagbag = DagBag()
if dag_id not in dagbag.dags:
error_message = "Dag id {} not found".format(dag_id)
raise DagNotFound(error_message)
dag = dagbag.get_dag(dag_id)
if task_id and not dag.has_task(task_id):
error_message = 'Task {} not found in dag {}'.format(task_id, dag_id)
raise TaskNotFound(error_message)
return dag
def check_and_get_dagrun(dag, execution_date): # type: (DagModel, datetime) -> DagRun
"""Get DagRun object and check that it exists"""
dagrun = dag.get_dagrun(execution_date=execution_date)
if not dagrun:
error_message = ('Dag Run for date {} not found in dag {}'
.format(execution_date, dag.dag_id))
raise DagRunNotFound(error_message)
return dagrun
| 41.020833 | 86 | 0.722702 |
4a2384fe5e28d4f4ba701ee19078b04fc4cbd0f7 | 459 | py | Python | updateAttendance.py | Shivani-781/AI-Powered-Hourly-Attendance-Capturing-System | 46c6ba6ea95b71132d2fc87bfa0824fcbca379c5 | [
"MIT"
] | null | null | null | updateAttendance.py | Shivani-781/AI-Powered-Hourly-Attendance-Capturing-System | 46c6ba6ea95b71132d2fc87bfa0824fcbca379c5 | [
"MIT"
] | null | null | null | updateAttendance.py | Shivani-781/AI-Powered-Hourly-Attendance-Capturing-System | 46c6ba6ea95b71132d2fc87bfa0824fcbca379c5 | [
"MIT"
] | null | null | null | import json
import boto3
dynamo = boto3.resource("dynamodb")
table = dynamo.Table("Attendance_Count")
def lambda_handler(event, context):
# TODO implement
res = table.get_item(Key = {"RollNo" : event['RollNo']})
print(res['Item']['Name'])
Count = res['Item']['Count']
    Count = Count + 1
inp = {"RollNo" : event['RollNo'], "Count" : Count, "Name" : res['Item']['Name']}
table.put_item(Item = inp)
return "Successful" | 30.6 | 86 | 0.616558 |
4a238531d96231ab760c380ebe1f20be8314617b | 2,337 | py | Python | beetsplug/unimported.py | jcassette/beets | 10338c2a601c28289cd30debf2537b3523d95446 | [
"MIT"
] | 1 | 2022-03-17T22:44:47.000Z | 2022-03-17T22:44:47.000Z | beetsplug/unimported.py | jcassette/beets | 10338c2a601c28289cd30debf2537b3523d95446 | [
"MIT"
] | 1 | 2022-03-10T00:41:36.000Z | 2022-03-10T00:41:36.000Z | beetsplug/unimported.py | jcassette/beets | 10338c2a601c28289cd30debf2537b3523d95446 | [
"MIT"
] | 1 | 2022-03-10T00:37:26.000Z | 2022-03-10T00:37:26.000Z | # This file is part of beets.
# Copyright 2019, Joris Jensen
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""
List all files in the library folder which are not listed in the
beets library database, including art files
"""
import os
from beets import util
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, print_
__author__ = 'https://github.com/MrNuggelz'
class Unimported(BeetsPlugin):
def __init__(self):
super().__init__()
self.config.add(
{
'ignore_extensions': [],
'ignore_subdirectories': []
}
)
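        # Example beets configuration for this plugin (illustrative values only,
        # not from the original source; the keys mirror the defaults registered above):
        #
        #   unimported:
        #       ignore_extensions: jpg png
        #       ignore_subdirectories: NonMusic data temp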
def commands(self):
def print_unimported(lib, opts, args):
ignore_exts = [
('.' + x).encode()
for x in self.config["ignore_extensions"].as_str_seq()
]
ignore_dirs = [
os.path.join(lib.directory, x.encode())
for x in self.config["ignore_subdirectories"].as_str_seq()
]
in_folder = {
os.path.join(r, file)
for r, d, f in os.walk(lib.directory)
for file in f
if not any(
[file.endswith(ext) for ext in ignore_exts]
+ [r in ignore_dirs]
)
}
in_library = {x.path for x in lib.items()}
art_files = {x.artpath for x in lib.albums()}
for f in in_folder - in_library - art_files:
print_(util.displayable_path(f))
unimported = Subcommand(
'unimported',
help='list all files in the library folder which are not listed'
' in the beets library database')
unimported.func = print_unimported
return [unimported]
| 33.385714 | 76 | 0.599914 |
4a2385856a06d2aaa4ecf3b713c1e4f9acda1265 | 10,281 | py | Python | sirms/mols.py | MariBerry/sirms | acb27280c8612c67d59eb4a508b254c97ee0bb33 | [
"BSD-3-Clause"
] | 13 | 2015-03-27T14:33:32.000Z | 2021-04-08T19:50:07.000Z | sirms/mols.py | MariBerry/sirms | acb27280c8612c67d59eb4a508b254c97ee0bb33 | [
"BSD-3-Clause"
] | null | null | null | sirms/mols.py | MariBerry/sirms | acb27280c8612c67d59eb4a508b254c97ee0bb33 | [
"BSD-3-Clause"
] | 8 | 2017-10-01T22:36:45.000Z | 2020-12-03T10:41:17.000Z | #-------------------------------------------------------------------------------
# Name: mols
# Purpose: Mol class to operate with molecules
#
# Author: Pavel Polishchuk
#
# Created: 11.01.2013
# Copyright: (c) Pavel Polishchuk 2013-2015
# Licence: BSD 3-clause
#-------------------------------------------------------------------------------
from itertools import combinations
import copy
class Mol3:
# double bonds can be:
# 2 - non-steric or undefined double bond (2, 0)
# 21 - Z double bond (2, 1)
# 22 - E double bond (2, 2)
# 23 - cyclic double bond in rings of size up to 7 (such bonds are always cis) (2, 3)
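    # A minimal usage sketch (illustrative, not part of the original source):
    #   m = Mol3()
    #   m.AddAtom(1, 'C', 0.0, 0.0, 0.0, 0)
    #   m.AddAtom(2, 'C', 1.3, 0.0, 0.0, 0)
    #   m.AddBond(1, 2, 2)                 # plain double bond, stored as (2, 0)
    #   m.SetDoubleBondConfig(1, 2, 2)     # mark it as an E double bond -> (2, 2)
    #   m.GetBondType(1, 2)                # returns 22 when self.stereo is True, else 2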
def __init__(self):
self.atoms = {}
self.bonds = dict()
self.title = ""
self.stereo = False
def AddAtom(self, id, label, x, y, z, formal_charge):
self.atoms[id] = {'label': label, 'x': x, 'y': y, 'z': z,
'property': {}, # will contain dicts of type 'elm': {'label': ['C'], 'value': 'C'}
'formal_charge': formal_charge}
self.bonds[id] = dict()
def AddBond(self, id1, id2, bond_type):
if id1 not in self.bonds.keys():
self.bonds[id1] = dict()
if id2 not in self.bonds.keys():
self.bonds[id2] = dict()
        # bond value is a tuple: 1 - bond order, 2 - double bond stereo type (0 - unspecified, 1 - cis, 2 - trans, 3 - cyclic)
self.bonds[id1][id2] = self.bonds[id2][id1] = (bond_type, 0)
def GetBondOrder(self, id1, id2):
return self.bonds.get(id1, dict()).get(id2, (0, 0))[0]
def GetBondType(self, id1, id2):
bond_order = self.GetBondOrder(id1, id2)
if self.stereo and bond_order == 2 and self.bonds[id1][id2][1] != 0:
return self.bonds[id1][id2][0] * 10 + self.bonds[id1][id2][1]
else:
return bond_order
def SetDoubleBondConfig(self, id1, id2, bond_stereo):
if bond_stereo not in [0, 1, 2, 3]:
raise Exception('Wrong double bond stereo!')
self.bonds[id1][id2] = self.bonds[id2][id1] = (2, bond_stereo)
def _Path(self, start_atom, list_atom, cycles_local, visited, size_range):
for a in self.bonds[start_atom].keys():
if len(list_atom) <= max(size_range) and a not in list_atom and a not in visited:
self._Path(a, list_atom + [a], cycles_local, visited, size_range)
elif len(list_atom) in size_range and a == list_atom[0]:
if tuple(set(sorted(list_atom))) not in cycles_local:
cycles_local.add(tuple(set(sorted(list_atom))))
def GetCycles(self, min_size, max_size):
cycles = set()
        visited = set()  # atoms which have already been tested as a cycle member can be excluded from further cycle checks
for a in sorted(self.atoms.keys()):
visited.add(a)
if len(self.bonds[a].keys()) > 1:
self._Path(a, [a], cycles, visited, range(min_size, max_size + 1))
return cycles
def SetCyclicDoubleBondsCis(self, min_size=3, max_size=7):
cycles = self.GetCycles(min_size, max_size)
for cycle in cycles:
for a1, a2 in combinations(cycle, 2):
if self.GetBondOrder(a1, a2) == 2:
self.bonds[a1][a2] = self.bonds[a2][a1] = (2, 3)
class Mol4(Mol3):
def __GetNumberConnectedComponents(self, atoms):
res = 0
visited = [False] * len(atoms)
def dfs(i):
visited[i] = True
for a in self.bonds[atoms[i]].keys():
if a in atoms and not visited[atoms.index(a)]:
dfs(atoms.index(a))
for i in range(len(atoms)):
if not visited[i]:
dfs(i)
res += 1
return res
def __GenConnectedSubgraphs(self, not_visited, min_num_atoms=4, max_num_atoms=4, curr_subset=set(), neighbors=set(), res=[]):
if min_num_atoms <= len(curr_subset) <= max_num_atoms:
res.append(curr_subset)
if not curr_subset:
candidates = set(not_visited)
else:
candidates = not_visited.intersection(neighbors)
if candidates and len(curr_subset) < max_num_atoms:
for a in candidates:
not_visited.remove(a)
tmp1 = set(not_visited)
tmp2 = set(curr_subset)
tmp2.add(a)
tmp3 = not_visited.intersection(self.bonds[a].keys())
tmp3 = neighbors.union(tmp3)
self.__GenConnectedSubgraphs(tmp1, min_num_atoms=min_num_atoms, max_num_atoms=max_num_atoms, curr_subset=tmp2, neighbors=tmp3, res=res)
def __GetAllNeighbours(self, atoms):
output = set(atoms)
for a in atoms:
output = output.union(self.bonds[a].keys())
return output
def GetAtomsCombinations(self, min_num_components=1, max_num_components=2, min_num_atoms=4, max_num_atoms=4, noH=False):
def CheckIntersection(sets, neighbour_sets, ids):
if set.intersection(*[sets[i] for i in ids]):
return True
for i in ids:
for j in ids:
if i != j:
if sets[i].intersection(neighbour_sets[j]):
return True
return False
if noH:
atoms = set(a for a in self.atoms.keys() if self.atoms[a]["label"] != 'H')
else:
atoms = set(self.atoms.keys())
# storage of results
res = []
        # if only single-component fragments are being looked for, there is no need to search for fragments smaller than min_num_atoms
if max_num_components == 1:
self.__GenConnectedSubgraphs(atoms, min_num_atoms=min_num_atoms, max_num_atoms=max_num_atoms, res=res)
else:
self.__GenConnectedSubgraphs(atoms, min_num_atoms=1, max_num_atoms=max_num_atoms, res=res)
# get neighbours
nb = [self.__GetAllNeighbours(v) for v in res]
results = []
for n in range(min_num_components, max_num_components + 1):
for comb in combinations(range(len(res)), n):
# if min_num_atoms <= sum(len(res[i]) for i in comb) <= max_num_atoms and (len(comb) == 1 or not set.intersection(*[nb[i] for i in comb])):
if min_num_atoms <= sum(len(res[i]) for i in comb) <= max_num_atoms and (len(comb) == 1 or not CheckIntersection(res, nb, comb)):
results.append(tuple(set.union(*[res[i] for i in comb])))
return results
class SmilesMol3(Mol4):
tosmileskeys = {0: '.', 1: '-', 2: '=', 3: '#', 4: ':', 8: '~'}
def __tosmiles(self, bond):
return self.tosmileskeys[bond]
def __getSmiles(self, trace, inter, labels_dict):
# trace: atom ids
# inter: selected atom id
strace = set(trace)
iterlist = set(self.bonds[inter].keys()).intersection(self.sub).difference(trace[-2:])
# get atom label
smi = [labels_dict[inter]]
self.nextnumb = 1
concat = []
stoplist = []
iterlen = len(iterlist) - 1
for b, i in enumerate(sorted(list(iterlist), key=self.levels.get)):
if i in strace:
if i not in stoplist:
                    # hack for ring closures, so that a ring is not traversed twice.
cyc = self.nextnumb
self.nextnumb += 1
concat += [(i, cyc, inter)]
smi[0] += '%s%d' % (self.__tosmiles(self.GetBondOrder(inter, i)), cyc)
if b == iterlen and len(smi) > 3:
smi[-1] = smi[-3] = ''
continue
deep = self.__getSmiles(copy.copy(trace + [i]), i, labels_dict)
strace.update(deep[0])
for j in deep[2]:
if j[0] == inter:
stoplist += [j[2]]
smi[0] += '%s%d' % (self.__tosmiles(self.GetBondOrder(inter, j[2])), j[1])
else:
concat.append(j)
smi += ['(' if iterlen - b else '', '%s' % self.__tosmiles(self.GetBondOrder(inter, i)) + deep[1],
')' if iterlen - b else '']
return strace, ''.join(smi), concat
def __get_feature_signatures(self, ids, labels):
feature_signatures = []
for i, label_i in zip(ids, labels):
sign = []
for j, label_j in zip(ids, labels):
if i != j:
bond_order = self.GetBondOrder(i, j)
if bond_order > 0:
sign.append((label_j, bond_order))
feature_signatures.append((label_i,) + tuple(sorted(sign)))
return tuple(feature_signatures)
def __getRanks(self, sub, labels):
prev_len = 0
signatures = self.__get_feature_signatures(sub, labels)
while len(sub) > len(set(signatures)) > prev_len:
prev_len = len(set(signatures))
signatures = self.__get_feature_signatures(sub, signatures)
s = sorted(signatures)
return {atom: s.index(sign) for atom, sign in zip(sub, signatures)}
def get_name(self, sub, labels):
# if {10, 11, 12}.issubset(sub):
# # if 'H' in labels:
# if set(sub).intersection([24, 20]):
# print(sub)
# self.nextnumb = self.__numb()
self.sub = set(sub)
# self.levels = self.__getWeininger(sub, labels)
self.levels = self.__getRanks(sub, labels)
inter = min(self.levels, key=self.levels.get)
res = [self.__getSmiles([inter], inter, dict(zip(sub, labels)))]
visited = res[0][0]
while visited != self.sub:
remained = {k: self.levels[k] for k in self.sub.difference(visited)}
inter = min(remained, key=remained.get)
res.append(self.__getSmiles([inter], inter, dict(zip(sub, labels))))
visited = visited.union(res[-1][0])
# to get canonical multi-component SMILES
res = sorted([r[1] for r in res])
return '.'.join(res)
# def __numb(self):
# i = 1
# while True:
# yield i
# i += 1
| 38.219331 | 155 | 0.546056 |
4a23862b035687aea25a21977a8bbe82ee13f664 | 4,345 | py | Python | seal_rookery/convert_images.py | umeboshi2/seal-rookery | 9115d8d8f8d9e0e383670bb6e824875645a4b7c6 | [
"Unlicense"
] | null | null | null | seal_rookery/convert_images.py | umeboshi2/seal-rookery | 9115d8d8f8d9e0e383670bb6e824875645a4b7c6 | [
"Unlicense"
] | null | null | null | seal_rookery/convert_images.py | umeboshi2/seal-rookery | 9115d8d8f8d9e0e383670bb6e824875645a4b7c6 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import argparse
import hashlib
import json
import os
import subprocess
import sys
from seal_rookery import seals_root, seals_data
ORIG_DIR = os.path.join(seals_root, 'orig')
def get_old_hash(img):
"""Get the old hash from seals.json"""
try:
old_hash = seals_data[img.split('.')[0]]['hash']
except KeyError:
old_hash = None
return old_hash
def get_hash_from_file(img):
"""Get the hash from the current file"""
with open(img, 'rb') as f:
return hashlib.sha256(f.read()).hexdigest()
def set_new_hash(court_id, new_hash):
"""Update the json object with new values"""
seals_data[court_id]['hash'] = new_hash
def convert_images(verbose=False, forced=False):
"""
Convert the original seal images to their different scaled outputs for
use either in CourtListener or another application.
:param verbose: if True, provides detailed conversion feedback to stdout
:param forced: if True, ignores unchanged hashes and regenerates images
:return: tuple (number changed, number skipped)
"""
images = os.listdir(ORIG_DIR)
num_images = len(images)
num_changed = 0
num_skipped = 0
for i, image in enumerate(images, 1):
if verbose:
sys.stdout.write(u"\nProcessing: %s" % image)
else:
sys.stdout.write(u'\rUpdating seals: %s of %s' % (i, num_images))
sys.stdout.flush()
court_id = image.split('.')[0]
final_name = '%s.png' % court_id
path_to_orig = os.path.join(ORIG_DIR, image)
current_hash = get_hash_from_file(path_to_orig)
old_hash = get_old_hash(image)
if current_hash != old_hash or forced:
# Update the hash
set_new_hash(court_id, current_hash)
# Regenerate the images
for size in ['128', '256', '512', '1024']:
if verbose:
sys.stdout.write(u" - Making {size}x{size} image...".format(
size=size
))
path_to_output = '%s/%s/%s' % (seals_root, size, final_name)
if verbose:
sys.stdout.write(
u' - writing to %s' % (path_to_output,)
)
command = [
'convert',
'-resize',
'%sx%s' % (size, size),
'-background',
'transparent',
path_to_orig,
path_to_output,
]
subprocess.Popen(command, shell=False).communicate()
num_changed += 1
else:
if verbose:
sys.stdout.write(u' - Unchanged hash, moving on.')
num_skipped += 1
if not verbose:
sys.stdout.write(
u"\nDone:\n %s seals updated\n %s seals skipped\n" % (
num_changed,
num_skipped,
))
return num_changed, num_skipped
def save_new_json():
"""Update the JSON object on disk."""
json.dump(
seals_data,
open(os.path.join(seals_root, 'seals.json'), 'w'),
sort_keys=True,
indent=4,
)
def main(argv=None):
"""
Main function and entry point for console script
:param argv: list of command line args, probably from sys.argv
    :return: exit status code (0 on success, 1 on failure)
"""
# when running as a console_script via setuptools, no args are passed,
# so we need to try grabbing sys.argv
parser = argparse.ArgumentParser(prog='update-seals')
parser.add_argument('-f',
action='count',
help='force seal update or regeneration')
parser.add_argument('-v',
action='count',
help='turn on verbose seal generation messages')
args = parser.parse_args(argv)
try:
changed, skipped = convert_images(
verbose=bool(args.v), forced=bool(args.f)
)
save_new_json()
except Exception as error:
# Note: will not catch SystemExit from parser.parse_args
print('Failed to update seals!')
print(str(error))
return 1
return 0
if __name__ == '__main__':
main(sys.argv)
| 30.384615 | 81 | 0.565017 |
4a238734324b5e6799629685af33b52d30e1791b | 146 | py | Python | login-service/app/db/dependencies.py | javapagar/fast-api_microservice | e8d350ff6be376f8e58a7f0204bde2b74f6dcd49 | [
"MIT"
] | null | null | null | login-service/app/db/dependencies.py | javapagar/fast-api_microservice | e8d350ff6be376f8e58a7f0204bde2b74f6dcd49 | [
"MIT"
] | null | null | null | login-service/app/db/dependencies.py | javapagar/fast-api_microservice | e8d350ff6be376f8e58a7f0204bde2b74f6dcd49 | [
"MIT"
] | null | null | null | from db.database import SessionLocal
# Dependency
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close() | 16.222222 | 36 | 0.609589 |
4a23886a570384a4d6bea6cd8d18e1ef0b7eb705 | 66,657 | py | Python | src/instructions.py | hansbonini/pynes-dev | aa5d04de0a1beb6afb93219ffc9f63e83b3907a0 | [
"MIT"
] | null | null | null | src/instructions.py | hansbonini/pynes-dev | aa5d04de0a1beb6afb93219ffc9f63e83b3907a0 | [
"MIT"
] | null | null | null | src/instructions.py | hansbonini/pynes-dev | aa5d04de0a1beb6afb93219ffc9f63e83b3907a0 | [
"MIT"
] | null | null | null | import addrmodes
# TODO: Check that there are no addressing operands larger than 1 byte
def rel_addr(value):
if value & 0b10000000:
value &= 0b1111111
value -= 128
return value
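# Illustrative behaviour (not part of the original source): the operand byte is
# reinterpreted as a signed two's-complement branch offset, e.g.
#   rel_addr(0x05) -> 5     (branch 5 bytes forward)
#   rel_addr(0xFB) -> -5    (branch 5 bytes backward)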
def advancePC(cpu, size):
cpu.registers['PC'] += size
def setN(cpu, value):
if value & (1 << 7) == 1 << 7:
cpu.setStatus(cpu.statusFlags['n'], 1)
else:
cpu.setStatus(cpu.statusFlags['n'], 0)
def setZ(cpu, value):
if value == 0:
cpu.setStatus(cpu.statusFlags['z'], 1)
else:
cpu.setStatus(cpu.statusFlags['z'], 0)
def setO(cpu, value):
cpu.setStatus(cpu.statusFlags['v'], value)
def setC(cpu, value):
cpu.setStatus(cpu.statusFlags['c'], value)
def ADC_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 0xFF)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 0xFF)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ADC_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 255)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def AND_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.registers['A'] & cpu.readMemory(cpu.registers['PC'] + 1)
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def AND_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def AND_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def ASL_Accumulator(cpu):
size = 1
nCycles = 2
value = cpu.registers['A']
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.registers['A'] = value
return nCycles
def ASL_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ASL_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ASL_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ASL_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def BCC_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if not cpu.getStatus(cpu.statusFlags['c']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BCS_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if cpu.getStatus(cpu.statusFlags['c']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BEQ_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if cpu.getStatus(cpu.statusFlags['z']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BIT_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value & cpu.registers['A'])
setO(cpu, (value >> 6) & 1)
return nCycles
def BIT_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value & cpu.registers['A'])
setO(cpu, (value >> 6) & 1)
return nCycles
def BMI_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if cpu.getStatus(cpu.statusFlags['n']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BNE_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if not cpu.getStatus(cpu.statusFlags['z']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BPL_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if not cpu.getStatus(cpu.statusFlags['n']):
nCycles += 1
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 1
#cpu.registers['PC'] += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BRK_Implied(cpu):
size = 1
nCycles = 7
cpu.registers['PC'] += 2
cpu.pushStack((cpu.registers['PC'] >> 8) & 0xFF)
cpu.pushStack(cpu.registers['PC'] & 0xFF)
cpu.setStatus(cpu.statusFlags['b'], 1)
cpu.pushStack(cpu.registers['P'])
cpu.setStatus(cpu.statusFlags['i'], 1)
cpu.InterruptRequest = 0x49
advancePC(cpu, size)
return nCycles
def BVC_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if not cpu.getStatus(cpu.statusFlags['v']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def BVS_Relative(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = rel_addr(value)
if cpu.getStatus(cpu.statusFlags['v']):
if (cpu.registers['PC'] & 0xFF00) != (
(cpu.registers['PC'] + value) & 0xFF00):
nCycles += 2
else:
nCycles += 1
advancePC(cpu, value)
advancePC(cpu, size)
return nCycles
def CLC_Implied(cpu):
size = 1
nCycles = 2
cpu.setStatus(cpu.statusFlags['c'], 0)
advancePC(cpu, size)
return nCycles
def CLD_Implied(cpu):
size = 1
nCycles = 2
cpu.setStatus(cpu.statusFlags['d'], 0)
advancePC(cpu, size)
return nCycles
def CLI_Implied(cpu):
size = 1
nCycles = 2
cpu.setStatus(cpu.statusFlags['i'], 0)
advancePC(cpu, size)
return nCycles
def CLV_Implied(cpu):
size = 1
nCycles = 2
cpu.setStatus(cpu.statusFlags['v'], 0)
advancePC(cpu, size)
return nCycles
def CMP_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CMP_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPX_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = cpu.registers['X'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPX_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = cpu.registers['X'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPX_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = cpu.registers['X'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPY_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value = cpu.registers['Y'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPY_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = cpu.registers['Y'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def CPY_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = cpu.registers['Y'] - value
advancePC(cpu, size)
setC(cpu, 1 if value >= 0 else 0)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def DEC_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def DEC_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def DEC_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def DEC_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def DEX_Implied(cpu):
size = 1
nCycles = 2
value = cpu.registers['X']
value = (value - 1) & 0xFF
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def DEY_Implied(cpu):
size = 1
nCycles = 2
value = cpu.registers['Y']
value = (value - 1) & 0xFF
cpu.registers['Y'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def EOR_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value ^= cpu.registers['A']
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def EOR_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value ^= cpu.registers['A']
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def EOR_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
value ^= cpu.registers['A']
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def EOR_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value ^= cpu.registers['A']
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def EOR_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
value ^= cpu.registers['A']
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def EOR_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
value ^= cpu.registers['A']
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def EOR_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
value ^= cpu.registers['A']
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def EOR_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
value ^= cpu.registers['A']
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def INC_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def INC_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def INC_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def INC_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def INX_Implied(cpu):
size = 1
nCycles = 2
value = cpu.registers['X']
value = (value + 1) & 0xFF
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def INY_Implied(cpu):
size = 1
nCycles = 2
value = cpu.registers['Y']
value = (value + 1) & 0xFF
cpu.registers['Y'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def JMP_Absolute(cpu):
size = 3
nCycles = 3
address = addrmodes.Absolute(cpu)
advancePC(cpu, size)
cpu.registers['PC'] = address
return nCycles
def JMP_Indirect(cpu):
size = 3
nCycles = 5
address = addrmodes.Indirect(cpu)
advancePC(cpu, size)
cpu.registers['PC'] = address
return nCycles
def JSR_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
advancePC(cpu, 2)
cpu.pushStack((cpu.registers['PC'] >> 8) & 0xFF)
cpu.pushStack(cpu.registers['PC'] & 0xFF)
cpu.registers['PC'] = address
return nCycles
def LDA_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDA_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDA_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDA_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDA_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDA_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDA_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDA_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDX_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDX_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDX_Zero_Y(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDX_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDX_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDY_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
cpu.registers['Y'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDY_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
cpu.registers['Y'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDY_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
cpu.registers['Y'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDY_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
cpu.registers['Y'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LDY_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
cpu.registers['Y'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LSR_Accumulator(cpu):
size = 1
nCycles = 2
value = cpu.registers['A']
setC(cpu, value & 0x01)
value >>= 1
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LSR_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LSR_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LSR_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LSR_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.writeMemory(address, value)
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def NOP_Implied(cpu):
size = 1
nCycles = 2
advancePC(cpu, size)
return nCycles
def ORA_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
value |= cpu.registers['A']
advancePC(cpu, size)
cpu.registers['A'] = value
setN(cpu, value)
setZ(cpu, value)
return nCycles
def ORA_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value |= cpu.registers['A']
advancePC(cpu, size)
cpu.registers['A'] = value
setN(cpu, value)
setZ(cpu, value)
return nCycles
def ORA_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
value |= cpu.registers['A']
advancePC(cpu, size)
cpu.registers['A'] = value
setN(cpu, value)
setZ(cpu, value)
return nCycles
def ORA_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value |= cpu.registers['A']
advancePC(cpu, size)
cpu.registers['A'] = value
setN(cpu, value)
setZ(cpu, value)
return nCycles
def ORA_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
value |= cpu.registers['A']
advancePC(cpu, size)
cpu.registers['A'] = value
setN(cpu, value)
setZ(cpu, value)
return nCycles
def ORA_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
value |= cpu.registers['A']
advancePC(cpu, size)
cpu.registers['A'] = value
setN(cpu, value)
setZ(cpu, value)
return nCycles
def ORA_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
value |= cpu.registers['A']
advancePC(cpu, size)
cpu.registers['A'] = value
setN(cpu, value)
setZ(cpu, value)
return nCycles
def ORA_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
value |= cpu.registers['A']
advancePC(cpu, size)
cpu.registers['A'] = value
setN(cpu, value)
setZ(cpu, value)
return nCycles
def PHA_Implied(cpu):
size = 1
nCycles = 3
value = cpu.registers['A']
cpu.pushStack(value)
advancePC(cpu, size)
return nCycles
def PHP_Implied(cpu):
size = 1
nCycles = 3
value = cpu.registers['P']
cpu.pushStack(value)
advancePC(cpu, size)
return nCycles
def PLA_Implied(cpu):
size = 1
nCycles = 4
value = cpu.pullStack()
cpu.registers['A'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def PLP_Implied(cpu):
size = 1
nCycles = 4
value = cpu.pullStack()
# Don't set the break flag
cpu.registers['P'] = (value & 0xEF)
# Always set the non used flag
cpu.registers['P'] |= (1 << 5)
advancePC(cpu, size)
return nCycles
def ROL_Accumulator(cpu):
size = 1
nCycles = 2
value = cpu.registers['A']
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.registers['A'] = value
return nCycles
def ROL_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ROL_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ROL_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ROL_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ROR_Accumulator(cpu):
size = 1
nCycles = 2
value = cpu.registers['A']
if cpu.getStatus(cpu.statusFlags['c']):
value |= 0x100
setC(cpu, value & 0x01)
value >>= 1
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
cpu.registers['A'] = value
return nCycles
def ROR_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) + carry
advancePC(cpu, size)
    setN(cpu, value)  # bit 7 of the rotated value (the old carry) drives the N flag
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ROR_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) + carry
advancePC(cpu, size)
    setN(cpu, value)  # bit 7 of the rotated value (the old carry) drives the N flag
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ROR_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) + carry
advancePC(cpu, size)
    setN(cpu, value)  # bit 7 of the rotated value (the old carry) drives the N flag
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def ROR_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) + carry
advancePC(cpu, size)
    setN(cpu, value)  # bit 7 of the rotated value (the old carry) drives the N flag
setZ(cpu, value)
cpu.writeMemory(address, value)
return nCycles
def RTI_Implied(cpu):
size = 1
nCycles = 6
value = cpu.pullStack()
cpu.registers['P'] = value
cpu.registers['P'] |= (1 << 5)
value = cpu.pullStack()
value |= (cpu.pullStack() << 8)
cpu.registers['PC'] = value
return nCycles
def RTS_Implied(cpu):
size = 1
nCycles = 6
value = cpu.pullStack()
value += ((cpu.pullStack()) << 8)
cpu.registers['PC'] = value
advancePC(cpu, size)
return nCycles
def SBC_Immediate(cpu):
size = 2
nCycles = 2
value = cpu.readMemory(cpu.registers['PC'] + 1)
carry = cpu.getStatus(cpu.statusFlags['c'])
    # TODO: double-check the (1 - carry) borrow handling later
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def SBC_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def SBC_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def SBC_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def SBC_Absolute_X(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def SBC_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def SBC_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def SBC_Indirect_Y(cpu):
size = 2
nCycles = 5
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def SEC_Implied(cpu):
size = 1
nCycles = 2
cpu.setStatus(cpu.statusFlags['c'], 1)
advancePC(cpu, size)
return nCycles
def SED_Implied(cpu):
size = 1
nCycles = 2
cpu.setStatus(cpu.statusFlags['d'], 1)
advancePC(cpu, size)
return nCycles
def SEI_Implied(cpu):
size = 1
nCycles = 2
cpu.setStatus(cpu.statusFlags['i'], 1)
advancePC(cpu, size)
return nCycles
def STA_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
cpu.writeMemory(address, cpu.registers['A'])
advancePC(cpu, size)
return nCycles
def STA_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
cpu.writeMemory(address, cpu.registers['A'])
advancePC(cpu, size)
return nCycles
def STA_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
cpu.writeMemory(address, cpu.registers['A'])
advancePC(cpu, size)
return nCycles
def STA_Absolute_X(cpu):
size = 3
nCycles = 5
address = addrmodes.Absolute_X(cpu)
cpu.writeMemory(address, cpu.registers['A'])
advancePC(cpu, size)
return nCycles
def STA_Absolute_Y(cpu):
size = 3
nCycles = 5
address = addrmodes.Absolute_Y(cpu)
cpu.writeMemory(address, cpu.registers['A'])
advancePC(cpu, size)
return nCycles
def STA_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
cpu.writeMemory(address, cpu.registers['A'])
advancePC(cpu, size)
return nCycles
def STA_Indirect_Y(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_Y(cpu)
cpu.writeMemory(address, cpu.registers['A'])
advancePC(cpu, size)
return nCycles
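# STX: store the X register to memory.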
def STX_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
cpu.writeMemory(address, cpu.registers['X'])
advancePC(cpu, size)
return nCycles
def STX_Zero_Y(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_Y(cpu)
cpu.writeMemory(address, cpu.registers['X'])
advancePC(cpu, size)
return nCycles
def STX_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
cpu.writeMemory(address, cpu.registers['X'])
advancePC(cpu, size)
return nCycles
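# STY: store the Y register to memory.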
def STY_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
cpu.writeMemory(address, cpu.registers['Y'])
advancePC(cpu, size)
return nCycles
def STY_Zero_X(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_X(cpu)
cpu.writeMemory(address, cpu.registers['Y'])
advancePC(cpu, size)
return nCycles
def STY_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
cpu.writeMemory(address, cpu.registers['Y'])
advancePC(cpu, size)
return nCycles
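# Register transfers: TAX/TAY/TSX/TXA/TYA copy one register to another and update N/Z;
# TXS copies X to the stack pointer without touching the flags.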
def TAX_Implied(cpu):
size = 1
nCycles = 2
value = cpu.registers['A']
setN(cpu, value)
setZ(cpu, value)
cpu.registers['X'] = value
advancePC(cpu, size)
return nCycles
def TAY_Implied(cpu):
size = 1
nCycles = 2
value = cpu.registers['A']
setN(cpu, value)
setZ(cpu, value)
cpu.registers['Y'] = value
advancePC(cpu, size)
return nCycles
def TSX_Implied(cpu):
size = 1
nCycles = 2
value = cpu.registers['SP']
setN(cpu, value)
setZ(cpu, value)
cpu.registers['X'] = value
advancePC(cpu, size)
return nCycles
def TXA_Implied(cpu):
size = 1
nCycles = 2
value = cpu.registers['X']
setN(cpu, value)
setZ(cpu, value)
cpu.registers['A'] = value
advancePC(cpu, size)
return nCycles
def TXS_Implied(cpu):
size = 1
nCycles = 2
cpu.registers['SP'] = cpu.registers['X']
advancePC(cpu, size)
return nCycles
def TYA_Implied(cpu):
size = 1
nCycles = 2
value = cpu.registers['Y']
setN(cpu, value)
setZ(cpu, value)
cpu.registers['A'] = value
advancePC(cpu, size)
return nCycles
# Unofficial Opcodes
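# DCP: decrement the memory operand, then compare the result against A (sets C/N/Z like CMP).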
def DCP_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, ~value >> 8 & 0x1)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def DCP_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, ~value >> 8 & 0x1)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def DCP_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, ~value >> 8 & 0x1)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def DCP_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, ~value >> 8 & 0x1)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def DCP_Absolute_Y(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, ~value >> 8 & 0x1)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def DCP_Indirect_X(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, ~value >> 8 & 0x1)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
def DCP_Indirect_Y(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
value = (value - 1) & 0xFF
cpu.writeMemory(address, value)
value = cpu.registers['A'] - value
advancePC(cpu, size)
setC(cpu, ~value >> 8 & 0x1)
setN(cpu, value)
setZ(cpu, value & 0xFF)
return nCycles
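# DOP ("double NOP"): a two-byte NOP -- only the program counter is advanced past the operand.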
def DOP_Immediate(cpu):
size = 2
nCycles = 2
advancePC(cpu, size)
return nCycles
def DOP_Zero(cpu):
size = 2
nCycles = 3
advancePC(cpu, size)
return nCycles
def DOP_Zero_X(cpu):
size = 2
nCycles = 4
advancePC(cpu, size)
return nCycles
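# ISB (also known as ISC): increment the memory operand, then subtract it from A with borrow, like SBC.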
def ISB_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ISB_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ISB_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ISB_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ISB_Absolute_Y(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ISB_Indirect_X(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def ISB_Indirect_Y(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
value = (value + 1) & 0xFF
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = cpu.registers['A'] - value - (1 - carry)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
setO(cpu, (((cpu.registers['A'] ^ tmp) & 0x80) != 0 and
((cpu.registers['A'] ^ value) & 0x80) != 0))
setC(cpu, 0 if tmp < 0 else 1)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
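# LAX: load the same memory value into both A and X.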
def LAX_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LAX_Zero_Y(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LAX_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LAX_Absolute_Y(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LAX_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
def LAX_Indirect_Y(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
cpu.registers['A'] = value
cpu.registers['X'] = value
advancePC(cpu, size)
setN(cpu, value)
setZ(cpu, value)
return nCycles
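# RLA: rotate the memory operand left through carry, then AND the result into A.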
def RLA_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
cpu.writeMemory(address, value)
return nCycles
def RLA_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
cpu.writeMemory(address, value)
return nCycles
def RLA_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
cpu.writeMemory(address, value)
return nCycles
def RLA_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
cpu.writeMemory(address, value)
return nCycles
def RLA_Absolute_Y(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
cpu.writeMemory(address, value)
return nCycles
def RLA_Indirect_X(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
cpu.writeMemory(address, value)
return nCycles
def RLA_Indirect_Y(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
carry = cpu.getStatus(cpu.statusFlags['c'])
setC(cpu, (value >> 7) & 1)
value = ((value << 1) & 0xFF) + carry
cpu.registers['A'] &= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
cpu.writeMemory(address, value)
return nCycles
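# RRA: rotate the memory operand right through carry, then add the result to A with carry, like ADC.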
def RRA_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) | carry
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 0xFF)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def RRA_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) | carry
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 0xFF)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def RRA_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) | carry
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 0xFF)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def RRA_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) | carry
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 0xFF)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def RRA_Absolute_Y(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) | carry
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 0xFF)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def RRA_Indirect_X(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) | carry
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 0xFF)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
def RRA_Indirect_Y(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
carry = (cpu.getStatus(cpu.statusFlags['c']) << 7)
setC(cpu, value & 0x01)
value = (value >> 1) | carry
cpu.writeMemory(address, value)
carry = cpu.getStatus(cpu.statusFlags['c'])
tmp = value + cpu.registers['A'] + carry
setO(
cpu, not (((cpu.registers['A'] ^ value) & 0x80) != 0)
and (((cpu.registers['A'] ^ tmp) & 0x80)))
setC(cpu, tmp > 0xFF)
setN(cpu, tmp)
setZ(cpu, tmp & 0xFF)
cpu.registers['A'] = (tmp & 0xFF)
advancePC(cpu, size)
return nCycles
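# SAX: store (A AND X) to memory; no flags are affected.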
def SAX_Zero(cpu):
size = 2
nCycles = 3
address = addrmodes.Zero(cpu)
value = cpu.registers['X'] & cpu.registers['A']
cpu.writeMemory(address, value)
advancePC(cpu, size)
return nCycles
def SAX_Zero_Y(cpu):
size = 2
nCycles = 4
address = addrmodes.Zero_Y(cpu)
value = cpu.registers['X'] & cpu.registers['A']
cpu.writeMemory(address, value)
advancePC(cpu, size)
return nCycles
def SAX_Absolute(cpu):
size = 3
nCycles = 4
address = addrmodes.Absolute(cpu)
value = cpu.registers['X'] & cpu.registers['A']
cpu.writeMemory(address, value)
advancePC(cpu, size)
return nCycles
def SAX_Indirect_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Indirect_X(cpu)
value = cpu.registers['X'] & cpu.registers['A']
cpu.writeMemory(address, value)
advancePC(cpu, size)
return nCycles
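# SLO: shift the memory operand left one bit (ASL), then OR the result into A.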
def SLO_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
cpu.writeMemory(address, value)
cpu.registers['A'] |= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def SLO_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
cpu.writeMemory(address, value)
cpu.registers['A'] |= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def SLO_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
cpu.writeMemory(address, value)
cpu.registers['A'] |= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def SLO_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
cpu.writeMemory(address, value)
cpu.registers['A'] |= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def SLO_Absolute_Y(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
cpu.writeMemory(address, value)
cpu.registers['A'] |= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def SLO_Indirect_X(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
cpu.writeMemory(address, value)
cpu.registers['A'] |= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
def SLO_Indirect_Y(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x80)
value <<= 1
value &= 0xFF
cpu.writeMemory(address, value)
cpu.registers['A'] |= value
advancePC(cpu, size)
setN(cpu, cpu.registers['A'])
setZ(cpu, cpu.registers['A'])
return nCycles
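# SRE: shift the memory operand right one bit (LSR), then XOR the result into A.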
def SRE_Zero(cpu):
size = 2
nCycles = 5
address = addrmodes.Zero(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.registers['A'] ^= value
cpu.writeMemory(address, value)
advancePC(cpu, size)
setZ(cpu, cpu.registers['A'])
setN(cpu, cpu.registers['A'])
return nCycles
def SRE_Zero_X(cpu):
size = 2
nCycles = 6
address = addrmodes.Zero_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.registers['A'] ^= value
cpu.writeMemory(address, value)
advancePC(cpu, size)
setZ(cpu, cpu.registers['A'])
setN(cpu, cpu.registers['A'])
return nCycles
def SRE_Absolute(cpu):
size = 3
nCycles = 6
address = addrmodes.Absolute(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.registers['A'] ^= value
cpu.writeMemory(address, value)
advancePC(cpu, size)
setZ(cpu, cpu.registers['A'])
setN(cpu, cpu.registers['A'])
return nCycles
def SRE_Absolute_X(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.registers['A'] ^= value
cpu.writeMemory(address, value)
advancePC(cpu, size)
setZ(cpu, cpu.registers['A'])
setN(cpu, cpu.registers['A'])
return nCycles
def SRE_Absolute_Y(cpu):
size = 3
nCycles = 7
address = addrmodes.Absolute_Y(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.registers['A'] ^= value
cpu.writeMemory(address, value)
advancePC(cpu, size)
setZ(cpu, cpu.registers['A'])
setN(cpu, cpu.registers['A'])
return nCycles
def SRE_Indirect_X(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_X(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.registers['A'] ^= value
cpu.writeMemory(address, value)
advancePC(cpu, size)
setZ(cpu, cpu.registers['A'])
setN(cpu, cpu.registers['A'])
return nCycles
def SRE_Indirect_Y(cpu):
size = 2
nCycles = 8
address = addrmodes.Indirect_Y(cpu)
value = cpu.readMemory(address)
setC(cpu, value & 0x01)
value >>= 1
cpu.registers['A'] ^= value
cpu.writeMemory(address, value)
advancePC(cpu, size)
setZ(cpu, cpu.registers['A'])
setN(cpu, cpu.registers['A'])
return nCycles
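# TOP ("triple NOP"): a three-byte NOP -- only the program counter is advanced past the absolute operand.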
def TOP_Absolute(cpu):
size = 3
nCycles = 4
advancePC(cpu, size)
return nCycles
def TOP_Absolute_X(cpu):
size = 3
nCycles = 4
advancePC(cpu, size)
return nCycles
| 21.578828 | 72 | 0.599052 |
4a2389202c06a1615287a427c942617bda88ee87 | 952 | py | Python | venv/Lib/site-packages/nipype/algorithms/tests/test_auto_ICC.py | richung99/digitizePlots | 6b408c820660a415a289726e3223e8f558d3e18b | ["MIT"] | 585 | 2015-01-12T16:06:47.000Z | 2022-03-26T14:51:08.000Z | nipype/algorithms/tests/test_auto_ICC.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | ["Apache-2.0"] | 2,329 | 2015-01-01T09:56:41.000Z | 2022-03-30T14:24:49.000Z | nipype/algorithms/tests/test_auto_ICC.py | tamires-consulting/nipype | b7879d75a63b6500b2e7d2c3eba5aa7670339274 | ["Apache-2.0"] | 487 | 2015-01-20T01:04:52.000Z | 2022-03-21T21:22:47.000Z |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..icc import ICC
def test_ICC_inputs():
input_map = dict(
mask=dict(
extensions=None,
mandatory=True,
),
subjects_sessions=dict(
mandatory=True,
),
)
inputs = ICC.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ICC_outputs():
output_map = dict(
icc_map=dict(
extensions=None,
),
session_var_map=dict(
extensions=None,
),
subject_var_map=dict(
extensions=None,
),
)
outputs = ICC.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 24.410256 | 67 | 0.566176 |
4a23895ba928c0d624894404fb5f9c2453f26517 | 1,117 | py | Python | CTFd/plugins/ctfd_glowworm/vulns/web_pyblog/home/ctf/blog/django_blog/urls.py | Threezh1/H1ve | 2688c68eaf213a4cfca4c8611588d7050e131838 | ["Apache-2.0"] | 501 | 2019-11-22T07:19:06.000Z | 2022-03-28T07:16:40.000Z | CTFd/plugins/ctfd_glowworm/vulns/web_pyblog/home/ctf/blog/django_blog/urls.py | bei-gui/jnvc-ctf | eb7002999700da81b5f9de4a28bc40b97631ddd4 | ["Apache-2.0"] | 58 | 2019-12-02T13:59:15.000Z | 2022-02-26T01:53:52.000Z | CTFd/plugins/ctfd_glowworm/vulns/web_pyblog/home/ctf/blog/django_blog/urls.py | bei-gui/jnvc-ctf | eb7002999700da81b5f9de4a28bc40b97631ddd4 | ["Apache-2.0"] | 128 | 2019-12-02T11:15:58.000Z | 2022-03-27T08:25:59.000Z |
"""django_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.http import HttpResponse  # required by the robots.txt lambda in urlpatterns below
from blog import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^blog/', include('blog.urls', namespace='blog')),
url(r'^blog/$', views.Index, name='index'),
url(r'^$', views.Index),
url(r'^robots\.txt$', lambda r: HttpResponse('User-agent: *\nDisallow: /hello/\nDisallow: /api/uploadarticle/\nDisallow: /api/downloadarticle/', content_type='text/plain')),
]
| 38.517241 | 177 | 0.692032 |