max_stars_repo_path
stringlengths 3
269
| max_stars_repo_name
stringlengths 4
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.05M
| score
float64 0.23
5.13
| int_score
int64 0
5
|
---|---|---|---|---|---|---|
mflops/model_info.py | shuncyu/mflops | 1 | 4800 | <reponame>shuncyu/mflops
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 17:38:48 2020
@author: luke
"""
import sys
from functools import partial
import torch
import torch.nn as nn
import prettytable as pt
from .basic_hook import MODULES_MAPPING
def get_model_compute_info(model, input_res,
                           print_per_layer_stat=False,
                           input_constructor=None, ost=sys.stdout,
                           verbose=False, ignore_modules=None,
                           custom_modules_hooks=None):
    """Measure FLOPs, memory access cost (MAC) and parameter count of a model.

    Runs one forward pass with hooks attached to every supported layer,
    prints a summary table, and returns the raw counts.

    Args:
        model: the ``nn.Module`` to profile.
        input_res: tuple with the input shape *excluding* the batch dimension.
        print_per_layer_stat: also print a per-layer breakdown via
            ``print_model_with_compute``.
        input_constructor: optional callable mapping ``input_res`` to a dict of
            keyword inputs, for models whose forward() takes several arguments.
        ost: output stream for warnings / per-layer stats.
        verbose: emit a warning for each module type treated as zero-op.
        ignore_modules: module types to skip (counted as zero-op).
        custom_modules_hooks: mapping of module type -> custom counting hook.

    Returns:
        (flops_count, mac_count, params_count)
    """
    assert type(input_res) is tuple
    assert len(input_res) >= 1
    assert isinstance(model, nn.Module)
    # Bug fix: the defaults used to be a shared list/dict literal, which leaks
    # state between calls (classic mutable-default-argument pitfall).
    if ignore_modules is None:
        ignore_modules = []
    if custom_modules_hooks is None:
        custom_modules_hooks = {}
    global CUSTOM_MODULES_MAPPING
    CUSTOM_MODULES_MAPPING = custom_modules_hooks
    compute_model = add_computing_methods(model)
    compute_model.eval()
    compute_model.start_compute(ost=ost, verbose=verbose, ignore_list=ignore_modules)
    if input_constructor:
        # Renamed from `input` to avoid shadowing the builtin.
        inp = input_constructor(input_res)
        _ = compute_model(**inp)
    else:
        try:
            # Match the model's dtype/device for the dummy batch of size 1.
            batch = torch.ones(()).new_empty((1, *input_res),
                                             dtype=next(compute_model.parameters()).dtype,
                                             device=next(compute_model.parameters()).device)
        except StopIteration:
            # Model has no parameters; fall back to default dtype/device.
            batch = torch.ones(()).new_empty((1, *input_res))

        _ = compute_model(batch)

    flops_count, mac_count, params_count = compute_model.compute_average_compute_cost()
    if print_per_layer_stat:
        print_model_with_compute(compute_model, flops_count, mac_count, params_count, ost=ost)
    compute_model.stop_compute()
    CUSTOM_MODULES_MAPPING = {}

    tb = pt.PrettyTable()
    tb.field_names = ['Metrics', 'Value']
    tb.add_row(['%s' % 'Floating Point Operations (FLOPs)', '%8s' % to_string(flops_count)])
    tb.add_row(['%s' % 'Memory Access Cost (MAC)', '%8s' % to_string(mac_count)])
    tb.add_row(['%s' % 'Number of Parameters', '%8s' % to_string(params_count)])
    print(tb)

    return flops_count, mac_count, params_count
def to_string(params_num, units=None, precision=3):
    """Format a raw count as a human-readable string with a G/M/K suffix.

    Args:
        params_num: the number to format.
        units: force a specific unit ('G', 'M' or 'K'); any other value
            returns the bare number. When None, the largest unit that keeps
            the value >= 1 is chosen automatically.
        precision: decimal places passed to round().

    Returns:
        The formatted string, e.g. ``'1.5 K'``.
    """
    if units is None:
        # Bug fix: this branch previously hard-coded 3 decimal places and
        # ignored the `precision` argument (identical output for the default).
        if params_num // 10**9 > 0:
            return str(round(params_num / 10**9, precision)) + ' G'
        elif params_num // 10**6 > 0:
            return str(round(params_num / 10**6, precision)) + ' M'
        elif params_num // 10**3 > 0:
            return str(round(params_num / 10**3, precision)) + ' K'
        return str(params_num)
    # Explicit unit requested; unknown units fall through to the bare number.
    if units == 'G':
        return str(round(params_num / 10**9, precision)) + ' ' + units
    elif units == 'M':
        return str(round(params_num / 10**6, precision)) + ' ' + units
    elif units == 'K':
        return str(round(params_num / 10**3, precision)) + ' ' + units
    return str(params_num)
def print_model_with_compute(model, total_flops, total_mac, total_params, units='M',
                             precision=3, ost=sys.stdout):
    """Print the model repr augmented with per-layer FLOPs/MAC/param stats.

    Temporarily monkey-patches ``extra_repr`` on every submodule so that
    ``repr(model)`` shows accumulated costs, prints it to *ost*, then restores
    the original ``extra_repr`` methods.
    """

    def accumulate_params(self):
        # Leaf modules with a registered hook carry their own __params__;
        # container modules sum over their children.
        if is_supported_instance(self):
            return self.__params__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_params()
            return sum

    def accumulate_flops(self):
        # Normalised by the number of processed batches -> per-image cost.
        if is_supported_instance(self):
            return self.__flops__ / model.__batch_counter__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_flops()
            return sum

    def accumulate_mac(self):
        if is_supported_instance(self):
            return self.__mac__ / model.__batch_counter__
        else:
            sum = 0
            for m in self.children():
                sum += m.accumulate_mac()
            return sum

    def compute_repr(self):
        # Replacement extra_repr: absolute and relative costs per submodule.
        accumulated_params_num = self.accumulate_params()
        accumulated_flops_cost = self.accumulate_flops()
        accumulated_mac_cost = self.accumulate_mac()
        return ', '.join([to_string(accumulated_params_num,
                                    units=units, precision=precision),
                          '{:.3%} Params'.format(accumulated_params_num / total_params),
                          to_string(accumulated_flops_cost,
                                    units=units, precision=precision),
                          '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops),
                          to_string(accumulated_mac_cost,
                                    units=units, precision=precision),
                          '{:.3%} MAC'.format(accumulated_mac_cost / total_mac),
                          # The 1e-5 terms guard against division by zero.
                          '{:.3} MAC/FLOPs'.format(accumulated_mac_cost / (accumulated_flops_cost + 1e-5)
                                                   * total_flops / (total_mac + 1e-5)),
                          self.original_extra_repr()])

    def add_extra_repr(m):
        # Bind the accumulator closures as methods on each submodule.
        m.accumulate_flops = accumulate_flops.__get__(m)
        m.accumulate_mac = accumulate_mac.__get__(m)
        m.accumulate_params = accumulate_params.__get__(m)
        compute_extra_repr = compute_repr.__get__(m)
        if m.extra_repr != compute_extra_repr:
            m.original_extra_repr = m.extra_repr
            m.extra_repr = compute_extra_repr
            assert m.extra_repr != m.original_extra_repr

    def del_extra_repr(m):
        # Restore the original extra_repr and drop the helper methods.
        if hasattr(m, 'original_extra_repr'):
            m.extra_repr = m.original_extra_repr
            del m.original_extra_repr
        if hasattr(m, 'accumulate_flops'):
            del m.accumulate_flops
        if hasattr(m, 'accumulate_mac'):
            del m.accumulate_mac

    model.apply(add_extra_repr)
    print(repr(model), file=ost)
    model.apply(del_extra_repr)
def get_model_parameters_number(model):
    """Return the total number of trainable (requires_grad) parameters."""
    return sum(param.numel() for param in model.parameters() if param.requires_grad)
def add_computing_methods(net_main_module):
    """Attach the profiling API to *net_main_module* and reset its counters.

    Binds start_compute/stop_compute/reset_compute/compute_average_compute_cost
    onto the module instance and returns it.
    """
    # adding additional methods to the existing module object,
    # this is done this way so that each function has access to self object
    net_main_module.start_compute = start_compute.__get__(net_main_module)
    net_main_module.stop_compute = stop_compute.__get__(net_main_module)
    net_main_module.reset_compute = reset_compute.__get__(net_main_module)
    net_main_module.compute_average_compute_cost = compute_average_compute_cost.__get__(
        net_main_module)

    # Start from a clean slate so repeated profiling runs do not accumulate.
    net_main_module.reset_compute()

    return net_main_module
def compute_average_compute_cost(self):
    """
    A method that will be available after add_computing_methods() is called
    on a desired net object.
    Returns current mean flops/mac consumption per image.

    Returns:
        (flops_per_batch, mac_per_batch, params_count)
    """
    batches_count = self.__batch_counter__
    flops_sum = 0
    mac_sum = 0
    for module in self.modules():
        if is_supported_instance(module):
            flops_sum += module.__flops__
            mac_sum += module.__mac__
    # Fix: the parameter count covers the whole model and does not depend on
    # the loop above — it used to be recomputed once per supported module
    # (and stayed 0 when no module was supported). Compute it exactly once.
    params_sum = get_model_parameters_number(self)
    return flops_sum / batches_count, mac_sum / batches_count, params_sum
def start_compute(self, **kwargs):
    """
    A method that will be available after add_computing_methods() is called
    on a desired net object.
    Activates the computation of mean flops/mac consumption per image.
    Call it before you run the network.
    """
    add_batch_counter_hook_function(self)
    # Tracks module types already handled/warned about, to warn only once.
    seen_types = set()

    def add_compute_hook_function(module, ost, verbose, ignore_list):
        # Explicitly ignored module types are treated as zero-op.
        if type(module) in ignore_list:
            seen_types.add(type(module))
            if is_supported_instance(module):
                module.__params__ = 0
        elif is_supported_instance(module):
            if hasattr(module, '__flops_handle__'):
                # Hook already installed; avoid double counting.
                return
            # Custom hooks take precedence over the built-in mapping.
            if type(module) in CUSTOM_MODULES_MAPPING:
                handle = module.register_forward_hook(
                    CUSTOM_MODULES_MAPPING[type(module)])
            else:
                handle = module.register_forward_hook(MODULES_MAPPING[type(module)])
            # NOTE(review): the same handle object is stored under both
            # names, so one hook covers both FLOPs and MAC bookkeeping.
            module.__flops_handle__ = handle
            module.__mac_handle__ = handle
            seen_types.add(type(module))
        else:
            # Unsupported leaf type: optionally warn once (containers excluded).
            if verbose and not type(module) in (nn.Sequential, nn.ModuleList) and \
                    not type(module) in seen_types:
                print('Warning: module ' + type(module).__name__ +
                      ' is treated as a zero-op.', file=ost)
            seen_types.add(type(module))

    self.apply(partial(add_compute_hook_function, **kwargs))
def stop_compute(self):
    """
    A method that will be available after add_computing_methods() is called
    on a desired net object.
    Stops computing the mean flops consumption per image.
    Call whenever you want to pause the computation.
    """
    # Detach the batch counter first, then every per-module compute hook.
    remove_batch_counter_hook_function(self)
    self.apply(remove_compute_hook_function)
def reset_compute(self):
    """
    A method that will be available after add_computing_methods() is called
    on a desired net object.
    Resets statistics computed so far.
    """
    # Zero the batch counter and all per-module __flops__/__mac__/__params__.
    add_batch_counter_variables_or_reset(self)
    self.apply(add_compute_variable_or_reset)
def batch_counter_hook(module, input, output):
    """Forward hook that accumulates the batch size seen by *module*.

    The batch size is inferred from the leading dimension of the first
    positional input; when there is none, a batch size of 1 is assumed
    and a warning is printed.
    """
    batch_size = 1
    if len(input) > 0:
        # Can have multiple inputs, getting the first one
        batch_size = len(input[0])
    else:
        # Cleanup: removed the dead `pass` that preceded this warning.
        print('Warning! No positional inputs found for a module,'
              ' assuming batch size is 1.')
    module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
    """(Re)initialise the per-module batch counter used by the hooks."""
    setattr(module, '__batch_counter__', 0)
def add_batch_counter_hook_function(module):
    """Register the batch-counting forward hook on *module* (idempotent)."""
    if hasattr(module, '__batch_counter_handle__'):
        # Hook already installed; never register it twice.
        return

    handle = module.register_forward_hook(batch_counter_hook)
    module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
    """Detach the batch-counting forward hook from *module*, if present."""
    if not hasattr(module, '__batch_counter_handle__'):
        return
    module.__batch_counter_handle__.remove()
    del module.__batch_counter_handle__
def add_compute_variable_or_reset(module):
    """Initialise (or reset) __flops__/__mac__/__params__ on supported modules.

    Warns when the attributes already exist, since overwriting them could
    interfere with other instrumentation attached to the same model.
    """
    if is_supported_instance(module):
        if hasattr(module, '__flops__') or hasattr(module, '__mac__') or \
                hasattr(module, '__params__'):
            # Bug fix: the old message concatenated the module name without a
            # separating space ("...the moduleConv2d...") and referred to
            # "ptflops" (the library this code was derived from).
            print('Warning: variables __flops__ or __mac__ or __params__ are already '
                  'defined for the module ' + type(module).__name__ +
                  '. mflops can affect your code!')
        module.__flops__ = 0
        module.__mac__ = 0
        module.__params__ = get_model_parameters_number(module)
def is_supported_instance(module):
    """Return True when a counting hook is registered for this module type."""
    module_type = type(module)
    return module_type in MODULES_MAPPING or module_type in CUSTOM_MODULES_MAPPING
def remove_compute_hook_function(module):
    """Detach the compute forward hooks installed by start_compute()."""
    if is_supported_instance(module):
        if hasattr(module, '__flops_handle__'):
            module.__flops_handle__.remove()
            del module.__flops_handle__
        # NOTE(review): __mac_handle__ holds the same handle object as
        # __flops_handle__ (see start_compute), so this second remove() is
        # expected to be an idempotent no-op — confirm against the installed
        # torch version's RemovableHandle behaviour.
        if hasattr(module, '__mac_handle__'):
            module.__mac_handle__.remove()
            del module.__mac_handle__
| 1.601563 | 2 |
dosagelib/plugins/derideal.py | Church-/dosage | 1 | 4801 | # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 <NAME>
# Copyright (C) 2019-2020 <NAME>
from ..scraper import _ParserScraper
from ..helpers import indirectStarter
class Derideal(_ParserScraper):
    """Scraper for the Derideal family of webcomics (derideal.com)."""

    baseUrl = 'https://www.derideal.com/'
    imageSearch = '//img[contains(@class, "comic-page")]'
    prevSearch = '//a[i[contains(@class, "fa-angle-left")]]'
    latestSearch = '//a[i[contains(@class, "fa-angle-double-right")]]'
    starter = indirectStarter

    def __init__(self, name, sub, first, last=None):
        # The flagship comic keeps the plain name; spin-offs are namespaced
        # under "Derideal/".
        if name == 'Derideal':
            super(Derideal, self).__init__(name)
        else:
            super(Derideal, self).__init__('Derideal/' + name)
        self.url = self.baseUrl + sub
        self.stripUrl = self.url + '/%s/'
        self.firstStripUrl = self.stripUrl % first
        self.startUrl = self.firstStripUrl
        if last:
            # A known last strip marks the comic as finished/on hiatus.
            self.endOfLife = True

    def starter(self):
        # Collect the chapter links first so namer() can prefix filenames
        # with a chapter number, then delegate to the regular indirect starter.
        indexPage = self.getPage(self.url)
        self.chapters = indexPage.xpath('//a[contains(text(), "Read this episode")]/@href')
        self.currentChapter = len(self.chapters)
        return indirectStarter(self)

    def namer(self, imageUrl, pageUrl):
        # Normalise Spanish page slugs to their English counterparts so the
        # downloaded filenames stay consistent across languages.
        filename = pageUrl.rstrip('/').rsplit('/', 1)[-1]
        filename = filename.replace('espanol-escape-25', 'escape-26')
        filename = filename.replace('espanol-w-a-l-l-y', 'w-a-l-l-y')
        filename = filename.replace('hogar-prision', 'home-prison')
        filename = filename.replace('strip', 'pe').replace('purpurina-effect', 'pe')
        filename = filename.replace('sector-de-seguridad', 'security-sector')
        filename = 'ch' + str(self.currentChapter) + '-' + filename
        if pageUrl in self.chapters:
            # Crossing a chapter-cover page: count down to the prior chapter.
            self.currentChapter -= 1
        return filename

    @classmethod
    def getmodules(cls):
        # NOTE(review): the 'LRE' module uses sub-path 'RLE' — looks like a
        # transposition, but verify the live site path before changing it.
        return (
            cls('Derideal', 'derideal', 'cover-prime'),
            cls('Legacy', 'derideal-legacy', 'the-dream-cover', last='derideal-is-on-hiatus'),
            cls('LRE', 'RLE', 'the-leyend-of-the-rose-cover'),
            cls('ProjectPrime', 'project-prime', 'custus-part-i-cover'),
            cls('PurpurinaEffect', 'purpurina-effect', 'purpurina-effect-cover'),
            cls('TheVoid', 'the-void', 'the-void-cover')
        )
| 2.578125 | 3 |
LearnFunction/learnfunction01.py | subash-kc/2022-01-04-Python | 1 | 4802 | <reponame>subash-kc/2022-01-04-Python
"""
Function are subprograms which are used to compute a value or perform a task.
Type of Functions:-
Built in Functions:
print(), upper()
User define functions
Advantage of Functions
1. Write once and use it as many time as you need. This provides code reusability
2. Function facilitates ease of code maintenance
3. Divide Large task into many small task so it will help you to debug code
4. You can remove or add new feature to a function anytime.
"""
"""
We can define a function using def keyword followed by function name with parentheses. This is also called
as Creating a function, Writing a Function, Defining a FUnction.
Syntax:-
def function_name():
Local Variable
block of statement
return(variable or expression)
def function_name(param1, param2, param3, .....)
Local Variable
Block of statement
return (variable or expression)
Note - Nooed to mainitain a proper indentation
"""
# creating a list
def add():
    """Sum a fixed demo list, print the result, and return the total.

    Cleanup: the local used to shadow the builtin ``list`` and iterate by
    index; the built-in sum() is the idiomatic form. Returning the total is
    backward-compatible (the old version returned None implicitly).
    """
    values = [8, 2, 3, 0, 7]
    total = sum(values)
    print('Sum of all elements in given list: ', total)
    return total


if __name__ == '__main__':
    add()

print()
# another method
def sum_list():
    """Sum the demo list with the built-in sum(), print and return it."""
    mylist = [8, 2, 3, 0, 7]
    # Using inbuilt sum method
    total = sum(mylist)
    print("Sum of all elements in given list1: ", total)
    return total


if __name__ == '__main__':
    sum_list()

print()
def multiplylist():
    """Multiply all elements of the demo list, print and return the product."""
    list_multiply = [8, 2, 3, -1, 7]
    total = 1
    for x in list_multiply:
        total = total * x
    print(total)
    return total


if __name__ == '__main__':
    multiplylist()
# Method 2: Using numpy.prod() ^ Install numpy package
import numpy


def product_total():
    """Multiply all elements of the demo list via numpy.prod() and return it."""
    list_product = [8, 2, 3, -1, 7]
    total = numpy.prod(list_product)
    print("Another method using numpy method to find product in list: ", total)
    return total


product_total()
print()
def findingminmax(num1: int, num2: int, num3: int) -> int:
    """Print and return the maximum of three numbers.

    Bug fix: the first branch used to test ``num1 > num2`` twice instead of
    comparing num1 against both other values, so e.g. (5, 3, 9) wrongly
    reported 5 as the maximum. Also renamed the local so it no longer shadows
    the builtin ``max``, and the result is now returned.
    """
    if num1 > num2 and num1 > num3:
        maximum = num1
    elif num2 > num1 and num2 > num3:
        maximum = num2
    else:
        maximum = num3
    print("The maximum number in given list is: ", maximum)
    return maximum


findingminmax(22, 26, 30)
print()
print("Another Method to find maximum")
def findingmaximum(num1: int, num2: int, num3: int) -> int:
find_max_list = (num1, num2, num3)
return max(find_max_list)
# Interactive driver: these prompts block on stdin until the user answers.
x = int(input("Enter your first Number: "))
y = int(input("Enter your second Number: "))
z = int(input("Enter your third Number: "))
print("Maximum number is ::>", findingmaximum(x, y, z))
"""Python program to print the even numbers from a given list"""
def find_even():
sample_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
for num in sample_list:
if num % 2 == 0:
print(num, end=" ")
find_even()
print()
"""
Pythhon program to find prime numbers in given list
Function should return true if the number is prime; else false
"""
def isPrime(num):
if (num < 2):
return True
for i in range (2, num//2+1):
if(num%i==0):
return False
return True
# Interactive driver: blocks on stdin for the number to test.
number = int(input("Enter the number you will like to check whether the number is prime or not: \n"))
if isPrime(number):
    print(number, "is a Prime Number")
else:
    print(number, "is not a Prime number")
"""
Another Method to find prime number
"""
| 4.03125 | 4 |
trackr/cli.py | rpedigoni/trackr | 9 | 4803 | # coding: utf-8
import click
@click.command()
@click.option('--carrier', prompt='Carrier ID', help='Example: "ect" for Correios')
@click.option('--object-id', prompt='Object ID',
              help='Example: PN871429404BR')
def main(carrier, object_id):
    # CLI entry point: looks up a package by carrier + object id and prints
    # its tracking events. (No docstring on purpose — click would surface it
    # as the command's help text and change the CLI output.)
    # Imports are deferred so this module stays cheap to import.
    from trackr import Trackr
    from trackr.exceptions import PackageNotFound

    try:
        p = Trackr.track(carrier, object_id)
    except PackageNotFound as e:
        click.echo(click.style(
            u'Package with object ID {} ({}) not found'.format(
                object_id, carrier),
            fg='red')
        )
        if e.carrier_message:
            # Surface the carrier's own explanation when one is provided.
            click.echo(click.style(
                u'Carrier message: {}'.format(e.carrier_message),
                fg='red',)
            )
        return

    click.echo(click.style(u'Package found!', fg='green'))
    for t in p.tracking_info:
        # NOTE(review): __unicode__ is a Python 2 idiom — presumably kept for
        # py2 compatibility; confirm before porting to str(t).
        click.echo(t.__unicode__())


if __name__ == "__main__":
    main()
| 2.53125 | 3 |
pitop/common/notifications.py | pi-top/pi-top-Python-SDK | 28 | 4804 | <filename>pitop/common/notifications.py
from enum import Enum, auto
from subprocess import CalledProcessError, run
from pitop.common.command_runner import run_command
from pitop.common.logger import PTLogger
class NotificationAction:
    """A clickable notification action: button text plus the command string
    passed to notify-send for that action."""

    def __init__(self, call_to_action_text, command_str) -> None:
        self.call_to_action_text = call_to_action_text
        self.command_str = command_str
class NotificationActionManager:
    """Collects the actions attached to a notification: a list of labelled
    actions plus optional default (click) and close actions."""

    def __init__(self):
        self.actions = []
        self.default_action = None
        self.close_action = None

    def add_action(self, call_to_action_text, command_str) -> None:
        """Append a labelled action button."""
        self.actions.append(NotificationAction(call_to_action_text, command_str))

    def set_default_action(self, command_str) -> None:
        """Set the command run when the notification body is activated."""
        self.default_action = NotificationAction("", command_str)

    def set_close_action(self, command_str) -> None:
        """Set the command run when the notification is dismissed."""
        self.close_action = NotificationAction("", command_str)
class NotificationUrgencyLevel(Enum):
    """Urgency levels understood by notify-send's ``--urgency`` option;
    the member *name* (not the auto() value) is what gets passed through."""

    low = auto()
    normal = auto()
    critical = auto()
def send_notification(
    title: str,
    text: str,
    icon_name: str = "",
    timeout: int = 0,
    app_name: str = "",
    notification_id: int = -1,
    actions_manager: NotificationActionManager = None,
    urgency_level: NotificationUrgencyLevel = None,
    capture_notification_id: bool = True,
) -> str:
    """Display a desktop notification via notify-send-ng.

    Builds a notify-send command line from the given fields and runs it.

    Args:
        title: notification title.
        text: notification body.
        icon_name: optional icon name/path.
        timeout: expire time in milliseconds (0 = server default).
        app_name: optional application name shown by the server.
        notification_id: when >= 0, replaces that existing notification.
        actions_manager: optional set of action/default/close commands.
        urgency_level: optional --urgency value (member name is used).
        capture_notification_id: capture and return the command's stdout
            (the printed notification id).

    Returns:
        The stdout of the notify-send invocation.

    Raises:
        Exception: if the notify-send-ng package is not installed.
    """
    # Check that `notify-send-ng` is available, as it's not a hard dependency of the package
    try:
        run(["dpkg-query", "-l", "notify-send-ng"], capture_output=True, check=True)
    except CalledProcessError:
        raise Exception("notify-send-ng not installed")

    cmd = "/usr/bin/notify-send "
    cmd += "--print-id "
    cmd += "--expire-time=" + str(timeout) + " "

    if icon_name:
        cmd += "--icon=" + icon_name + " "

    if notification_id >= 0:
        # Re-use an existing notification instead of popping a new one.
        cmd += "--replace=" + str(notification_id) + " "

    if actions_manager is not None:
        for action in actions_manager.actions:
            cmd += (
                '--action="'
                + action.call_to_action_text
                + ":"
                + action.command_str
                + '" '
            )

        if actions_manager.default_action is not None:
            cmd += (
                "--default-action=" + actions_manager.default_action.command_str + " "
            )

        if actions_manager.close_action is not None:
            cmd += "--close-action=" + actions_manager.close_action.command_str + " "

    if app_name:
        cmd += "--app-name=" + app_name + " "

    if urgency_level is not None:
        cmd += "--urgency=" + urgency_level.name + " "

    # NOTE(review): title/text are interpolated into a shell command string
    # without escaping; quotes in either would break (or inject into) the
    # command line — confirm how run_command executes this before exposing
    # it to untrusted input.
    cmd += ' "' + title + '" '
    cmd += '"' + text + '"'

    PTLogger.info("notify-send command: {}".format(cmd))
    try:
        resp_stdout = run_command(cmd, 2000, capture_output=capture_notification_id)
    except Exception as e:
        PTLogger.warning("Failed to show message: {}".format(e))
        raise

    return resp_stdout
| 2.3125 | 2 |
src/Gismo_XY To Location.py | AntonelloDN/gismo | 57 | 4805 | # xy to location
#
# Gismo is a plugin for GIS environmental analysis (GPL) started by <NAME>.
#
# This file is part of Gismo.
#
# Copyright (c) 2019, <NAME> <<EMAIL>>
# Gismo is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
#
# Gismo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/.
#
# The GPL-3.0+ license <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to calculate latitude and longitude coordinates of the _point in Rhino scene.
For example: you created some building shapes with Gismo "OSM Shapes" component, and now you would like to check what are the latitude and longtitude coordinates of particular part of the building.
-
Provided by Gismo 0.0.3
input:
_point: A point for which we would like to calculate its latitude and longitude coordinates
anchorLocation_: Represents latitude,longitude coordinates which correspond to anchorOrigin_ in Rhino scene.
-
If nothing added to this input, anchorLocation_ with both latitude and longitude set to "0" will be used as a default.
anchorOrigin_: A point in Rhino scene which corresponds to anchorLocation_.
-
If nothing added to this input, anchorOrigin will be set to: 0,0,0.
output:
readMe!: ...
location: Location (latitude, longitude coordinates) of the _point input.
"""
ghenv.Component.Name = "Gismo_XY To Location"
ghenv.Component.NickName = "XYtoLocation"
ghenv.Component.Message = "VER 0.0.3\nJAN_29_2019"
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "Gismo"
ghenv.Component.SubCategory = "1 | Gismo"
#compatibleGismoVersion = VER 0.0.3\nJAN_29_2019
try: ghenv.Component.AdditionalHelpFromDocStrings = "2"
except: pass
import scriptcontext as sc
import Grasshopper
import Rhino
def main(requiredPoint, anchorLocation, anchorOrigin):
    """Convert a Rhino-scene point to a latitude/longitude location.

    Projects between WGS 84 and the anchor location's UTM zone, using
    anchorOrigin as the scene point corresponding to anchorLocation.
    Returns (required_location, validInputData, printMsg).
    IronPython 2 (GhPython); relies on module-level gismo_preparation /
    gismo_gis helpers set up by the Gismo Gismo component.
    """
    # check inputs
    if (requiredPoint == None):
        required_location = None
        validInputData = False
        printMsg = "Please add a point to this component's \"_point\" input."
        return required_location, validInputData, printMsg

    if (anchorLocation == None):
        # Default anchor: latitude/longitude (0, 0), unknown location.
        locationName = "unknown location"
        anchor_locationLatitudeD = 0
        anchor_locationLongitudeD = 0
        timeZone = 0
        elevation = 0
    else:
        locationName, anchor_locationLatitudeD, anchor_locationLongitudeD, timeZone, elevation, validLocationData, printMsg = gismo_preparation.checkLocationData(anchorLocation)
    if (anchorOrigin == None):
        anchorOrigin = Rhino.Geometry.Point3d(0,0,0)

    # Convert from the document's unit system to meters before CRS math.
    unitConversionFactor, unitSystemLabel = gismo_preparation.checkUnits()
    anchorOrigin_meters = Rhino.Geometry.Point3d(anchorOrigin.X*unitConversionFactor, anchorOrigin.Y*unitConversionFactor, anchorOrigin.Z*unitConversionFactor)
    requiredPoint_meters = Rhino.Geometry.Point3d(requiredPoint.X*unitConversionFactor, requiredPoint.Y*unitConversionFactor, requiredPoint.Z*unitConversionFactor)

    # Project the anchor's lat/lon into its UTM zone (meters).
    # inputCRS
    EPSGcode = 4326 # WGS 84
    inputCRS_dummy = gismo_gis.CRS_from_EPSGcode(EPSGcode)
    # outputCRS
    outputCRS_dummy = gismo_gis.UTM_CRS_from_latitude(anchor_locationLatitudeD, anchor_locationLongitudeD)

    anchor_originProjected_meters = gismo_gis.convertBetweenTwoCRS(inputCRS_dummy, outputCRS_dummy, anchor_locationLongitudeD, anchor_locationLatitudeD) # in meters

    # Offset the required point by the anchor's projected origin, then
    # project back from UTM to WGS 84 to get its latitude/longitude.
    # inputCRS
    # based on assumption that both anchorLocation_ input and required_location belong to the same UTM zone
    inputCRS = gismo_gis.UTM_CRS_from_latitude(anchor_locationLatitudeD, anchor_locationLongitudeD, anchor_locationLatitudeD, anchor_locationLongitudeD)
    # outputCRS
    EPSGcode = 4326
    outputCRS = gismo_gis.CRS_from_EPSGcode(EPSGcode)

    latitudeLongitudePt = gismo_gis.convertBetweenTwoCRS(inputCRS, outputCRS, (anchor_originProjected_meters.X - anchorOrigin_meters.X) + requiredPoint_meters.X, (anchor_originProjected_meters.Y - anchorOrigin_meters.Y) + requiredPoint_meters.Y)

    required_location = gismo_preparation.constructLocation(locationName, latitudeLongitudePt.Y, latitudeLongitudePt.X, timeZone, elevation)

    validInputData = True
    printMsg = "ok"
    return required_location, validInputData, printMsg
# GhPython top-level driver: runs only after the "Gismo Gismo" component has
# populated the sticky dictionary with the shared helper classes.
level = Grasshopper.Kernel.GH_RuntimeMessageLevel.Warning
if sc.sticky.has_key("gismoGismo_released"):
    validVersionDate, printMsg = sc.sticky["gismo_check"].versionDate(ghenv.Component)
    if validVersionDate:
        gismo_preparation = sc.sticky["gismo_Preparation"]()
        gismo_gis = sc.sticky["gismo_GIS"]()

        # _point, anchorLocation_, anchorOrigin_ are the component's inputs.
        location, validInputData, printMsg = main(_point, anchorLocation_, anchorOrigin_)
        if not validInputData:
            print printMsg
            ghenv.Component.AddRuntimeMessage(level, printMsg)
    else:
        print printMsg
        ghenv.Component.AddRuntimeMessage(level, printMsg)
else:
    printMsg = "First please run the Gismo Gismo component."
    print printMsg
    ghenv.Component.AddRuntimeMessage(level, printMsg)
| 2.5 | 2 |
Notebooks/SentinelUtilities/SentinelAnomalyLookup/__init__.py | ytognder/Azure-Sentinel | 266 | 4806 | # pylint: disable-msg=C0103
"""
SentinelAnomalyLookup: This package is developed for Azure Sentinel Anomaly lookup
"""
# __init__.py
from .anomaly_lookup_view_helper import AnomalyLookupViewHelper
from .anomaly_finder import AnomalyQueries, AnomalyFinder
| 1.21875 | 1 |
items/coins.py | leerichoang/Legend-Of-Peach | 0 | 4807 | <reponame>leerichoang/Legend-Of-Peach<filename>items/coins.py
import pygame
from pygame.sprite import Sprite
class Coins(Sprite):
    """Coin pickup sprite.

    A coin either pops out of a block and bounces ("floating") or sits in
    place cycling a spin animation ("resting"). A floating coin awards the
    player a coin + 200 points when it lands back at its rest height.
    """

    def __init__(self, hub, x, y, name='coin', state='floating'):
        super().__init__()
        # Values
        self.name = name
        self.hub = hub
        self.original_pos = [x, y]
        self.rest_height = y  # y-coordinate the floating coin falls back to
        self.rest_x = x
        self.velY = 0
        self.upwards = True  # direction flag for the bounce animation
        self.state = state  # 'floating' or 'resting'
        # Frame widths shrink (30 -> 14 -> 4) to fake a 3-D spin.
        self.scale = (30, 50)
        self.scale2 = (14, 50)
        self.scale3 = (4, 50)

        # Screen Camera
        self.screen = self.hub.main_screen
        self.screen_rect = self.screen.get_rect()
        self.camera = hub.camera

        # Images
        self.index = 0
        self.change_freq = 120  # ms between animation frames
        self.player_clock = pygame.time.get_ticks() + self.change_freq
        self.frameRate = 30
        self.clock = pygame.time.get_ticks() + self.frameRate
        self.image_index = [pygame.image.load("imgs/Items/coin1.png"),
                            pygame.image.load("imgs/Items/coin2.png"),
                            pygame.image.load("imgs/Items/coin3.png"),
                            pygame.image.load("imgs/Items/coin2.png")]
        self.image_index[0] = pygame.transform.scale(self.image_index[0], self.scale)
        self.image_index[1] = pygame.transform.scale(self.image_index[1], self.scale2)
        self.image_index[2] = pygame.transform.scale(self.image_index[2], self.scale3)
        self.image_index[3] = pygame.transform.scale(self.image_index[3], self.scale2)
        self.resting_index = [pygame.image.load("imgs/Items/CoinForBlackBG.png"),
                              pygame.image.load("imgs/Items/CoinForBlackBG1.png"),
                              pygame.image.load("imgs/Items/CoinForBlackBG2.png"),
                              pygame.image.load("imgs/Items/CoinForBlackBG1.png")]
        for i in range(len(self.resting_index)):
            self.resting_index[i] = pygame.transform.scale(self.resting_index[i], self.scale)
        if self.state == "floating":
            self.image = self.image_index[self.index]
        else:
            self.image = self.resting_index[self.index]
        self.rect = self.image.get_rect()
        self.rect.x = self.original_pos[0]
        self.rect.y = self.original_pos[1]

    def draw(self):
        """Blit the current animation frame onto the main screen."""
        self.screen.blit(self.image, self.rect)

    def update(self):
        """Per-frame update called by the sprite group."""
        self.check_state()

    def check_state(self):
        # Dispatch to the animation matching the current state.
        if self.state == "floating":
            self.start_anim()
        elif self.state == "resting":
            self.resting()

    def start_anim(self):
        """Starts coin spin animation"""
        # Bounce 60 px up from the rest height, then fall back down.
        # NOTE(review): the == comparisons rely on velY (5) dividing 60
        # evenly — confirm before changing either constant.
        self.velY = 5
        if self.rect.y == (self.rest_height - 60):
            self.upwards = False
        if self.upwards:
            self.rect.y -= self.velY
        else:
            self.rect.y += self.velY

        # start timer
        if pygame.time.get_ticks() > self.player_clock:
            self.player_clock = pygame.time.get_ticks() + self.change_freq
            # Nudge x as the frame width changes so the spin stays centred.
            if self.index == 0:
                self.original_pos[0] += 8
            elif self.index == 1:
                self.original_pos[0] += 5
            elif self.index == 2:
                self.original_pos[0] -= 5
            elif self.index == 3:
                self.original_pos[0] -= 8
            self.index += 1
            self.index %= len(self.image_index)
            self.image = self.image_index[self.index]

        if self.rect.y == self.rest_height:
            # Landed: award the coin and score, then remove the sprite.
            self.hub.gamemode.coins += 1
            self.hub.gamemode.check_coins()
            self.hub.gamemode.score += 200
            self.kill()

    def resting(self):
        """Starts coin rest animation"""
        # start timer
        if pygame.time.get_ticks() > self.player_clock:
            self.player_clock = pygame.time.get_ticks() + self.change_freq
            self.index += 1
            self.index %= len(self.resting_index)
            self.image = self.resting_index[self.index]
| 3.28125 | 3 |
python/package/geo/test/__init__.py | fiomenankiti/playground | 0 | 4808 | from geo.calc import Calc
from geo.calc import Distance
from geo.geosp import Wt
from geo.geosp import Gh
from geo.files.csv_file import check | 1.15625 | 1 |
tests/test_clean.py | tcapelle/nb_helpers | 7 | 4809 | <gh_stars>1-10
from pathlib import Path
from nb_helpers.clean import clean_all, clean_one
from tests import TEST_PATH
TEST_PATH  # no-op expression; presumably kept to silence an unused-import lint — TODO confirm

# Notebook fixture operated on by the tests below.
TEST_NB = Path("test_nb.py")


def test_clean_one():
    "clean just one nb"
    clean_one(TEST_NB)


def test_clean_all():
    "clean all test nbs"
    clean_all(path=TEST_PATH)
| 1.960938 | 2 |
apps/technical_analysis.py | KiloSat/FirstNivesh | 0 | 4810 | <filename>apps/technical_analysis.py
import streamlit as st
def app():
    # Streamlit page: interactive technical-analysis dashboard. Pulls OHLCV
    # data via yfinance for a user-chosen ticker/date range and renders
    # SMA/EMA, Bollinger Bands, MACD, CCI, RSI, OBV charts plus an
    # "extended market" (% deviation from the 50-day SMA) section.
    import yfinance as yf
    import streamlit as st
    import datetime
    import matplotlib.pyplot as plt
    import talib
    import ta
    import numpy as np
    import matplotlib.ticker as mticker
    import pandas as pd
    import requests

    yf.pdr_override()

    st.write("""
# Technical Analysis of Securites
Shown below are the **Moving Average Crossovers**, **Bollinger Bands**, **MACD's**, **Commodity Channel Indexes**, **Relative Strength Indexes** and **Extended Market Calculators** of any stock
""")

    st.sidebar.header('User Input Parameters')
    today = datetime.date.today()

    def user_input_features():
        # Sidebar widgets; dates are free-text ISO strings.
        ticker = st.sidebar.text_input("Ticker", 'GME')
        start_date = st.sidebar.text_input("Start Date", '2019-01-01')
        end_date = st.sidebar.text_input("End Date", f'{today}')
        return ticker, start_date, end_date

    symbol, start, end = user_input_features()

    def get_symbol(symbol):
        # Resolve the company's display name for chart headers.
        cticker = yf.Ticker(symbol)
        company_name = cticker.info['longName']
        return company_name

    company_name = get_symbol(symbol.upper())

    start = pd.to_datetime(start)
    end = pd.to_datetime(end)

    # Read data
    data = yf.download(symbol, start, end)

    # Adjusted Close Price
    st.header(f"""
Adjusted Close Price\n {company_name}
""")
    st.line_chart(data['Adj Close'])

    # ## SMA and EMA
    # Simple Moving Average
    data['SMA'] = talib.SMA(data['Adj Close'], timeperiod=20)

    # Exponential Moving Average
    data['EMA'] = talib.EMA(data['Adj Close'], timeperiod=20)

    # Plot
    st.header(f"""
Simple Moving Average vs. Exponential Moving Average\n {company_name}
""")
    st.line_chart(data[['Adj Close', 'SMA', 'EMA']])

    # Bollinger Bands
    data['upper_band'], data['middle_band'], data['lower_band'] = talib.BBANDS(data['Adj Close'], timeperiod=20)

    # Plot
    st.header(f"""
Bollinger Bands\n {company_name}
""")
    st.line_chart(data[['Adj Close', 'upper_band', 'middle_band', 'lower_band']])

    # ## MACD (Moving Average Convergence Divergence)
    # MACD
    data['macd'], data['macdsignal'], data['macdhist'] = talib.MACD(data['Adj Close'], fastperiod=12, slowperiod=26, signalperiod=9)

    # Plot
    st.header(f"""Moving Average Convergence Divergence\n {company_name}""")
    st.line_chart(data[['macd', 'macdsignal']])

    # ## CCI (Commodity Channel Index)
    # CCI
    cci = ta.trend.cci(data['High'], data['Low'], data['Close'], 31, 0.015)

    # Plot
    st.header(f"""Commodity Channel Index\n {company_name}""")
    st.line_chart(cci)

    # ## RSI (Relative Strength Index)
    # RSI
    data['RSI'] = talib.RSI(data['Adj Close'], timeperiod=14)

    # Plot
    st.header(f"""Relative Strength Index\n {company_name}""")
    st.line_chart(data['RSI'])

    # ## OBV (On Balance Volume)
    # OBV, scaled to millions for readability
    data['OBV'] = talib.OBV(data['Adj Close'], data['Volume']) / 10**6

    # Plot
    st.header(f"""On Balance Volume\n {company_name}""")
    st.line_chart(data['OBV'])

    # Extended Market: histogram of % deviation from the 50-day SMA.
    fig, ax1 = plt.subplots()

    sma = 50
    limit = 10

    # Re-download through today so the section always covers current data.
    data = yf.download(symbol, start, today)

    # calculates sma and creates a column in the dataframe
    data['SMA' + str(sma)] = data.iloc[:, 4].rolling(window=sma).mean()
    data['PC'] = ((data["Adj Close"] / data['SMA' + str(sma)]) - 1) * 100

    mean = round(data["PC"].mean(), 2)
    stdev = round(data["PC"].std(), 2)
    current = round(data["PC"][-1], 2)
    yday = round(data["PC"][-2], 2)

    stats = [['Mean', mean], ['Standard Deviation', stdev], ['Current', current], ['Yesterday', yday]]
    frame = pd.DataFrame(stats, columns=['Statistic', 'Value'])

    st.header(f"""Extended Market Calculator\n {company_name}""")
    st.dataframe(frame.style.hide_index())

    # fixed bin size
    bins = np.arange(-100, 100, 1)
    plt.rcParams['figure.figsize'] = 15, 10
    plt.xlim([data["PC"].min() - 5, data["PC"].max() + 5])
    plt.hist(data["PC"], bins=bins, alpha=0.5)
    plt.title(symbol + "-- % From " + str(sma) + " SMA Histogram since " + str(start.year))
    plt.xlabel('Percent from ' + str(sma) + ' SMA (bin size = 1)')
    plt.ylabel('Count')

    # Guide lines: mean, +/- 1/2/3 standard deviations, today and yesterday.
    plt.axvline(x=mean, ymin=0, ymax=1, color='k', linestyle='--')
    plt.axvline(x=stdev + mean, ymin=0, ymax=1, color='gray', alpha=1, linestyle='--')
    plt.axvline(x=2 * stdev + mean, ymin=0, ymax=1, color='gray', alpha=.75, linestyle='--')
    plt.axvline(x=3 * stdev + mean, ymin=0, ymax=1, color='gray', alpha=.5, linestyle='--')
    plt.axvline(x=-stdev + mean, ymin=0, ymax=1, color='gray', alpha=1, linestyle='--')
    plt.axvline(x=-2 * stdev + mean, ymin=0, ymax=1, color='gray', alpha=.75, linestyle='--')
    plt.axvline(x=-3 * stdev + mean, ymin=0, ymax=1, color='gray', alpha=.5, linestyle='--')
    plt.axvline(x=current, ymin=0, ymax=1, color='r', label='today')
    plt.axvline(x=yday, ymin=0, ymax=1, color='blue', label='yesterday')

    # add more x axis labels
    ax1.xaxis.set_major_locator(mticker.MaxNLocator(14))
    st.pyplot(fig)

    # Create Plots: recent time-series of the % deviation.
    fig2, ax2 = plt.subplots()
    # Last 150 sessions only.
    data = data[-150:]
    data['PC'].plot(label='close', color='k')
    plt.title(symbol + "-- % From " + str(sma) + " SMA Over last 100 days")
    plt.xlabel('Date')
    plt.ylabel('Percent from ' + str(sma) + ' EMA')

    # add more x axis labels
    ax2.xaxis.set_major_locator(mticker.MaxNLocator(8))
    plt.axhline(y=limit, xmin=0, xmax=1, color='r')
    plt.rcParams['figure.figsize'] = 15, 10
    st.pyplot(fig2)
| 3.125 | 3 |
tests/sentry/mediators/sentry_apps/test_creator.py | pombredanne/django-sentry | 0 | 4811 | <reponame>pombredanne/django-sentry
from __future__ import absolute_import
from mock import patch
from django.db import IntegrityError
from sentry.mediators.sentry_apps import Creator
from sentry.models import (
AuditLogEntry,
AuditLogEntryEvent,
ApiApplication,
IntegrationFeature,
SentryApp,
SentryAppComponent,
User,
)
from sentry.testutils import TestCase
class TestCreator(TestCase):
    """Tests for the SentryApp ``Creator`` mediator.

    Each test runs the mediator against a minimal org/user fixture and
    asserts on the database rows (proxy user, API application, SentryApp,
    UI components, integration feature, audit log) it is expected to create.
    """

    def setUp(self):
        # Baseline fixture: one user owning one organization, and a
        # pre-configured (but not yet executed) Creator instance.
        self.user = self.create_user()
        self.org = self.create_organization(owner=self.user)
        self.creator = Creator(
            name="nulldb",
            user=self.user,
            author="Sentry",
            organization=self.org,
            scopes=("project:read",),
            webhook_url="http://example.com",
            schema={"elements": [self.create_issue_link_schema()]},
            is_internal=False,
        )

    def test_slug(self):
        """The app slug is derived from the app name."""
        app = self.creator.call()
        assert app.slug == "nulldb"

    def test_creates_proxy_user(self):
        """A hidden proxy User is created to own the app's API application."""
        self.creator.call()
        assert User.objects.get(username__contains="nulldb", is_sentry_app=True)

    def test_creates_api_application(self):
        """An ApiApplication owned by the proxy user is created."""
        self.creator.call()
        proxy = User.objects.get(username__contains="nulldb")
        assert ApiApplication.objects.get(owner=proxy)

    def test_creates_sentry_app(self):
        """The SentryApp row links app, org, proxy user and scopes together."""
        self.creator.call()
        proxy = User.objects.get(username__contains="nulldb")
        app = ApiApplication.objects.get(owner=proxy)
        sentry_app = SentryApp.objects.get(
            name="nulldb", application=app, owner=self.org, proxy_user=proxy
        )
        assert sentry_app
        assert sentry_app.scope_list == ["project:read"]

    def test_expands_rolled_up_events(self):
        """A rolled-up event name ("issue") expands to its concrete events."""
        self.creator.events = ["issue"]
        app = self.creator.call()
        sentry_app = SentryApp.objects.get(id=app.id)
        assert "issue.created" in sentry_app.events

    def test_creates_ui_components(self):
        """One SentryAppComponent is created per schema element."""
        self.creator.schema = {
            "elements": [self.create_issue_link_schema(), self.create_alert_rule_action_schema()]
        }
        app = self.creator.call()
        assert SentryAppComponent.objects.filter(sentry_app_id=app.id, type="issue-link").exists()
        assert SentryAppComponent.objects.filter(
            sentry_app_id=app.id, type="alert-rule-action"
        ).exists()

    def test_creates_integration_feature(self):
        """A default IntegrationFeature row is created for the app."""
        app = self.creator.call()
        assert IntegrationFeature.objects.filter(sentry_app=app).exists()

    @patch("sentry.mediators.sentry_apps.creator.Creator.log")
    @patch("sentry.models.integrationfeature.IntegrationFeature.objects.create")
    def test_raises_error_creating_integration_feature(self, mock_create, mock_log):
        """An IntegrityError on feature creation is logged, not raised."""
        mock_create.side_effect = IntegrityError()
        self.creator.call()
        mock_log.assert_called_with(sentry_app="nulldb", error_message="")

    def test_creates_audit_log_entry(self):
        """Running with a request records a SENTRY_APP_ADD audit entry."""
        request = self.make_request(user=self.user, method="GET")
        Creator.run(
            name="nulldb",
            user=self.user,
            author="Sentry",
            organization=self.org,
            scopes=("project:read",),
            webhook_url="http://example.com",
            schema={"elements": [self.create_issue_link_schema()]},
            request=request,
            is_internal=False,
        )
        assert AuditLogEntry.objects.filter(event=AuditLogEntryEvent.SENTRY_APP_ADD).exists()

    # Schema edge cases: empty string, None and an empty element list must
    # all be accepted without error.
    def test_blank_schema(self):
        self.creator.schema = ""
        assert self.creator.call()

    def test_none_schema(self):
        self.creator.schema = None
        assert self.creator.call()

    def test_schema_with_no_elements(self):
        self.creator.schema = {"elements": []}
        assert self.creator.call()

    @patch("sentry.analytics.record")
    def test_records_analytics(self, record):
        """Creation emits a "sentry_app.created" analytics event."""
        sentry_app = Creator.run(
            name="nulldb",
            user=self.user,
            author="Sentry",
            organization=self.org,
            scopes=("project:read",),
            webhook_url="http://example.com",
            schema={"elements": [self.create_issue_link_schema()]},
            request=self.make_request(user=self.user, method="GET"),
            is_internal=False,
        )
        record.assert_called_with(
            "sentry_app.created",
            user_id=self.user.id,
            organization_id=self.org.id,
            sentry_app=sentry_app.slug,
        )

    def test_allows_name_that_exists_as_username_already(self):
        """An app name colliding with an existing username is allowed."""
        self.create_user(username="nulldb")
        assert self.creator.call()
| 1.976563 | 2 |
python/Model_Files/LFV_3/parameters.py | ZAKI1905/HEP-Phen | 1 | 4812 | # This file was automatically created by FeynRules 2.3.32
# Mathematica version: 11.3.0 for Mac OS X x86 (64-bit) (March 7, 2018)
# Date: Sat 21 Apr 2018 20:48:39
from object_library import all_parameters, Parameter
from function_library import complexconjugate, re, im, csc, sec, acsc, asec, cot
# This is a default parameter object representing 0.
# The two private factories below remove the boilerplate that the FeynRules
# export repeats for every parameter; each produced object is identical,
# attribute for attribute, to the original long-form Parameter(...) call.

def _ext(name, kind, value, texname, lhablock, lhacode):
    # External parameter: its numeric value is read from the LHA input card.
    return Parameter(name=name, nature='external', type=kind, value=value,
                     texname=texname, lhablock=lhablock, lhacode=lhacode)

def _int(name, kind, value, texname):
    # Internal parameter: its value is an expression derived at run time.
    return Parameter(name=name, nature='internal', type=kind, value=value,
                     texname=texname)

# Default parameter object representing the numeric constant 0.
ZERO = _int('ZERO', 'real', '0.0', '0')

# User-defined (external) parameters.
cabi = _ext('cabi', 'real', 0.227736, '\\theta _c', 'CKMBLOCK', [1])
aEWM1 = _ext('aEWM1', 'real', 127.9, '\\text{aEWM1}', 'SMINPUTS', [1])
Gf = _ext('Gf', 'real', 0.0000116637, 'G_f', 'SMINPUTS', [2])
aS = _ext('aS', 'real', 0.1184, '\\alpha _s', 'SMINPUTS', [3])
ymdo = _ext('ymdo', 'real', 0.00504, '\\text{ymdo}', 'YUKAWA', [1])
ymup = _ext('ymup', 'real', 0.00255, '\\text{ymup}', 'YUKAWA', [2])
yms = _ext('yms', 'real', 0.101, '\\text{yms}', 'YUKAWA', [3])
ymc = _ext('ymc', 'real', 1.27, '\\text{ymc}', 'YUKAWA', [4])
ymb = _ext('ymb', 'real', 4.7, '\\text{ymb}', 'YUKAWA', [5])
ymt = _ext('ymt', 'real', 172, '\\text{ymt}', 'YUKAWA', [6])
yme = _ext('yme', 'real', 0.000511, '\\text{yme}', 'YUKAWA', [11])
ymm = _ext('ymm', 'real', 0.10566, '\\text{ymm}', 'YUKAWA', [13])
ymtau = _ext('ymtau', 'real', 1.777, '\\text{ymtau}', 'YUKAWA', [15])
kq = _ext('kq', 'real', 0.001, 'k_q', 'FRBlock', [1])
lamf = _ext('lamf', 'real', 0.1, 'l_{\\text{fi}}', 'FRBlock', [2])

# New-physics Yukawa matrix (FRBlock6); only the (2,3) entry is non-zero.
yf1x1 = _ext('yf1x1', 'complex', 0, '\\text{yf1x1}', 'FRBlock6', [1, 1])
yf1x2 = _ext('yf1x2', 'complex', 0, '\\text{yf1x2}', 'FRBlock6', [1, 2])
yf1x3 = _ext('yf1x3', 'complex', 0, '\\text{yf1x3}', 'FRBlock6', [1, 3])
yf2x1 = _ext('yf2x1', 'complex', 0, '\\text{yf2x1}', 'FRBlock6', [2, 1])
yf2x2 = _ext('yf2x2', 'complex', 0, '\\text{yf2x2}', 'FRBlock6', [2, 2])
yf2x3 = _ext('yf2x3', 'complex', 1.e-6, '\\text{yf2x3}', 'FRBlock6', [2, 3])
yf3x1 = _ext('yf3x1', 'complex', 0, '\\text{yf3x1}', 'FRBlock6', [3, 1])
yf3x2 = _ext('yf3x2', 'complex', 0, '\\text{yf3x2}', 'FRBlock6', [3, 2])
yf3x3 = _ext('yf3x3', 'complex', 0, '\\text{yf3x3}', 'FRBlock6', [3, 3])

# Particle masses (MASS block), keyed by PDG code.
MZ = _ext('MZ', 'real', 91.1876, '\\text{MZ}', 'MASS', [23])
Me = _ext('Me', 'real', 0.000511, '\\text{Me}', 'MASS', [11])
MMU = _ext('MMU', 'real', 0.10566, '\\text{MMU}', 'MASS', [13])
MTA = _ext('MTA', 'real', 1.777, '\\text{MTA}', 'MASS', [15])
MU = _ext('MU', 'real', 0.00255, 'M', 'MASS', [2])
MC = _ext('MC', 'real', 1.27, '\\text{MC}', 'MASS', [4])
MT = _ext('MT', 'real', 172, '\\text{MT}', 'MASS', [6])
MD = _ext('MD', 'real', 0.00504, '\\text{MD}', 'MASS', [1])
MS = _ext('MS', 'real', 0.101, '\\text{MS}', 'MASS', [3])
MB = _ext('MB', 'real', 4.7, '\\text{MB}', 'MASS', [5])
MH = _ext('MH', 'real', 125, '\\text{MH}', 'MASS', [25])
MP = _ext('MP', 'real', 120, '\\text{MP}', 'MASS', [9000005])
Mfi = _ext('Mfi', 'real', 10, '\\text{Mfi}', 'MASS', [9000006])

# Particle widths (DECAY block), keyed by PDG code.
WZ = _ext('WZ', 'real', 2.4952, '\\text{WZ}', 'DECAY', [23])
WW = _ext('WW', 'real', 2.085, '\\text{WW}', 'DECAY', [24])
WT = _ext('WT', 'real', 1.50833649, '\\text{WT}', 'DECAY', [6])
WH = _ext('WH', 'real', 0.00589569, '\\text{WH}', 'DECAY', [25])
WH1 = _ext('WH1', 'real', 0.00575308848, '\\text{WH1}', 'DECAY', [9000005])
Wfi = _ext('Wfi', 'real', 6.03044e-9, '\\text{Wfi}', 'DECAY', [9000006])

# Internal (derived) parameters; the value strings are evaluated later by
# the UFO consumer with the external parameters in scope.
aEW = _int('aEW', 'real', '1/aEWM1', '\\alpha _{\\text{EW}}')
G = _int('G', 'real', '2*cmath.sqrt(aS)*cmath.sqrt(cmath.pi)', 'G')

# Cabibbo-only CKM matrix (third generation decoupled).
CKM1x1 = _int('CKM1x1', 'complex', 'cmath.cos(cabi)', '\\text{CKM1x1}')
CKM1x2 = _int('CKM1x2', 'complex', 'cmath.sin(cabi)', '\\text{CKM1x2}')
CKM1x3 = _int('CKM1x3', 'complex', '0', '\\text{CKM1x3}')
CKM2x1 = _int('CKM2x1', 'complex', '-cmath.sin(cabi)', '\\text{CKM2x1}')
CKM2x2 = _int('CKM2x2', 'complex', 'cmath.cos(cabi)', '\\text{CKM2x2}')
CKM2x3 = _int('CKM2x3', 'complex', '0', '\\text{CKM2x3}')
CKM3x1 = _int('CKM3x1', 'complex', '0', '\\text{CKM3x1}')
CKM3x2 = _int('CKM3x2', 'complex', '0', '\\text{CKM3x2}')
CKM3x3 = _int('CKM3x3', 'complex', '1', '\\text{CKM3x3}')

# Electroweak sector derived from (aEWM1, Gf, MZ).
MW = _int('MW', 'real', 'cmath.sqrt(MZ**2/2. + cmath.sqrt(MZ**4/4. - (aEW*cmath.pi*MZ**2)/(Gf*cmath.sqrt(2))))', 'M_W')
ee = _int('ee', 'real', '2*cmath.sqrt(aEW)*cmath.sqrt(cmath.pi)', 'e')
sw2 = _int('sw2', 'real', '1 - MW**2/MZ**2', '\\text{sw2}')
cw = _int('cw', 'real', 'cmath.sqrt(1 - sw2)', 'c_w')
sw = _int('sw', 'real', 'cmath.sqrt(sw2)', 's_w')
g1 = _int('g1', 'real', 'ee/cw', 'g_1')
gw = _int('gw', 'real', 'ee/sw', 'g_w')
vev = _int('vev', 'real', '(2*MW*sw)/ee', '\\text{vev}')
mfi = _int('mfi', 'real', 'cmath.sqrt(100 - (kq*vev**2)/2.)', 'M_{\\text{fi}}')

# Effective Higgs-photon / Higgs-gluon couplings (heavy-loop expansions).
AH = _int('AH', 'real', '(47*ee**2*(1 - (2*MH**4)/(987.*MT**4) - (14*MH**2)/(705.*MT**2) + (213*MH**12)/(2.634632e7*MW**12) + (5*MH**10)/(119756.*MW**10) + (41*MH**8)/(180950.*MW**8) + (87*MH**6)/(65800.*MW**6) + (57*MH**4)/(6580.*MW**4) + (33*MH**2)/(470.*MW**2)))/(72.*cmath.pi**2*vev)', 'A_H')
GH = _int('GH', 'real', '-(G**2*(1 + (13*MH**6)/(16800.*MT**6) + MH**4/(168.*MT**4) + (7*MH**2)/(120.*MT**2)))/(12.*cmath.pi**2*vev)', 'G_H')
Gphi = _int('Gphi', 'real', '-(G**2*(1 + MH**6/(560.*MT**6) + MH**4/(90.*MT**4) + MH**2/(12.*MT**2)))/(8.*cmath.pi**2*vev)', 'G_h')

lam = _int('lam', 'real', 'MH**2/(2.*vev**2)', '\\text{lam}')

# Standard-Model Yukawa couplings derived from the YUKAWA block masses.
yb = _int('yb', 'real', '(ymb*cmath.sqrt(2))/vev', '\\text{yb}')
yc = _int('yc', 'real', '(ymc*cmath.sqrt(2))/vev', '\\text{yc}')
ydo = _int('ydo', 'real', '(ymdo*cmath.sqrt(2))/vev', '\\text{ydo}')
ye = _int('ye', 'real', '(yme*cmath.sqrt(2))/vev', '\\text{ye}')
ym = _int('ym', 'real', '(ymm*cmath.sqrt(2))/vev', '\\text{ym}')
ys = _int('ys', 'real', '(yms*cmath.sqrt(2))/vev', '\\text{ys}')
yt = _int('yt', 'real', '(ymt*cmath.sqrt(2))/vev', '\\text{yt}')
ytau = _int('ytau', 'real', '(ymtau*cmath.sqrt(2))/vev', '\\text{ytau}')
yup = _int('yup', 'real', '(ymup*cmath.sqrt(2))/vev', '\\text{yup}')
muH = _int('muH', 'real', 'cmath.sqrt(lam*vev**2)', '\\mu')
| 1.71875 | 2 |
musicLrc.py | xiangxing98/Rhythm-Enlightment | 0 | 4813 | <gh_stars>0
import time
musicLrc = """
[00:03.50]传奇
[00:19.10]作词:刘兵 作曲:李健
[00:20.60]演唱:王菲
[00:26.60]
[04:40.75][02:39.90][00:36.25]只是因为在人群中多看了你一眼
[04:49.00]
[02:47.44][00:43.69]再也没能忘掉你容颜
[02:54.83][00:51.24]梦想着偶然能有一天再相见
[03:02.32][00:58.75]从此我开始孤单思念
[03:08.15][01:04.30]
[03:09.35][01:05.50]想你时你在天边
[03:16.90][01:13.13]想你时你在眼前
[03:24.42][01:20.92]想你时你在脑海
[03:31.85][01:28.44]想你时你在心田
[03:38.67][01:35.05]
[04:09.96][03:39.87][01:36.25]宁愿相信我们前世有约
[04:16.37][03:46.38][01:42.47]今生的爱情故事 不会再改变
[04:24.82][03:54.83][01:51.18]宁愿用这一生等你发现
[04:31.38][04:01.40][01:57.43]我一直在你身旁 从未走远
[04:39.55][04:09.00][02:07.85]
"""


def parse_lrc(lrc_text):
    """Parse LRC-format lyric text into a dict of {seconds: lyric_line}.

    Each line may carry several "[mm:ss.xx]" timestamps; the same lyric
    text is stored once per timestamp. Lines without a valid timestamp
    are skipped.
    """
    timed = {}
    for line in lrc_text.splitlines():
        parts = line.split("]")
        # Everything before the last ']' is a run of "[mm:ss.xx" stamps;
        # parts[-1] is the lyric text itself (possibly empty).
        for stamp in parts[:-1]:
            minutes, _, seconds = stamp[1:].partition(":")
            try:
                timed[float(minutes) * 60 + float(seconds)] = parts[-1]
            except ValueError:
                # Malformed timestamp -- ignore this stamp, keep the rest.
                continue
    return timed


def play(lrc_dict):
    """Print each lyric line at (approximately) its timestamp.

    Fixes two defects of the original loop: it terminates after the last
    lyric instead of spinning forever, and it no longer prints the *last*
    lyric at t=0 (the original indexed allTimeList[-1] before the first
    timestamp was reached).
    """
    elapsed = 0.0
    for stamp in sorted(lrc_dict):
        time.sleep(stamp - elapsed)
        elapsed = stamp
        print(lrc_dict[stamp])


# Guarding the playback lets this module be imported (e.g. for testing
# parse_lrc) without blocking on time.sleep.
if __name__ == "__main__":
    lrc_dict = parse_lrc(musicLrc)
    print(lrc_dict)
    play(lrc_dict)
octoprint_octopod/__init__.py | mnebelung/OctoPrint-OctoPod | 52 | 4814 | <reponame>mnebelung/OctoPrint-OctoPod
# coding=utf-8
from __future__ import absolute_import
import datetime
import logging
import sys
import flask
import octoprint.plugin
from octoprint.events import eventManager, Events
from octoprint.server import user_permission
from octoprint.util import RepeatedTimer
from .bed_notifications import BedNotifications
from .custom_notifications import CustomNotifications
from .ifttt_notifications import IFTTTAlerts
from .job_notifications import JobNotifications
from .layer_notifications import LayerNotifications
from .libs.sbc import SBCFactory, SBC, RPi
from .mmu import MMUAssistance
from .palette2 import Palette2Notifications
from .paused_for_user import PausedForUser
from .soc_temp_notifications import SocTempNotifications
from .thermal_protection_notifications import ThermalProtectionNotifications
from .tools_notifications import ToolsNotifications
# Plugin that stores APNS tokens reported from iOS devices to know which iOS devices to alert
# when print is done or other relevant events
debug_soc_temp = False
class OctopodPlugin(octoprint.plugin.SettingsPlugin,
                    octoprint.plugin.AssetPlugin,
                    octoprint.plugin.TemplatePlugin,
                    octoprint.plugin.StartupPlugin,
                    octoprint.plugin.SimpleApiPlugin,
                    octoprint.plugin.EventHandlerPlugin,
                    octoprint.plugin.ProgressPlugin):
    """OctoPrint plugin that sends push notifications to the OctoPod iOS app.

    Stores APNS device tokens reported by iOS devices and fans printer
    events (job state, temperatures, MMU/pause events, SoC temperature,
    layer changes, Palette 2 messages) out to the per-feature notifier
    objects created in __init__.
    """

    def __init__(self):
        super(OctopodPlugin, self).__init__()
        self._logger = logging.getLogger("octoprint.plugins.octopod")
        # Timer that periodically samples bed/tool temperatures (started in
        # _restart_timer once settings are available).
        self._checkTempTimer = None
        self._ifttt_alerts = IFTTTAlerts(self._logger)
        # One notifier object per notification feature; all share the
        # IFTTT alerter so events can also be forwarded to IFTTT.
        self._job_notifications = JobNotifications(self._logger, self._ifttt_alerts)
        self._tool_notifications = ToolsNotifications(self._logger, self._ifttt_alerts)
        self._bed_notifications = BedNotifications(self._logger, self._ifttt_alerts)
        self._mmu_assitance = MMUAssistance(self._logger, self._ifttt_alerts)
        self._paused_for_user = PausedForUser(self._logger, self._ifttt_alerts)
        self._palette2 = Palette2Notifications(self._logger, self._ifttt_alerts)
        self._layerNotifications = LayerNotifications(self._logger, self._ifttt_alerts)
        self._check_soc_temp_timer = None
        # Shorter interval when debugging SoC temperatures locally.
        self._soc_timer_interval = 5.0 if debug_soc_temp else 30.0
        self._soc_temp_notifications = SocTempNotifications(self._logger, self._ifttt_alerts, self._soc_timer_interval,
                                                            debug_soc_temp)
        self._custom_notifications = CustomNotifications(self._logger)
        self._thermal_protection_notifications = ThermalProtectionNotifications(self._logger, self._ifttt_alerts)

    # StartupPlugin mixin

    def on_after_startup(self):
        """Initialize logging level, plugin-message listener and timers."""
        self._logger.info("OctoPod loaded!")

        # Set logging level to what we have in the settings
        if self._settings.get_boolean(["debug_logging"]):
            self._logger.setLevel(logging.DEBUG)
        else:
            self._logger.setLevel(logging.INFO)

        # Register to listen for messages from other plugins
        self._plugin_manager.register_message_receiver(self.on_plugin_message)

        # Start timer that will check bed temperature and send notifications if needed
        self._restart_timer()

        # if running on linux then check soc temperature
        if sys.platform.startswith("linux") or debug_soc_temp:
            sbc = RPi(self._logger) if debug_soc_temp else SBCFactory().factory(self._logger)
            if sbc.is_supported:
                self._soc_temp_notifications.sbc = sbc
                sbc.debugMode = debug_soc_temp
                self._soc_temp_notifications.send_plugin_message = self.send_plugin_message
                self.start_soc_timer(self._soc_timer_interval)

    # SettingsPlugin mixin

    def get_settings_defaults(self):
        """Default values for every plugin setting (see settings UI)."""
        return dict(
            debug_logging=False,
            server_url='http://octopodprint.com/',
            camera_snapshot_url='http://localhost:8080/?action=snapshot',
            tokens=[],
            sound_notification='default',
            temp_interval=5,
            tool0_low=0,
            tool0_target_temp=False,
            bed_low=30,
            bed_target_temp_hold=10,
            mmu_interval=5,
            pause_interval=5,
            palette2_printing_error_codes=[103, 104, 111, 121],
            progress_type='50',  # 0=disabled, 25=every 25%, 50=every 50%, 100=only when finished
            ifttt_key='',
            ifttt_name='',
            soc_temp_high=75,
            thermal_runway_threshold=10,
            thermal_threshold_minutes_frequency=10,
            thermal_cooldown_seconds_threshold=14,
            thermal_warmup_bed_seconds_threshold=19,
            thermal_warmup_hotend_seconds_threshold=39,
            thermal_warmup_chamber_seconds_threshold=19,
            thermal_below_target_threshold=5,
            webcam_flipH=False,
            webcam_flipV=False,
            webcam_rotate90=False,
            notify_first_X_layers=1,
            print_complete_delay_seconds=0
        )

    def on_settings_save(self, data):
        """Persist settings and apply a changed debug_logging level live."""
        old_debug_logging = self._settings.get_boolean(["debug_logging"])

        octoprint.plugin.SettingsPlugin.on_settings_save(self, data)

        new_debug_logging = self._settings.get_boolean(["debug_logging"])
        if old_debug_logging != new_debug_logging:
            if new_debug_logging:
                self._logger.setLevel(logging.DEBUG)
            else:
                self._logger.setLevel(logging.INFO)

    def get_settings_version(self):
        # Bump together with a new migration step in on_settings_migrate.
        return 13

    def on_settings_migrate(self, target, current):
        """Apply cumulative settings migrations from version *current* up.

        Each step back-fills the settings keys introduced by that version
        with their defaults (or, for v9, with the global webcam settings).
        """
        if current is None or current == 1:
            # add the 2 new values included
            self._settings.set(['temp_interval'], self.get_settings_defaults()["temp_interval"])
            self._settings.set(['bed_low'], self.get_settings_defaults()["bed_low"])

        if current is None or current <= 2:
            self._settings.set(['bed_target_temp_hold'], self.get_settings_defaults()["bed_target_temp_hold"])

        if current is None or current <= 3:
            self._settings.set(['mmu_interval'], self.get_settings_defaults()["mmu_interval"])

        if current is None or current <= 4:
            self._settings.set(['pause_interval'], self.get_settings_defaults()["pause_interval"])

        if current is None or current <= 5:
            self._settings.set(['tool0_low'], self.get_settings_defaults()["tool0_low"])

        if current is None or current <= 6:
            self._settings.set(['palette2_printing_error_codes'],
                               self.get_settings_defaults()["palette2_printing_error_codes"])

        if current is None or current <= 7:
            self._settings.set(['progress_type'], self.get_settings_defaults()["progress_type"])

        if current is None or current <= 8:
            self._settings.set(['ifttt_key'], self.get_settings_defaults()["ifttt_key"])
            self._settings.set(['ifttt_name'], self.get_settings_defaults()["ifttt_name"])

        if current is None or current <= 9:
            self._settings.set(['soc_temp_high'], self.get_settings_defaults()["soc_temp_high"])
            self._settings.set(['webcam_flipH'], self._settings.global_get(["webcam", "flipH"]))
            self._settings.set(['webcam_flipV'], self._settings.global_get(["webcam", "flipV"]))
            self._settings.set(['webcam_rotate90'], self._settings.global_get(["webcam", "rotate90"]))

        if current is None or current <= 10:
            self._settings.set(['tool0_target_temp'], self.get_settings_defaults()["tool0_target_temp"])

        if current is None or current <= 11:
            self._settings.set(['thermal_runway_threshold'], self.get_settings_defaults()["thermal_runway_threshold"])
            self._settings.set(['thermal_threshold_minutes_frequency'], self.get_settings_defaults()["thermal_threshold_minutes_frequency"])
            self._settings.set(['sound_notification'], self.get_settings_defaults()["sound_notification"])

        if current is None or current <= 12:
            self._settings.set(['thermal_cooldown_seconds_threshold'], self.get_settings_defaults()["thermal_cooldown_seconds_threshold"])
            self._settings.set(['thermal_below_target_threshold'], self.get_settings_defaults()["thermal_below_target_threshold"])
            self._settings.set(['thermal_warmup_bed_seconds_threshold'], self.get_settings_defaults()["thermal_warmup_bed_seconds_threshold"])
            self._settings.set(['thermal_warmup_hotend_seconds_threshold'], self.get_settings_defaults()["thermal_warmup_hotend_seconds_threshold"])
            self._settings.set(['thermal_warmup_chamber_seconds_threshold'], self.get_settings_defaults()["thermal_warmup_chamber_seconds_threshold"])

        if current is None or current <= 13:
            self._settings.set(['notify_first_X_layers'], self.get_settings_defaults()["notify_first_X_layers"])

    # AssetPlugin mixin

    def get_assets(self):
        # Define your plugin's asset files to automatically include in the
        # core UI here.
        return dict(
            js=["js/octopod.js"],
            css=["css/octopod.css"],
        )

    # ProgressPlugin

    # progress-hook
    def on_print_progress(self, storage, path, progress):
        # progress 0 - 100
        self._job_notifications.on_print_progress(self._settings, progress)

    # EventHandlerPlugin mixin

    def on_event(self, event, payload):
        """Route OctoPrint (and DisplayLayerProgress) events to notifiers."""
        if event == Events.PRINTER_STATE_CHANGED:
            self._job_notifications.send__print_job_notification(self._settings, self._printer, payload)
        elif event == "DisplayLayerProgress_layerChanged":
            # Event sent from DisplayLayerProgress plugin when there was a detected layer changed
            self._layerNotifications.layer_changed(self._settings, payload["currentLayer"])
        elif event == Events.PRINT_STARTED or event == Events.PRINT_DONE or event == Events.PRINT_CANCELLED \
                or event == Events.PRINT_FAILED:
            # Reset layers for which we need to send a notification. Each new print job has its own
            self._layerNotifications.reset_layers()

    # SimpleApiPlugin mixin

    def update_token(self, old_token, new_token, device_name, printer_id, printer_name, language_code):
        """Add or refresh the APNS token entry for one iOS device/printer.

        A (token, printer_id) pair identifies an entry; the stored token,
        printer name and language code are updated in place when they
        change, and a new entry is appended when no match exists.
        """
        self._logger.debug("Received tokens for %s." % device_name)

        existing_tokens = self._settings.get(["tokens"])

        # Safety check in case a user manually modified config.yaml and left invalid JSON
        if existing_tokens is None:
            existing_tokens = []

        found = False
        updated = False
        for token in existing_tokens:
            # Check if existing token has been updated
            if token["apnsToken"] == old_token and token["printerID"] == printer_id:
                if old_token != new_token:
                    self._logger.debug("Updating token for %s." % device_name)
                    # Token that exists needs to be updated with new token
                    token["apnsToken"] = new_token
                    token["date"] = datetime.datetime.now().strftime("%x %X")
                    updated = True
                found = True
            elif token["apnsToken"] == new_token and token["printerID"] == printer_id:
                found = True

            if found:
                if printer_name is not None and ("printerName" not in token or token["printerName"] != printer_name):
                    # Printer name in OctoPod has been updated
                    token["printerName"] = printer_name
                    token["date"] = datetime.datetime.now().strftime("%x %X")
                    updated = True
                if language_code is not None and (
                        "languageCode" not in token or token["languageCode"] != language_code):
                    # Language being used by OctoPod has been updated
                    token["languageCode"] = language_code
                    token["date"] = datetime.datetime.now().strftime("%x %X")
                    updated = True
                break

        if not found:
            self._logger.debug("Adding token for %s." % device_name)
            # Token was not found so we need to add it
            existing_tokens.append(
                {'apnsToken': new_token, 'deviceName': device_name, 'date': datetime.datetime.now().strftime("%x %X"),
                 'printerID': printer_id, 'printerName': printer_name, 'languageCode': language_code})
            updated = True
        if updated:
            # Save new settings
            self._settings.set(["tokens"], existing_tokens)
            self._settings.save()
            eventManager().fire(Events.SETTINGS_UPDATED)
            self._logger.debug("Tokens saved")

    def get_api_commands(self):
        # Commands accepted by on_api_command, with their required fields.
        return dict(updateToken=["oldToken", "newToken", "deviceName", "printerID"], test=[],
                    snooze=["eventCode", "minutes"], addLayer=["layer"], removeLayer=["layer"], getLayers=[],
                    getSoCTemps=[])

    def on_api_command(self, command, data):
        """Handle SimpleApi commands issued by the OctoPod app / settings UI."""
        if not user_permission.can():
            return flask.make_response("Insufficient rights", 403)

        if command == 'updateToken':
            # Convert from ASCII to UTF-8 since some chars will fail otherwise (e.g. apostrophe) - Only for Python 2
            if sys.version_info[0] == 2:
                data["deviceName"] = data["deviceName"].encode("utf-8")
            printer_name = data["printerName"] if 'printerName' in data else None
            language_code = data["languageCode"] if 'languageCode' in data else None
            self.update_token("{oldToken}".format(**data), "{newToken}".format(**data), "{deviceName}".format(**data),
                              "{printerID}".format(**data), printer_name, language_code)
        elif command == 'test':
            # Fire a fake OPERATIONAL notification so the user can verify
            # their server/camera configuration from the settings page.
            payload = dict(
                state_id="OPERATIONAL",
                state_string="Operational"
            )
            code = self._job_notifications.send__print_job_notification(self._settings, self._printer, payload,
                                                                        data["server_url"], data["camera_snapshot_url"],
                                                                        data["camera_flip_h"], data["camera_flip_v"],
                                                                        data["camera_rotate90"],
                                                                        True)
            return flask.jsonify(dict(code=code))
        elif command == 'snooze':
            if data["eventCode"] == 'mmu-event':
                self._mmu_assitance.snooze(data["minutes"])
            else:
                return flask.make_response("Snooze for unknown event", 400)
        elif command == 'addLayer':
            self._layerNotifications.add_layer(data["layer"])
        elif command == 'removeLayer':
            self._layerNotifications.remove_layer(data["layer"])
        elif command == 'getLayers':
            return flask.jsonify(dict(layers=self._layerNotifications.get_layers()))
        elif command == 'getSoCTemps':
            return flask.jsonify(self._soc_temp_notifications.get_soc_temps())
        else:
            return flask.make_response("Unknown command", 400)

    # TemplatePlugin mixin

    def get_template_configs(self):
        return [
            dict(type="settings", name="OctoPod Notifications", custom_bindings=True)
        ]

    # Softwareupdate hook

    def get_update_information(self):
        # Define the configuration for your plugin to use with the Software Update
        # Plugin here. See https://github.com/foosel/OctoPrint/wiki/Plugin:-Software-Update
        # for details.
        return dict(
            octopod=dict(
                displayName="OctoPod Plugin",
                displayVersion=self._plugin_version,

                # version check: github repository
                type="github_release",
                user="gdombiak",
                repo="OctoPrint-OctoPod",
                current=self._plugin_version,

                # update method: pip
                pip="https://github.com/gdombiak/OctoPrint-OctoPod/archive/{target_version}.zip"
            )
        )

    # Plugin messages

    def on_plugin_message(self, plugin, data, permissions=None):
        # Palette 2 plugin messages may contain error codes worth alerting on.
        self._palette2.check_plugin_message(self._settings, plugin, data)

    def send_plugin_message(self, data):
        self._plugin_manager.send_plugin_message(self._identifier, data)

    # Timer functions

    def _restart_timer(self):
        """(Re)start the temperature polling timer from current settings."""
        # stop the timer
        if self._checkTempTimer:
            self._logger.debug(u"Stopping Timer...")
            self._checkTempTimer.cancel()
            self._checkTempTimer = None

        # start a new timer
        interval = self._settings.get_int(['temp_interval'])
        if interval:
            self._logger.debug(u"Starting Timer...")
            self._checkTempTimer = RepeatedTimer(interval, self.run_timer_job, None, None, True)
            self._checkTempTimer.start()

    def run_timer_job(self):
        # Periodic temperature checks: bed, tools, thermal runaway.
        self._bed_notifications.check_temps(self._settings, self._printer)
        self._tool_notifications.check_temps(self._settings, self._printer)
        self._thermal_protection_notifications.check_temps(self._settings, self._printer)

    def start_soc_timer(self, interval):
        """Start the periodic SoC (board) temperature check."""
        self._logger.debug(u"Monitoring SoC temp with Timer")
        self._check_soc_temp_timer = RepeatedTimer(interval, self.update_soc_temp, run_first=True)
        self._check_soc_temp_timer.start()

    def update_soc_temp(self):
        self._soc_temp_notifications.check_soc_temp(self._settings)

    # GCODE hook

    def process_gcode(self, comm, line, *args, **kwargs):
        # Inspect received GCODE lines for pause-for-user and MMU events.
        line = self._paused_for_user.process_gcode(self._settings, self._printer, line)
        return self._mmu_assitance.process_gcode(self._settings, line)

    # Helper functions

    def push_notification(self, message, image=None):
        """
        Send arbitrary push notification to OctoPod app running on iPhone (includes Apple Watch and iPad)
        via the OctoPod APNS service.
        :param message: (String) Message to include in the notification
        :param image: Optional. (PIL Image) Image to include in the notification
        :return: True if the notification was successfully sent
        """
        return self._custom_notifications.send_notification(self._settings, message, image)
# If you want your plugin to be registered within OctoPrint under a different name than what you defined in setup.py
# ("OctoPrint-PluginSkeleton"), you may define that here. Same goes for the other metadata derived from setup.py that
# can be overwritten via __plugin_xyz__ control properties. See the documentation for that.
__plugin_name__ = "OctoPod Plugin"
__plugin_pythoncompat__ = ">=2.7,<4"

def __plugin_load__():
    """Entry point called by OctoPrint's plugin loader.

    Registers the plugin implementation, the hooks it consumes
    (software-update configuration and received-GCODE processing) and
    the push-notification helper other plugins may call.
    """
    global __plugin_implementation__
    __plugin_implementation__ = OctopodPlugin()

    global __plugin_hooks__
    __plugin_hooks__ = {
        "octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information,
        "octoprint.comm.protocol.gcode.received": __plugin_implementation__.process_gcode
    }

    global __plugin_helpers__
    __plugin_helpers__ = {
        "apns_notification": __plugin_implementation__.push_notification
    }
| 1.789063 | 2 |
tests/test_buffers.py | romanchyla/CSPatterns | 0 | 4815 | from cspatterns.datastructures import buffer
def test_circular_buffer():
    """Exercise CircularBuffer creation and ring traversal via a
    Fibonacci-style recurrence over the declared node attributes."""
    # A size-2 buffer exposes the declared attribute ('n', unset -> None)
    # and a 'next' chain of length 2.
    b = buffer.CircularBuffer(2, ['n'])
    assert len(b.next) == 2
    assert b.n is None

    # Build a ring whose nodes carry an index 'n' and a value 'fib', and
    # seed the two nodes behind the current one.
    b = buffer.CircularBuffer.create(2, attrs=['n', 'fib'])
    curr = b
    out = [0, 1, ]
    curr.prev[-2].n = 0
    curr.prev[-2].fib = 1
    curr.prev[-1].n = 1
    curr.prev[-1].fib = 1
    # we are going to calculate fibonacci
    # Each iteration derives the new value from the two predecessors and
    # advances around the ring, exercising the prev/next bookkeeping.
    while curr.prev[-1].n < 12:
        curr.n = curr.prev[-1].n + 1
        curr.fib = curr.prev[-1].fib + curr.prev[-2].fib
        out.append(curr.fib)
        curr = curr.next[1]

    assert out == [0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
plugins/redacted/migrations/0001_initial.py | 2600box/harvest | 9 | 4816 | # Generated by Django 2.1.7 on 2019-02-17 14:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the Redacted plugin: client configuration plus
    a log of throttled API requests."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='RedactedClientConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.TextField()),
                ('password', models.TextField()),
                # Nullable fields below are presumably populated after a
                # successful login -- confirm against the plugin code.
                ('cookies', models.TextField(null=True)),
                ('authkey', models.TextField(null=True)),
                ('passkey', models.TextField(null=True)),
                ('last_login_failed', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='RedactedThrottledRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('datetime', models.DateTimeField()),
                ('url', models.CharField(max_length=2048)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 1.90625 | 2 |
code_examples/plotting_data/hexbin.py | ezcitron/BasemapTutorial | 99 | 4817 | from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numpy import array
from numpy import max
# Transverse-Mercator map roughly covering Catalonia
# (lon -0.5..4, lat 39.8..43).
map = Basemap(llcrnrlon=-0.5,llcrnrlat=39.8,urcrnrlon=4.,urcrnrlat=43.,
             resolution='i', projection='tmerc', lat_0 = 39.5, lon_0 = 1)

map.readshapefile('../sample_files/lightnings', 'lightnings')

x = []
y = []
c = []

# Collect each lightning strike's position and its absolute amplitude.
for info, lightning in zip(map.lightnings_info, map.lightnings):
    x.append(lightning[0])
    y.append(lightning[1])
    if float(info['amplitude']) < 0:
        c.append(-1 * float(info['amplitude']))
    else:
        c.append(float(info['amplitude']))

# Figure 0: plain hexbin density with default binning.
plt.figure(0)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y))
map.colorbar(location='bottom')

# Figure 1: coarser grid, log-scaled counts, cells need >= 1 strike.
plt.figure(1)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', bins='log')
map.colorbar(location='bottom', format='%.1f', label='log(# lightnings)')

# Figure 2: same data, LogNorm colour scale with custom colorbar ticks.
plt.figure(2)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), gridsize=20, mincnt=1, cmap='summer', norm=colors.LogNorm())
cb = map.colorbar(location='bottom', format='%d', label='# lightnings')
cb.set_ticks([1, 5, 10, 15, 20, 25, 30])
cb.set_ticklabels([1, 5, 10, 15, 20, 25, 30])

# Figure 3: colour each cell by the amplitude reduced with `max`.
# NOTE: `max` is numpy's max (imported above), shadowing the builtin.
# NOTE(review): the reduce function is max but the label says 'Mean
# amplitude' -- confirm which is intended.
plt.figure(3)
map.drawcoastlines()
map.readshapefile('../sample_files/comarques', 'comarques')
map.hexbin(array(x), array(y), C = array(c), reduce_C_function = max, gridsize=20, mincnt=1, cmap='YlOrBr', linewidths=0.5, edgecolors='k')
map.colorbar(location='bottom', label='Mean amplitude (kA)')

plt.show()
src/plugins/sjsy.py | 2443391447/nonebot2 | 1 | 4818 | <filename>src/plugins/sjsy.py
from nonebot import on_keyword, on_command
from nonebot.typing import T_State
from nonebot.adapters.cqhttp import Message, Bot, Event # 这两个没用的别删
from nonebot.adapters.cqhttp.message import MessageSegment
import requests
from nonebot.permission import *
from nonebot.rule import to_me
from aiocqhttp.exceptions import Error as CQHttpError
# Responder triggered by any message containing the keyword 随机摄影
# ("random photography").
sheying = on_keyword({'随机摄影'})
async def main(bot: Bot, event: Event, state: T_State):
msg = await downloads()
try:
await sheying.send(message=Message(msg))
except CQHttpError:
pass
async def downloads():
    """Fetch a random photography image URL and wrap it in a CQ image code.

    :return: str, a ``[CQ:image,file=...]`` segment pointing at the URL
        returned by the remote API's ``data`` field.
    :raises requests.RequestException: on network failure or timeout.
    """
    url = "https://yanghanwen.xyz/tu/ren.php"
    # Fix: requests.get() without a timeout can block forever; that is
    # especially bad here because requests is synchronous and this call
    # runs inside a coroutine, stalling the whole event loop while it waits.
    resp = requests.get(url, timeout=10).json()
    url_ing = resp['data']
    xians = f"[CQ:image,file={url_ing}]"
    return xians
src/extractors/emojiextractor.py | chmduquesne/rofimoji | 574 | 4819 | import html
from collections import namedtuple
from pathlib import Path
from typing import List, Dict
import requests
from bs4 import BeautifulSoup
from lxml import etree
from lxml.etree import XPath
Emoji = namedtuple('Emoji', 'char name')
class EmojiExtractor(object):
    """Scrape Unicode.org emoji data and write the picker's data files.

    Construction immediately downloads three datasets:
      * the full emoji chart (character + name),
      * the CLDR keyword annotations,
      * the set of emojis that accept skin-tone modifiers.
    """

    def __init__(self):
        self.all_emojis = self.fetch_emoji_list()
        self.annotations = self.fetch_annotations()
        self.base_emojis = self.fetch_base_emojis()

    def fetch_emoji_list(self: 'EmojiExtractor') -> List[Emoji]:
        """Download the full emoji chart and parse it into Emoji tuples."""
        print('Downloading list of all emojis')
        data = requests.get(
            'https://unicode.org/emoji/charts-14.0/full-emoji-list.html',
            timeout=120
        )  # type: requests.Response

        # Fix: this local used to be named `html`, shadowing the imported
        # stdlib `html` module that compile_entries() relies on.
        soup = BeautifulSoup(data.text, 'lxml')

        emojis = []
        for row in soup.find('table').find_all('tr'):
            if not row.th:  # skip section-header rows
                emoji = row.find('td', {'class': 'chars'}).string
                # The '⊛ ' prefix marks emojis new in this Unicode version.
                description = row.find('td', {'class': 'name'}).string.replace('⊛ ', '')
                emojis.append(Emoji(emoji, description))

        return emojis

    def fetch_annotations(self: 'EmojiExtractor') -> Dict[chr, List[str]]:
        """Download CLDR annotations mapping each emoji to keyword strings."""
        print('Downloading annotations')

        data = requests.get(
            'https://raw.githubusercontent.com/unicode-org/cldr/latest/common/annotations/en.xml',
            timeout=60
        )  # type: requests.Response

        # type="tts" entries are spoken names, not search keywords; skip them.
        xpath = XPath('./annotations/annotation[not(@type="tts")]')
        return {element.get('cp'): element.text.split(' | ')
                for element in xpath(etree.fromstring(data.content))}

    def fetch_base_emojis(self: 'EmojiExtractor') -> List[chr]:
        """Download the list of emojis with Emoji_Modifier_Base=Yes."""
        print('Downloading list of human emojis...')

        data = requests.get(
            'https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt',
            timeout=60
        )  # type: requests.Response

        # NOTE(review): the start/stop markers below match literal comment
        # lines in the upstream data file and will break silently if Unicode
        # rephrases them -- re-check on version bumps.
        started = False
        emojis = []
        for line in data.text.split('\n'):
            if not started and line != '# All omitted code points have Emoji_Modifier_Base=No ':
                continue
            started = True
            if line == '# Total elements: 132':
                break
            if line and not line.startswith('#'):
                emojis.extend(self.resolve_character_range(line.split(';')[0].strip()))

        return emojis

    def resolve_character_range(self, line: str) -> List[str]:
        """Expand 'XXXX..YYYY' hex ranges into a list of characters;
        entries without '..' fall through to resolve_character()."""
        try:
            (start, end) = line.split('..')
            return [chr(char) for char in range(int(start, 16), int(end, 16) + 1)]
        except ValueError:
            # No '..' present: a single code point (or sequence).
            return [self.resolve_character(line)]

    def resolve_character(self, string: str) -> str:
        """Turn a space-separated hex code-point sequence into a string."""
        return "".join(chr(int(character, 16)) for character in string.split(' '))

    def write_symbol_file(self: 'EmojiExtractor'):
        """Write one annotated line per emoji to the picker's data file."""
        print('Writing collected emojis to symbol file')
        with Path('../picker/data/emojis.csv').open('w') as symbol_file:
            for entry in self.compile_entries(self.all_emojis):
                symbol_file.write(entry + "\n")

    def compile_entries(self: 'EmojiExtractor', emojis: List[Emoji]) -> List[str]:
        """Render each emoji with its HTML-escaped name and, when present,
        its CLDR keywords (minus the plain name) in a <small> suffix."""
        annotated_emojis = []
        for emoji in emojis:
            entry = f"{emoji.char} {html.escape(emoji.name)}"
            if emoji.char in self.annotations:
                entry += f" <small>({html.escape(', '.join([annotation for annotation in self.annotations[emoji.char] if annotation != emoji.name]))})</small>"

            annotated_emojis.append(entry)

        return annotated_emojis

    def write_metadata_file(self: 'EmojiExtractor'):
        """Write the skin-tone-selectable emoji set as a Python literal."""
        print('Writing metadata to metadata file')
        with Path('../picker/copyme.py').open('w') as metadata_file:
            metadata_file.write('skin_tone_selectable_emojis={\'')
            metadata_file.write('\', \''.join(self.base_emojis))
            metadata_file.write('\'}\n')

    def extract(self: 'EmojiExtractor'):
        """Run the full extraction: symbol file, then metadata file."""
        self.write_symbol_file()
        self.write_metadata_file()
| 2.875 | 3 |
tests/integration_tests/security/test_seccomp.py | gregbdunn/firecracker | 2 | 4820 | <filename>tests/integration_tests/security/test_seccomp.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that the seccomp filters don't let blacklisted syscalls through."""
import os
from subprocess import run
import pytest
import host_tools.cargo_build as host # pylint:disable=import-error
@pytest.fixture
def tmp_basic_jailer(test_session_root_path):
    """Build `demo_basic_jailer`, required for the basic seccomp tests.

    :return: The path of the built binary.
    """
    # NOTE(review): sources are built from the 'demo_advanced_seccomp'
    # directory even though this fixture yields the *basic* jailer binary;
    # presumably that crate builds all demo binaries -- confirm.
    binaries_srcdir = os.path.normpath(
        os.path.join(
            os.getcwd(),
            'integration_tests/security/demo_advanced_seccomp/'
        )
    )
    build_path = os.path.join(
        test_session_root_path,
        host.CARGO_RELEASE_REL_PATH
    )
    # Release-mode build into the session-scoped cargo target directory.
    run("cd {} && CARGO_TARGET_DIR={} cargo build --release".format(
        binaries_srcdir, build_path), shell=True, check=True)

    release_binaries_path = os.path.join(
        host.CARGO_RELEASE_REL_PATH,
        host.RELEASE_BINARIES_REL_PATH
    )
    release_binaries_path = os.path.join(
        test_session_root_path,
        release_binaries_path
    )
    demo_basic_jailer = os.path.normpath(
        os.path.join(
            release_binaries_path,
            'demo_basic_jailer'
        )
    )
    yield demo_basic_jailer
    # Teardown: remove the built binary once the test is done.
    os.remove(demo_basic_jailer)
@pytest.fixture
def tmp_advanced_seccomp_binaries(test_session_root_path):
    """
    Build binaries required for the advanced seccomp tests.

    Build `demo_advanced_jailer`, `demo_harmless_firecracker`, and
    `demo_malicious_firecracker`.

    :return: The paths of the built binaries.
    """
    binaries_srcdir = os.path.normpath(
        os.path.join(
            os.getcwd(),
            'integration_tests/security/demo_advanced_seccomp/'
        )
    )
    build_path = os.path.join(
        test_session_root_path,
        host.CARGO_RELEASE_REL_PATH
    )
    # One cargo invocation builds all three demo binaries in release mode.
    run("cd {} && CARGO_TARGET_DIR={} cargo build --release".format(
        binaries_srcdir, build_path), shell=True, check=True)

    release_binaries_path = os.path.join(
        host.CARGO_RELEASE_REL_PATH,
        host.RELEASE_BINARIES_REL_PATH
    )
    release_binaries_path = os.path.join(
        test_session_root_path,
        release_binaries_path
    )
    demo_advanced_jailer = os.path.normpath(
        os.path.join(
            release_binaries_path,
            'demo_advanced_jailer'
        )
    )
    demo_harmless_firecracker = os.path.normpath(
        os.path.join(
            release_binaries_path,
            'demo_harmless_firecracker'
        )
    )
    demo_malicious_firecracker = os.path.normpath(
        os.path.join(
            release_binaries_path,
            'demo_malicious_firecracker'
        )
    )
    yield \
        demo_advanced_jailer, \
        demo_harmless_firecracker, \
        demo_malicious_firecracker
    # Teardown: remove all built binaries.
    os.remove(demo_advanced_jailer)
    os.remove(demo_harmless_firecracker)
    os.remove(demo_malicious_firecracker)
def test_seccomp_ls(tmp_basic_jailer):
    """Assert that the seccomp filters deny a blacklisted syscall."""
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.

    # Path to the `ls` binary, which attempts to execute `SYS_access`,
    # blacklisted for Firecracker.
    ls_command_path = '/bin/ls'
    demo_jailer = tmp_basic_jailer

    assert os.path.exists(demo_jailer)

    # Run `ls` under the demo jailer, which installs the seccomp filters
    # before handing control to the target binary.
    outcome = run([demo_jailer, ls_command_path])

    # The seccomp filters should send SIGSYS (31) to the binary. `ls` doesn't
    # handle it, so it will exit with error.
    assert outcome.returncode != 0
def test_advanced_seccomp_harmless(tmp_advanced_seccomp_binaries):
    """
    Test `demo_harmless_firecracker`.

    Test that the built demo jailer allows the built demo harmless firecracker.
    """
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.

    # The malicious binary from the fixture tuple is unused here.
    demo_advanced_jailer, demo_harmless_firecracker, _ =\
        tmp_advanced_seccomp_binaries

    assert os.path.exists(demo_advanced_jailer)
    assert os.path.exists(demo_harmless_firecracker)

    outcome = run([demo_advanced_jailer, demo_harmless_firecracker])

    # The demo harmless firecracker should have terminated gracefully.
    assert outcome.returncode == 0
def test_advanced_seccomp_malicious(tmp_advanced_seccomp_binaries):
    """
    Test `demo_malicious_firecracker`.

    Test that the built demo jailer denies the built demo malicious
    firecracker.
    """
    # pylint: disable=redefined-outer-name
    # The fixture pattern causes a pylint false positive for that rule.

    # The harmless binary from the fixture tuple is unused here.
    demo_advanced_jailer, _, demo_malicious_firecracker =\
        tmp_advanced_seccomp_binaries

    assert os.path.exists(demo_advanced_jailer)
    assert os.path.exists(demo_malicious_firecracker)

    outcome = run([demo_advanced_jailer, demo_malicious_firecracker])

    # The demo malicious firecracker should have received `SIGSYS`.
    assert outcome.returncode != 0
| 2 | 2 |
cluster/density/test.py | michealowen/MachingLearning | 2 | 4821 | <filename>cluster/density/test.py
class a:
    """Tiny demo class: stores one value and delegates work to the
    module-level helper dd()."""

    def __init__(self, da):
        # Keep the constructor argument as instance state.
        self.da = da

    def go(self):
        """Invoke the module-level helper dd(); always returns None."""
        dd()
        return None
def dd():
    """Print the fixed marker 'ok'; always returns None."""
    marker = 'ok'
    print(marker)
    return None
# Module-level smoke test: construct an instance and exercise go().
aa = a(1)
aa.go()
REM/Tool/IDA 7.3/python/ida_hexrays.py | dodieboy/Np_class | 0 | 4822 | <reponame>dodieboy/Np_class<gh_stars>0
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
IDA Plugin SDK API wrapper: hexrays
"""
from sys import version_info
if version_info >= (2,6,0):
    # Import the compiled SWIG extension, preferring the copy that sits
    # next to this wrapper so a same-named module elsewhere on sys.path
    # cannot shadow it.
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_ida_hexrays', [dirname(__file__)])
        except ImportError:
            # Not found next to the wrapper: fall back to a normal import.
            import _ida_hexrays
            return _ida_hexrays
        if fp is not None:
            try:
                _mod = imp.load_module('_ida_hexrays', fp, pathname, description)
            finally:
                fp.close()
            return _mod
    _ida_hexrays = swig_import_helper()
    del swig_import_helper
else:
    import _ida_hexrays
del version_info
# Alias `property` for use in generated proxies (absent before Python 2.2).
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    """SWIG attribute setter: route writes through the class's
    __swig_setmethods__ table. With static=1, assigning an attribute
    that has no registered setter raises AttributeError."""
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        # Dynamic mode: allow arbitrary new Python-side attributes.
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Convenience wrapper: non-static (dynamic) attribute setting, i.e.
    # attributes without a registered setter are stored on the instance.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Route attribute reads through the class's __swig_getmethods__ table;
    # unknown names raise AttributeError as usual.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)
def _swig_repr(self):
    # Best-effort repr including the underlying SWIG pointer; falls back
    # to an empty description when 'this' is unavailable.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Compatibility shim: use new-style classes where available (Python >= 2.2).
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    # Wrap a setter so only pre-existing attributes (or 'this'/'thisown')
    # may be assigned; creating new attributes raises AttributeError.
    def set_attr(self,name,value):
        if (name == "thisown"): return self.this.own(value)
        if hasattr(self,name) or (name == "this"):
            set(self,name,value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
# Use weakref proxies where supported; otherwise fall back to identity.
try:
    import weakref
    weakref_proxy = weakref.proxy
except:
    weakref_proxy = lambda x: x
import ida_idaapi
import sys
# IDA 6.95 backward-compatibility layer, toggled by IDAPython's global flag.
_BC695 = sys.modules["__main__"].IDAPYTHON_COMPAT_695_API

if _BC695:
    def bc695redef(func):
        # Decorator: register `func` as the replacement for a 6.95-era API.
        ida_idaapi._BC695.replace_fun(func)
        return func
import ida_pro
import ida_xref
import ida_typeinf
import ida_idp
# SWIG-generated pass-through wrapper around the native implementation.
def _kludge_use_TPopupMenu(*args):
    """
    _kludge_use_TPopupMenu(m)
    """
    return _ida_hexrays._kludge_use_TPopupMenu(*args)
class array_of_bitsets(object):
"""
Proxy of C++ qvector<(bitset_t)> class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> array_of_bitsets
__init__(self, x) -> array_of_bitsets
"""
this = _ida_hexrays.new_array_of_bitsets(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_array_of_bitsets
__del__ = lambda self : None;
def push_back(self, *args):
"""
push_back(self, x)
push_back(self) -> bitset_t
"""
return _ida_hexrays.array_of_bitsets_push_back(self, *args)
def pop_back(self, *args):
"""
pop_back(self)
"""
return _ida_hexrays.array_of_bitsets_pop_back(self, *args)
def size(self, *args):
"""
size(self) -> size_t
"""
return _ida_hexrays.array_of_bitsets_size(self, *args)
def empty(self, *args):
"""
empty(self) -> bool
"""
return _ida_hexrays.array_of_bitsets_empty(self, *args)
def at(self, *args):
"""
at(self, _idx) -> bitset_t
"""
return _ida_hexrays.array_of_bitsets_at(self, *args)
def qclear(self, *args):
"""
qclear(self)
"""
return _ida_hexrays.array_of_bitsets_qclear(self, *args)
def clear(self, *args):
"""
clear(self)
"""
return _ida_hexrays.array_of_bitsets_clear(self, *args)
def resize(self, *args):
"""
resize(self, _newsize, x)
resize(self, _newsize)
"""
return _ida_hexrays.array_of_bitsets_resize(self, *args)
def grow(self, *args):
"""
grow(self, x=bitset_t())
"""
return _ida_hexrays.array_of_bitsets_grow(self, *args)
def capacity(self, *args):
"""
capacity(self) -> size_t
"""
return _ida_hexrays.array_of_bitsets_capacity(self, *args)
def reserve(self, *args):
"""
reserve(self, cnt)
"""
return _ida_hexrays.array_of_bitsets_reserve(self, *args)
def truncate(self, *args):
"""
truncate(self)
"""
return _ida_hexrays.array_of_bitsets_truncate(self, *args)
def swap(self, *args):
"""
swap(self, r)
"""
return _ida_hexrays.array_of_bitsets_swap(self, *args)
def extract(self, *args):
"""
extract(self) -> bitset_t
"""
return _ida_hexrays.array_of_bitsets_extract(self, *args)
def inject(self, *args):
"""
inject(self, s, len)
"""
return _ida_hexrays.array_of_bitsets_inject(self, *args)
def __eq__(self, *args):
"""
__eq__(self, r) -> bool
"""
return _ida_hexrays.array_of_bitsets___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, r) -> bool
"""
return _ida_hexrays.array_of_bitsets___ne__(self, *args)
def begin(self, *args):
"""
begin(self) -> bitset_t
begin(self) -> bitset_t
"""
return _ida_hexrays.array_of_bitsets_begin(self, *args)
def end(self, *args):
"""
end(self) -> bitset_t
end(self) -> bitset_t
"""
return _ida_hexrays.array_of_bitsets_end(self, *args)
def insert(self, *args):
"""
insert(self, it, x) -> bitset_t
"""
return _ida_hexrays.array_of_bitsets_insert(self, *args)
def erase(self, *args):
"""
erase(self, it) -> bitset_t
erase(self, first, last) -> bitset_t
"""
return _ida_hexrays.array_of_bitsets_erase(self, *args)
def find(self, *args):
"""
find(self, x) -> bitset_t
find(self, x) -> bitset_t
"""
return _ida_hexrays.array_of_bitsets_find(self, *args)
def has(self, *args):
"""
has(self, x) -> bool
"""
return _ida_hexrays.array_of_bitsets_has(self, *args)
def add_unique(self, *args):
"""
add_unique(self, x) -> bool
"""
return _ida_hexrays.array_of_bitsets_add_unique(self, *args)
def _del(self, *args):
"""
_del(self, x) -> bool
"""
return _ida_hexrays.array_of_bitsets__del(self, *args)
def __len__(self, *args):
"""
__len__(self) -> size_t
"""
return _ida_hexrays.array_of_bitsets___len__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, i) -> bitset_t
"""
return _ida_hexrays.array_of_bitsets___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, i, v)
"""
return _ida_hexrays.array_of_bitsets___setitem__(self, *args)
front = ida_idaapi._qvector_front
back = ida_idaapi._qvector_back
__iter__ = ida_idaapi._bounded_getitem_iterator
array_of_bitsets_swigregister = _ida_hexrays.array_of_bitsets_swigregister
array_of_bitsets_swigregister(array_of_bitsets)
class mopvec_t(object):
"""
Proxy of C++ qvector<(mop_t)> class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> mopvec_t
__init__(self, x) -> mopvec_t
"""
this = _ida_hexrays.new_mopvec_t(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_mopvec_t
__del__ = lambda self : None;
def push_back(self, *args):
"""
push_back(self, x)
push_back(self) -> mop_t
"""
return _ida_hexrays.mopvec_t_push_back(self, *args)
def pop_back(self, *args):
"""
pop_back(self)
"""
return _ida_hexrays.mopvec_t_pop_back(self, *args)
def size(self, *args):
"""
size(self) -> size_t
"""
return _ida_hexrays.mopvec_t_size(self, *args)
def empty(self, *args):
"""
empty(self) -> bool
"""
return _ida_hexrays.mopvec_t_empty(self, *args)
def at(self, *args):
"""
at(self, _idx) -> mop_t
"""
return _ida_hexrays.mopvec_t_at(self, *args)
def qclear(self, *args):
"""
qclear(self)
"""
return _ida_hexrays.mopvec_t_qclear(self, *args)
def clear(self, *args):
"""
clear(self)
"""
return _ida_hexrays.mopvec_t_clear(self, *args)
def resize(self, *args):
"""
resize(self, _newsize, x)
resize(self, _newsize)
"""
return _ida_hexrays.mopvec_t_resize(self, *args)
def grow(self, *args):
"""
grow(self, x=mop_t())
"""
return _ida_hexrays.mopvec_t_grow(self, *args)
def capacity(self, *args):
"""
capacity(self) -> size_t
"""
return _ida_hexrays.mopvec_t_capacity(self, *args)
def reserve(self, *args):
"""
reserve(self, cnt)
"""
return _ida_hexrays.mopvec_t_reserve(self, *args)
def truncate(self, *args):
"""
truncate(self)
"""
return _ida_hexrays.mopvec_t_truncate(self, *args)
def swap(self, *args):
"""
swap(self, r)
"""
return _ida_hexrays.mopvec_t_swap(self, *args)
def extract(self, *args):
"""
extract(self) -> mop_t
"""
return _ida_hexrays.mopvec_t_extract(self, *args)
def inject(self, *args):
"""
inject(self, s, len)
"""
return _ida_hexrays.mopvec_t_inject(self, *args)
def __eq__(self, *args):
"""
__eq__(self, r) -> bool
"""
return _ida_hexrays.mopvec_t___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, r) -> bool
"""
return _ida_hexrays.mopvec_t___ne__(self, *args)
def begin(self, *args):
"""
begin(self) -> mop_t
begin(self) -> mop_t
"""
return _ida_hexrays.mopvec_t_begin(self, *args)
def end(self, *args):
"""
end(self) -> mop_t
end(self) -> mop_t
"""
return _ida_hexrays.mopvec_t_end(self, *args)
def insert(self, *args):
"""
insert(self, it, x) -> mop_t
"""
return _ida_hexrays.mopvec_t_insert(self, *args)
def erase(self, *args):
"""
erase(self, it) -> mop_t
erase(self, first, last) -> mop_t
"""
return _ida_hexrays.mopvec_t_erase(self, *args)
def find(self, *args):
"""
find(self, x) -> mop_t
find(self, x) -> mop_t
"""
return _ida_hexrays.mopvec_t_find(self, *args)
def has(self, *args):
"""
has(self, x) -> bool
"""
return _ida_hexrays.mopvec_t_has(self, *args)
def add_unique(self, *args):
"""
add_unique(self, x) -> bool
"""
return _ida_hexrays.mopvec_t_add_unique(self, *args)
def _del(self, *args):
"""
_del(self, x) -> bool
"""
return _ida_hexrays.mopvec_t__del(self, *args)
def __len__(self, *args):
"""
__len__(self) -> size_t
"""
return _ida_hexrays.mopvec_t___len__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, i) -> mop_t
"""
return _ida_hexrays.mopvec_t___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, i, v)
"""
return _ida_hexrays.mopvec_t___setitem__(self, *args)
front = ida_idaapi._qvector_front
back = ida_idaapi._qvector_back
__iter__ = ida_idaapi._bounded_getitem_iterator
mopvec_t_swigregister = _ida_hexrays.mopvec_t_swigregister
mopvec_t_swigregister(mopvec_t)
class mcallargs_t(object):
"""
Proxy of C++ qvector<(mcallarg_t)> class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> mcallargs_t
__init__(self, x) -> mcallargs_t
"""
this = _ida_hexrays.new_mcallargs_t(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_mcallargs_t
__del__ = lambda self : None;
def push_back(self, *args):
"""
push_back(self, x)
push_back(self) -> mcallarg_t
"""
return _ida_hexrays.mcallargs_t_push_back(self, *args)
def pop_back(self, *args):
"""
pop_back(self)
"""
return _ida_hexrays.mcallargs_t_pop_back(self, *args)
def size(self, *args):
"""
size(self) -> size_t
"""
return _ida_hexrays.mcallargs_t_size(self, *args)
def empty(self, *args):
"""
empty(self) -> bool
"""
return _ida_hexrays.mcallargs_t_empty(self, *args)
def at(self, *args):
"""
at(self, _idx) -> mcallarg_t
"""
return _ida_hexrays.mcallargs_t_at(self, *args)
def qclear(self, *args):
"""
qclear(self)
"""
return _ida_hexrays.mcallargs_t_qclear(self, *args)
def clear(self, *args):
"""
clear(self)
"""
return _ida_hexrays.mcallargs_t_clear(self, *args)
def resize(self, *args):
"""
resize(self, _newsize, x)
resize(self, _newsize)
"""
return _ida_hexrays.mcallargs_t_resize(self, *args)
def grow(self, *args):
"""
grow(self, x=mcallarg_t())
"""
return _ida_hexrays.mcallargs_t_grow(self, *args)
def capacity(self, *args):
"""
capacity(self) -> size_t
"""
return _ida_hexrays.mcallargs_t_capacity(self, *args)
def reserve(self, *args):
"""
reserve(self, cnt)
"""
return _ida_hexrays.mcallargs_t_reserve(self, *args)
def truncate(self, *args):
"""
truncate(self)
"""
return _ida_hexrays.mcallargs_t_truncate(self, *args)
def swap(self, *args):
"""
swap(self, r)
"""
return _ida_hexrays.mcallargs_t_swap(self, *args)
def extract(self, *args):
"""
extract(self) -> mcallarg_t
"""
return _ida_hexrays.mcallargs_t_extract(self, *args)
def inject(self, *args):
"""
inject(self, s, len)
"""
return _ida_hexrays.mcallargs_t_inject(self, *args)
def __eq__(self, *args):
"""
__eq__(self, r) -> bool
"""
return _ida_hexrays.mcallargs_t___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, r) -> bool
"""
return _ida_hexrays.mcallargs_t___ne__(self, *args)
def begin(self, *args):
"""
begin(self) -> mcallarg_t
begin(self) -> mcallarg_t
"""
return _ida_hexrays.mcallargs_t_begin(self, *args)
def end(self, *args):
"""
end(self) -> mcallarg_t
end(self) -> mcallarg_t
"""
return _ida_hexrays.mcallargs_t_end(self, *args)
def insert(self, *args):
"""
insert(self, it, x) -> mcallarg_t
"""
return _ida_hexrays.mcallargs_t_insert(self, *args)
def erase(self, *args):
"""
erase(self, it) -> mcallarg_t
erase(self, first, last) -> mcallarg_t
"""
return _ida_hexrays.mcallargs_t_erase(self, *args)
def find(self, *args):
"""
find(self, x) -> mcallarg_t
find(self, x) -> mcallarg_t
"""
return _ida_hexrays.mcallargs_t_find(self, *args)
def has(self, *args):
"""
has(self, x) -> bool
"""
return _ida_hexrays.mcallargs_t_has(self, *args)
def add_unique(self, *args):
"""
add_unique(self, x) -> bool
"""
return _ida_hexrays.mcallargs_t_add_unique(self, *args)
def _del(self, *args):
"""
_del(self, x) -> bool
"""
return _ida_hexrays.mcallargs_t__del(self, *args)
def __len__(self, *args):
"""
__len__(self) -> size_t
"""
return _ida_hexrays.mcallargs_t___len__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, i) -> mcallarg_t
"""
return _ida_hexrays.mcallargs_t___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, i, v)
"""
return _ida_hexrays.mcallargs_t___setitem__(self, *args)
front = ida_idaapi._qvector_front
back = ida_idaapi._qvector_back
__iter__ = ida_idaapi._bounded_getitem_iterator
mcallargs_t_swigregister = _ida_hexrays.mcallargs_t_swigregister
mcallargs_t_swigregister(mcallargs_t)
class block_chains_vec_t(object):
"""
Proxy of C++ qvector<(block_chains_t)> class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> block_chains_vec_t
__init__(self, x) -> block_chains_vec_t
"""
this = _ida_hexrays.new_block_chains_vec_t(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_block_chains_vec_t
__del__ = lambda self : None;
def push_back(self, *args):
"""
push_back(self, x)
push_back(self) -> block_chains_t
"""
return _ida_hexrays.block_chains_vec_t_push_back(self, *args)
def pop_back(self, *args):
"""
pop_back(self)
"""
return _ida_hexrays.block_chains_vec_t_pop_back(self, *args)
def size(self, *args):
"""
size(self) -> size_t
"""
return _ida_hexrays.block_chains_vec_t_size(self, *args)
def empty(self, *args):
"""
empty(self) -> bool
"""
return _ida_hexrays.block_chains_vec_t_empty(self, *args)
def at(self, *args):
"""
at(self, _idx) -> block_chains_t
"""
return _ida_hexrays.block_chains_vec_t_at(self, *args)
def qclear(self, *args):
"""
qclear(self)
"""
return _ida_hexrays.block_chains_vec_t_qclear(self, *args)
def clear(self, *args):
"""
clear(self)
"""
return _ida_hexrays.block_chains_vec_t_clear(self, *args)
def resize(self, *args):
"""
resize(self, _newsize, x)
resize(self, _newsize)
"""
return _ida_hexrays.block_chains_vec_t_resize(self, *args)
def grow(self, *args):
"""
grow(self, x=block_chains_t())
"""
return _ida_hexrays.block_chains_vec_t_grow(self, *args)
def capacity(self, *args):
"""
capacity(self) -> size_t
"""
return _ida_hexrays.block_chains_vec_t_capacity(self, *args)
def reserve(self, *args):
"""
reserve(self, cnt)
"""
return _ida_hexrays.block_chains_vec_t_reserve(self, *args)
def truncate(self, *args):
"""
truncate(self)
"""
return _ida_hexrays.block_chains_vec_t_truncate(self, *args)
def swap(self, *args):
"""
swap(self, r)
"""
return _ida_hexrays.block_chains_vec_t_swap(self, *args)
def extract(self, *args):
"""
extract(self) -> block_chains_t
"""
return _ida_hexrays.block_chains_vec_t_extract(self, *args)
def inject(self, *args):
"""
inject(self, s, len)
"""
return _ida_hexrays.block_chains_vec_t_inject(self, *args)
def begin(self, *args):
"""
begin(self) -> block_chains_t
begin(self) -> block_chains_t
"""
return _ida_hexrays.block_chains_vec_t_begin(self, *args)
def end(self, *args):
"""
end(self) -> block_chains_t
end(self) -> block_chains_t
"""
return _ida_hexrays.block_chains_vec_t_end(self, *args)
def insert(self, *args):
"""
insert(self, it, x) -> block_chains_t
"""
return _ida_hexrays.block_chains_vec_t_insert(self, *args)
def erase(self, *args):
"""
erase(self, it) -> block_chains_t
erase(self, first, last) -> block_chains_t
"""
return _ida_hexrays.block_chains_vec_t_erase(self, *args)
def __len__(self, *args):
"""
__len__(self) -> size_t
"""
return _ida_hexrays.block_chains_vec_t___len__(self, *args)
def __getitem__(self, *args):
"""
__getitem__(self, i) -> block_chains_t
"""
return _ida_hexrays.block_chains_vec_t___getitem__(self, *args)
def __setitem__(self, *args):
"""
__setitem__(self, i, v)
"""
return _ida_hexrays.block_chains_vec_t___setitem__(self, *args)
front = ida_idaapi._qvector_front
back = ida_idaapi._qvector_back
__iter__ = ida_idaapi._bounded_getitem_iterator
block_chains_vec_t_swigregister = _ida_hexrays.block_chains_vec_t_swigregister
block_chains_vec_t_swigregister(block_chains_vec_t)
class user_numforms_t(object):
    """
    Proxy of C++ std::map<(operand_locator_t,number_format_t)> class

    NOTE: SWIG-generated proxy -- hand edits are lost on regeneration.
    All methods delegate to the native _ida_hexrays extension.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def at(self, *args):
        """
        at(self, _Keyval) -> number_format_t
        """
        return _ida_hexrays.user_numforms_t_at(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.user_numforms_t_size(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_numforms_t
        """
        this = _ida_hexrays.new_user_numforms_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_user_numforms_t
    __del__ = lambda self : None;
user_numforms_t_swigregister = _ida_hexrays.user_numforms_t_swigregister
user_numforms_t_swigregister(user_numforms_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class lvar_mapping_t(object):
    """
    Proxy of C++ std::map<(lvar_locator_t,lvar_locator_t)> class

    Local-variable-to-local-variable mapping; every method delegates to
    the flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def at(self, *args):
        """
        at(self, _Keyval) -> lvar_locator_t
        """
        return _ida_hexrays.lvar_mapping_t_at(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.lvar_mapping_t_size(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> lvar_mapping_t
        """
        this = _ida_hexrays.new_lvar_mapping_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_lvar_mapping_t
    __del__ = lambda self : None;
lvar_mapping_t_swigregister = _ida_hexrays.lvar_mapping_t_swigregister
lvar_mapping_t_swigregister(lvar_mapping_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class hexwarns_t(object):
    """
    Proxy of C++ qvector<(hexwarn_t)> class

    Vector of decompiler warnings; mirrors the qvector interface and
    delegates every method to the flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> hexwarns_t
        __init__(self, x) -> hexwarns_t
        """
        this = _ida_hexrays.new_hexwarns_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_hexwarns_t
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> hexwarn_t
        """
        return _ida_hexrays.hexwarns_t_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.hexwarns_t_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.hexwarns_t_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.hexwarns_t_empty(self, *args)
    def at(self, *args):
        """
        at(self, _idx) -> hexwarn_t
        """
        return _ida_hexrays.hexwarns_t_at(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.hexwarns_t_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.hexwarns_t_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.hexwarns_t_resize(self, *args)
    def grow(self, *args):
        """
        grow(self, x=hexwarn_t())
        """
        return _ida_hexrays.hexwarns_t_grow(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.hexwarns_t_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.hexwarns_t_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.hexwarns_t_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.hexwarns_t_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> hexwarn_t
        """
        return _ida_hexrays.hexwarns_t_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.hexwarns_t_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.hexwarns_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.hexwarns_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> hexwarn_t
        begin(self) -> hexwarn_t
        """
        return _ida_hexrays.hexwarns_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> hexwarn_t
        end(self) -> hexwarn_t
        """
        return _ida_hexrays.hexwarns_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> hexwarn_t
        """
        return _ida_hexrays.hexwarns_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> hexwarn_t
        erase(self, first, last) -> hexwarn_t
        """
        return _ida_hexrays.hexwarns_t_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> hexwarn_t
        find(self, x) -> hexwarn_t
        """
        return _ida_hexrays.hexwarns_t_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.hexwarns_t_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.hexwarns_t_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.hexwarns_t__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.hexwarns_t___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> hexwarn_t
        """
        return _ida_hexrays.hexwarns_t___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.hexwarns_t___setitem__(self, *args)
    # qvector convenience accessors and bounds-checked iteration.
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
hexwarns_t_swigregister = _ida_hexrays.hexwarns_t_swigregister
hexwarns_t_swigregister(hexwarns_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class ctree_items_t(object):
    """
    Proxy of C++ qvector<(p.citem_t)> class

    Vector of citem_t pointers (ctree items); mirrors the qvector
    interface and delegates every method to the flat _ida_hexrays
    C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> ctree_items_t
        __init__(self, x) -> ctree_items_t
        """
        this = _ida_hexrays.new_ctree_items_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_ctree_items_t
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> citem_t *&
        """
        return _ida_hexrays.ctree_items_t_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.ctree_items_t_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.ctree_items_t_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.ctree_items_t_empty(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.ctree_items_t_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.ctree_items_t_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.ctree_items_t_resize(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.ctree_items_t_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.ctree_items_t_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.ctree_items_t_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.ctree_items_t_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> citem_t **
        """
        return _ida_hexrays.ctree_items_t_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.ctree_items_t_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.ctree_items_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.ctree_items_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> qvector< citem_t * >::iterator
        begin(self) -> qvector< citem_t * >::const_iterator
        """
        return _ida_hexrays.ctree_items_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> qvector< citem_t * >::iterator
        end(self) -> qvector< citem_t * >::const_iterator
        """
        return _ida_hexrays.ctree_items_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> qvector< citem_t * >::iterator
        """
        return _ida_hexrays.ctree_items_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> qvector< citem_t * >::iterator
        erase(self, first, last) -> qvector< citem_t * >::iterator
        """
        return _ida_hexrays.ctree_items_t_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> qvector< citem_t * >::iterator
        find(self, x) -> qvector< citem_t * >::const_iterator
        """
        return _ida_hexrays.ctree_items_t_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.ctree_items_t_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.ctree_items_t_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.ctree_items_t__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.ctree_items_t___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> citem_t
        """
        return _ida_hexrays.ctree_items_t___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.ctree_items_t___setitem__(self, *args)
    # qvector convenience accessors and bounds-checked iteration.
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
    def at(self, *args):
        """
        at(self, n) -> citem_t
        """
        return _ida_hexrays.ctree_items_t_at(self, *args)
ctree_items_t_swigregister = _ida_hexrays.ctree_items_t_swigregister
ctree_items_t_swigregister(ctree_items_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class user_labels_t(object):
    """
    Proxy of C++ std::map<(int,qstring)> class

    User-defined labels keyed by label number; every method delegates to
    the flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def at(self, *args):
        """
        at(self, _Keyval) -> _qstring< char > &
        """
        return _ida_hexrays.user_labels_t_at(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.user_labels_t_size(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_labels_t
        """
        this = _ida_hexrays.new_user_labels_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_user_labels_t
    __del__ = lambda self : None;
user_labels_t_swigregister = _ida_hexrays.user_labels_t_swigregister
user_labels_t_swigregister(user_labels_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class user_cmts_t(object):
    """
    Proxy of C++ std::map<(treeloc_t,citem_cmt_t)> class

    User comments keyed by ctree location; every method delegates to the
    flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def at(self, *args):
        """
        at(self, _Keyval) -> citem_cmt_t
        """
        return _ida_hexrays.user_cmts_t_at(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.user_cmts_t_size(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_cmts_t
        """
        this = _ida_hexrays.new_user_cmts_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_user_cmts_t
    __del__ = lambda self : None;
user_cmts_t_swigregister = _ida_hexrays.user_cmts_t_swigregister
user_cmts_t_swigregister(user_cmts_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class user_iflags_t(object):
    """
    Proxy of C++ std::map<(citem_locator_t,int32)> class

    Per-item user flags keyed by ctree item locator; every method
    delegates to the flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def at(self, *args):
        """
        at(self, _Keyval) -> int &
        """
        return _ida_hexrays.user_iflags_t_at(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.user_iflags_t_size(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_iflags_t
        """
        this = _ida_hexrays.new_user_iflags_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_user_iflags_t
    __del__ = lambda self : None;
user_iflags_t_swigregister = _ida_hexrays.user_iflags_t_swigregister
user_iflags_t_swigregister(user_iflags_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class user_unions_t(object):
    """
    Proxy of C++ std::map<(ea_t,intvec_t)> class

    User-selected union member paths keyed by address; every method
    delegates to the flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def at(self, *args):
        """
        at(self, _Keyval) -> intvec_t
        """
        return _ida_hexrays.user_unions_t_at(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.user_unions_t_size(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_unions_t
        """
        this = _ida_hexrays.new_user_unions_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_user_unions_t
    __del__ = lambda self : None;
user_unions_t_swigregister = _ida_hexrays.user_unions_t_swigregister
user_unions_t_swigregister(user_unions_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class cinsnptrvec_t(object):
    """
    Proxy of C++ qvector<(p.cinsn_t)> class

    Vector of cinsn_t pointers; mirrors the qvector interface and
    delegates every method to the flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> cinsnptrvec_t
        __init__(self, x) -> cinsnptrvec_t
        """
        this = _ida_hexrays.new_cinsnptrvec_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_cinsnptrvec_t
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> cinsn_t *&
        """
        return _ida_hexrays.cinsnptrvec_t_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.cinsnptrvec_t_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.cinsnptrvec_t_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.cinsnptrvec_t_empty(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.cinsnptrvec_t_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.cinsnptrvec_t_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.cinsnptrvec_t_resize(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.cinsnptrvec_t_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.cinsnptrvec_t_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.cinsnptrvec_t_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.cinsnptrvec_t_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> cinsn_t **
        """
        return _ida_hexrays.cinsnptrvec_t_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.cinsnptrvec_t_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cinsnptrvec_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cinsnptrvec_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> qvector< cinsn_t * >::iterator
        begin(self) -> qvector< cinsn_t * >::const_iterator
        """
        return _ida_hexrays.cinsnptrvec_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> qvector< cinsn_t * >::iterator
        end(self) -> qvector< cinsn_t * >::const_iterator
        """
        return _ida_hexrays.cinsnptrvec_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> qvector< cinsn_t * >::iterator
        """
        return _ida_hexrays.cinsnptrvec_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> qvector< cinsn_t * >::iterator
        erase(self, first, last) -> qvector< cinsn_t * >::iterator
        """
        return _ida_hexrays.cinsnptrvec_t_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> qvector< cinsn_t * >::iterator
        find(self, x) -> qvector< cinsn_t * >::const_iterator
        """
        return _ida_hexrays.cinsnptrvec_t_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.cinsnptrvec_t_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.cinsnptrvec_t_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.cinsnptrvec_t__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.cinsnptrvec_t___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> cinsn_t
        """
        return _ida_hexrays.cinsnptrvec_t___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.cinsnptrvec_t___setitem__(self, *args)
    # qvector convenience accessors and bounds-checked iteration.
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
    def at(self, *args):
        """
        at(self, n) -> cinsn_t
        """
        return _ida_hexrays.cinsnptrvec_t_at(self, *args)
cinsnptrvec_t_swigregister = _ida_hexrays.cinsnptrvec_t_swigregister
cinsnptrvec_t_swigregister(cinsnptrvec_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class eamap_t(object):
    """
    Proxy of C++ std::map<(ea_t,cinsnptrvec_t)> class

    Instruction vectors keyed by address; every method delegates to the
    flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def at(self, *args):
        """
        at(self, _Keyval) -> cinsnptrvec_t
        """
        return _ida_hexrays.eamap_t_at(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.eamap_t_size(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> eamap_t
        """
        this = _ida_hexrays.new_eamap_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_eamap_t
    __del__ = lambda self : None;
eamap_t_swigregister = _ida_hexrays.eamap_t_swigregister
eamap_t_swigregister(eamap_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class boundaries_t(object):
    """
    Proxy of C++ std::map<(p.cinsn_t,rangeset_t)> class

    Address range sets keyed by instruction pointer; every method
    delegates to the flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def at(self, *args):
        """
        at(self, _Keyval) -> rangeset_t
        """
        return _ida_hexrays.boundaries_t_at(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.boundaries_t_size(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> boundaries_t
        """
        this = _ida_hexrays.new_boundaries_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_boundaries_t
    __del__ = lambda self : None;
boundaries_t_swigregister = _ida_hexrays.boundaries_t_swigregister
boundaries_t_swigregister(boundaries_t)
def user_iflags_second(*args):
    """
    user_iflags_second(p) -> int32 const &
    Get reference to the current map value.
    @param p (C++: user_iflags_iterator_t)
    """
    # SWIG-generated delegation to the flat C wrapper.
    return _ida_hexrays.user_iflags_second(*args)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class cfuncptr_t(object):
    """
    Proxy of C++ qrefcnt_t<(cfunc_t)> class

    Reference-counted smart pointer to a decompiled function (cfunc_t).
    The proxy forwards both the smart-pointer operations (reset/deref)
    and the pointee's cfunc_t methods/attributes to the flat
    _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, p) -> cfuncptr_t
        __init__(self, r) -> cfuncptr_t
        """
        this = _ida_hexrays.new_cfuncptr_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    def reset(self, *args):
        """
        reset(self)
        """
        return _ida_hexrays.cfuncptr_t_reset(self, *args)
    def __deref__(self, *args):
        """
        __deref__(self) -> cfunc_t
        """
        return _ida_hexrays.cfuncptr_t___deref__(self, *args)
    def __ref__(self, *args):
        """
        __ref__(self) -> cfunc_t
        """
        return _ida_hexrays.cfuncptr_t___ref__(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_cfuncptr_t
    __del__ = lambda self : None;
    # Attributes forwarded to the underlying cfunc_t object.
    entry_ea = _swig_property(_ida_hexrays.cfuncptr_t_entry_ea_get, _ida_hexrays.cfuncptr_t_entry_ea_set)
    mba = _swig_property(_ida_hexrays.cfuncptr_t_mba_get, _ida_hexrays.cfuncptr_t_mba_set)
    body = _swig_property(_ida_hexrays.cfuncptr_t_body_get, _ida_hexrays.cfuncptr_t_body_set)
    # Read-only: no setter is generated for argidx.
    argidx = _swig_property(_ida_hexrays.cfuncptr_t_argidx_get)
    maturity = _swig_property(_ida_hexrays.cfuncptr_t_maturity_get, _ida_hexrays.cfuncptr_t_maturity_set)
    user_labels = _swig_property(_ida_hexrays.cfuncptr_t_user_labels_get, _ida_hexrays.cfuncptr_t_user_labels_set)
    user_cmts = _swig_property(_ida_hexrays.cfuncptr_t_user_cmts_get, _ida_hexrays.cfuncptr_t_user_cmts_set)
    numforms = _swig_property(_ida_hexrays.cfuncptr_t_numforms_get, _ida_hexrays.cfuncptr_t_numforms_set)
    user_iflags = _swig_property(_ida_hexrays.cfuncptr_t_user_iflags_get, _ida_hexrays.cfuncptr_t_user_iflags_set)
    user_unions = _swig_property(_ida_hexrays.cfuncptr_t_user_unions_get, _ida_hexrays.cfuncptr_t_user_unions_set)
    refcnt = _swig_property(_ida_hexrays.cfuncptr_t_refcnt_get, _ida_hexrays.cfuncptr_t_refcnt_set)
    statebits = _swig_property(_ida_hexrays.cfuncptr_t_statebits_get, _ida_hexrays.cfuncptr_t_statebits_set)
    hdrlines = _swig_property(_ida_hexrays.cfuncptr_t_hdrlines_get, _ida_hexrays.cfuncptr_t_hdrlines_set)
    treeitems = _swig_property(_ida_hexrays.cfuncptr_t_treeitems_get, _ida_hexrays.cfuncptr_t_treeitems_set)
    def release(self, *args):
        """
        release(self)
        """
        return _ida_hexrays.cfuncptr_t_release(self, *args)
    def build_c_tree(self, *args):
        """
        build_c_tree(self)
        """
        return _ida_hexrays.cfuncptr_t_build_c_tree(self, *args)
    def verify(self, *args):
        """
        verify(self, aul, even_without_debugger)
        """
        return _ida_hexrays.cfuncptr_t_verify(self, *args)
    def print_dcl(self, *args):
        """
        print_dcl(self)
        """
        return _ida_hexrays.cfuncptr_t_print_dcl(self, *args)
    def print_func(self, *args):
        """
        print_func(self, vp)
        """
        return _ida_hexrays.cfuncptr_t_print_func(self, *args)
    def get_func_type(self, *args):
        """
        get_func_type(self, type) -> bool
        """
        return _ida_hexrays.cfuncptr_t_get_func_type(self, *args)
    def get_lvars(self, *args):
        """
        get_lvars(self) -> lvars_t
        """
        return _ida_hexrays.cfuncptr_t_get_lvars(self, *args)
    def get_stkoff_delta(self, *args):
        """
        get_stkoff_delta(self) -> sval_t
        """
        return _ida_hexrays.cfuncptr_t_get_stkoff_delta(self, *args)
    def find_label(self, *args):
        """
        find_label(self, label) -> citem_t
        """
        return _ida_hexrays.cfuncptr_t_find_label(self, *args)
    def remove_unused_labels(self, *args):
        """
        remove_unused_labels(self)
        """
        return _ida_hexrays.cfuncptr_t_remove_unused_labels(self, *args)
    def get_user_cmt(self, *args):
        """
        get_user_cmt(self, loc, rt) -> char const *
        """
        return _ida_hexrays.cfuncptr_t_get_user_cmt(self, *args)
    def set_user_cmt(self, *args):
        """
        set_user_cmt(self, loc, cmt)
        """
        return _ida_hexrays.cfuncptr_t_set_user_cmt(self, *args)
    def get_user_iflags(self, *args):
        """
        get_user_iflags(self, loc) -> int32
        """
        return _ida_hexrays.cfuncptr_t_get_user_iflags(self, *args)
    def set_user_iflags(self, *args):
        """
        set_user_iflags(self, loc, iflags)
        """
        return _ida_hexrays.cfuncptr_t_set_user_iflags(self, *args)
    def has_orphan_cmts(self, *args):
        """
        has_orphan_cmts(self) -> bool
        """
        return _ida_hexrays.cfuncptr_t_has_orphan_cmts(self, *args)
    def del_orphan_cmts(self, *args):
        """
        del_orphan_cmts(self) -> int
        """
        return _ida_hexrays.cfuncptr_t_del_orphan_cmts(self, *args)
    def get_user_union_selection(self, *args):
        """
        get_user_union_selection(self, ea, path) -> bool
        """
        return _ida_hexrays.cfuncptr_t_get_user_union_selection(self, *args)
    def set_user_union_selection(self, *args):
        """
        set_user_union_selection(self, ea, path)
        """
        return _ida_hexrays.cfuncptr_t_set_user_union_selection(self, *args)
    def save_user_labels(self, *args):
        """
        save_user_labels(self)
        """
        return _ida_hexrays.cfuncptr_t_save_user_labels(self, *args)
    def save_user_cmts(self, *args):
        """
        save_user_cmts(self)
        """
        return _ida_hexrays.cfuncptr_t_save_user_cmts(self, *args)
    def save_user_numforms(self, *args):
        """
        save_user_numforms(self)
        """
        return _ida_hexrays.cfuncptr_t_save_user_numforms(self, *args)
    def save_user_iflags(self, *args):
        """
        save_user_iflags(self)
        """
        return _ida_hexrays.cfuncptr_t_save_user_iflags(self, *args)
    def save_user_unions(self, *args):
        """
        save_user_unions(self)
        """
        return _ida_hexrays.cfuncptr_t_save_user_unions(self, *args)
    def get_line_item(self, *args):
        """
        get_line_item(self, line, x, is_ctree_line, phead, pitem, ptail) -> bool
        """
        return _ida_hexrays.cfuncptr_t_get_line_item(self, *args)
    def get_warnings(self, *args):
        """
        get_warnings(self) -> hexwarns_t
        """
        return _ida_hexrays.cfuncptr_t_get_warnings(self, *args)
    def get_eamap(self, *args):
        """
        get_eamap(self) -> eamap_t
        """
        return _ida_hexrays.cfuncptr_t_get_eamap(self, *args)
    def get_boundaries(self, *args):
        """
        get_boundaries(self) -> boundaries_t
        """
        return _ida_hexrays.cfuncptr_t_get_boundaries(self, *args)
    def get_pseudocode(self, *args):
        """
        get_pseudocode(self) -> strvec_t
        """
        return _ida_hexrays.cfuncptr_t_get_pseudocode(self, *args)
    def refresh_func_ctext(self, *args):
        """
        refresh_func_ctext(self)
        """
        return _ida_hexrays.cfuncptr_t_refresh_func_ctext(self, *args)
    def gather_derefs(self, *args):
        """
        gather_derefs(self, ci, udm=None) -> bool
        """
        return _ida_hexrays.cfuncptr_t_gather_derefs(self, *args)
    def find_item_coords(self, *args):
        """
        find_item_coords(self, item, px, py) -> bool
        find_item_coords(self, item) -> PyObject *
        """
        return _ida_hexrays.cfuncptr_t_find_item_coords(self, *args)
    def __str__(self, *args):
        """
        __str__(self) -> qstring
        """
        return _ida_hexrays.cfuncptr_t___str__(self, *args)
cfuncptr_t_swigregister = _ida_hexrays.cfuncptr_t_swigregister
cfuncptr_t_swigregister(cfuncptr_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class qvector_history_t(object):
    """
    Proxy of C++ qvector<(history_item_t)> class

    Vector of history items (base for history_t); mirrors the qvector
    interface and delegates every method to the flat _ida_hexrays
    C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> qvector_history_t
        __init__(self, x) -> qvector_history_t
        """
        this = _ida_hexrays.new_qvector_history_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_qvector_history_t
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> history_item_t
        """
        return _ida_hexrays.qvector_history_t_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.qvector_history_t_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.qvector_history_t_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.qvector_history_t_empty(self, *args)
    def at(self, *args):
        """
        at(self, _idx) -> history_item_t
        """
        return _ida_hexrays.qvector_history_t_at(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.qvector_history_t_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.qvector_history_t_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.qvector_history_t_resize(self, *args)
    def grow(self, *args):
        """
        grow(self, x=history_item_t())
        """
        return _ida_hexrays.qvector_history_t_grow(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.qvector_history_t_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.qvector_history_t_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.qvector_history_t_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.qvector_history_t_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> history_item_t
        """
        return _ida_hexrays.qvector_history_t_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.qvector_history_t_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.qvector_history_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.qvector_history_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> history_item_t
        begin(self) -> history_item_t
        """
        return _ida_hexrays.qvector_history_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> history_item_t
        end(self) -> history_item_t
        """
        return _ida_hexrays.qvector_history_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> history_item_t
        """
        return _ida_hexrays.qvector_history_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> history_item_t
        erase(self, first, last) -> history_item_t
        """
        return _ida_hexrays.qvector_history_t_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> history_item_t
        find(self, x) -> history_item_t
        """
        return _ida_hexrays.qvector_history_t_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.qvector_history_t_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.qvector_history_t_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.qvector_history_t__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.qvector_history_t___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> history_item_t
        """
        return _ida_hexrays.qvector_history_t___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.qvector_history_t___setitem__(self, *args)
    # qvector convenience accessors and bounds-checked iteration.
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
qvector_history_t_swigregister = _ida_hexrays.qvector_history_t_swigregister
qvector_history_t_swigregister(qvector_history_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class history_t(qvector_history_t):
    """
    Proxy of C++ qstack<(history_item_t)> class

    Stack of history items built on the qvector_history_t base; adds the
    stack operations (push/pop/top), all delegating to the flat
    _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def pop(self, *args):
        """
        pop(self) -> history_item_t
        """
        return _ida_hexrays.history_t_pop(self, *args)
    def top(self, *args):
        """
        top(self) -> history_item_t
        top(self) -> history_item_t
        """
        return _ida_hexrays.history_t_top(self, *args)
    def push(self, *args):
        """
        push(self, v)
        """
        return _ida_hexrays.history_t_push(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> history_t
        """
        this = _ida_hexrays.new_history_t(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_history_t
    __del__ = lambda self : None;
history_t_swigregister = _ida_hexrays.history_t_swigregister
history_t_swigregister(history_t)
# SWIG-generated proxy; machine-generated code, do not edit by hand.
class qlist_cinsn_t_iterator(object):
    """
    Proxy of C++ qlist_cinsn_t_iterator class

    Iterator over a qlist of cinsn_t; exposes the current element (cur),
    advancement (next) and iterator comparison, all delegating to the
    flat _ida_hexrays C wrapper.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Read-only: current element of the iteration.
    cur = _swig_property(_ida_hexrays.qlist_cinsn_t_iterator_cur_get)
    def next(self, *args):
        """
        next(self)
        """
        return _ida_hexrays.qlist_cinsn_t_iterator_next(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, x) -> bool
        """
        return _ida_hexrays.qlist_cinsn_t_iterator___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, x) -> bool
        """
        return _ida_hexrays.qlist_cinsn_t_iterator___ne__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> qlist_cinsn_t_iterator
        """
        this = _ida_hexrays.new_qlist_cinsn_t_iterator(*args)
        # Standard SWIG ownership boilerplate.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_qlist_cinsn_t_iterator
    __del__ = lambda self : None;
qlist_cinsn_t_iterator_swigregister = _ida_hexrays.qlist_cinsn_t_iterator_swigregister
qlist_cinsn_t_iterator_swigregister(qlist_cinsn_t_iterator)
class qvector_lvar_t(object):
    """
    Proxy of C++ qvector<(lvar_t)> class

    SWIG-generated wrapper; behaves like a mutable Python sequence of
    lvar_t items (supports len(), indexing, and iteration).  Every
    method delegates to the _ida_hexrays C extension.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> qvector_lvar_t
        __init__(self, x) -> qvector_lvar_t
        """
        this = _ida_hexrays.new_qvector_lvar_t(*args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_qvector_lvar_t
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> lvar_t
        """
        return _ida_hexrays.qvector_lvar_t_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.qvector_lvar_t_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.qvector_lvar_t_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.qvector_lvar_t_empty(self, *args)
    def at(self, *args):
        """
        at(self, _idx) -> lvar_t
        """
        return _ida_hexrays.qvector_lvar_t_at(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.qvector_lvar_t_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.qvector_lvar_t_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.qvector_lvar_t_resize(self, *args)
    def grow(self, *args):
        """
        grow(self, x=lvar_t())
        """
        return _ida_hexrays.qvector_lvar_t_grow(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.qvector_lvar_t_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.qvector_lvar_t_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.qvector_lvar_t_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.qvector_lvar_t_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> lvar_t
        """
        return _ida_hexrays.qvector_lvar_t_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.qvector_lvar_t_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.qvector_lvar_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.qvector_lvar_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> lvar_t
        begin(self) -> lvar_t
        """
        return _ida_hexrays.qvector_lvar_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> lvar_t
        end(self) -> lvar_t
        """
        return _ida_hexrays.qvector_lvar_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> lvar_t
        """
        return _ida_hexrays.qvector_lvar_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> lvar_t
        erase(self, first, last) -> lvar_t
        """
        return _ida_hexrays.qvector_lvar_t_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> lvar_t
        find(self, x) -> lvar_t
        """
        return _ida_hexrays.qvector_lvar_t_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.qvector_lvar_t_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.qvector_lvar_t_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.qvector_lvar_t__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.qvector_lvar_t___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> lvar_t
        """
        return _ida_hexrays.qvector_lvar_t___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.qvector_lvar_t___setitem__(self, *args)
    # List-like conveniences shared by all qvector proxies (ida_idaapi).
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
# Register the proxy class with the SWIG runtime type system.
qvector_lvar_t_swigregister = _ida_hexrays.qvector_lvar_t_swigregister
qvector_lvar_t_swigregister(qvector_lvar_t)
class qlist_cinsn_t(object):
    """
    Proxy of C++ qlist<(cinsn_t)> class

    SWIG-generated wrapper over a doubly-linked list of cinsn_t
    (C-tree statements).  Every method delegates to the _ida_hexrays
    C extension; traversal uses qlist_cinsn_t_iterator.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> qlist_cinsn_t
        __init__(self, x) -> qlist_cinsn_t
        """
        this = _ida_hexrays.new_qlist_cinsn_t(*args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_qlist_cinsn_t
    __del__ = lambda self : None;
    def swap(self, *args):
        """
        swap(self, x)
        """
        return _ida_hexrays.qlist_cinsn_t_swap(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.qlist_cinsn_t_empty(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.qlist_cinsn_t_size(self, *args)
    def front(self, *args):
        """
        front(self) -> cinsn_t
        front(self) -> cinsn_t
        """
        return _ida_hexrays.qlist_cinsn_t_front(self, *args)
    def back(self, *args):
        """
        back(self) -> cinsn_t
        back(self) -> cinsn_t
        """
        return _ida_hexrays.qlist_cinsn_t_back(self, *args)
    def rbegin(self, *args):
        """
        rbegin(self) -> qlist< cinsn_t >::reverse_iterator
        rbegin(self) -> qlist< cinsn_t >::const_reverse_iterator
        """
        return _ida_hexrays.qlist_cinsn_t_rbegin(self, *args)
    def rend(self, *args):
        """
        rend(self) -> qlist< cinsn_t >::reverse_iterator
        rend(self) -> qlist< cinsn_t >::const_reverse_iterator
        """
        return _ida_hexrays.qlist_cinsn_t_rend(self, *args)
    def push_front(self, *args):
        """
        push_front(self, x)
        """
        return _ida_hexrays.qlist_cinsn_t_push_front(self, *args)
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> cinsn_t
        """
        return _ida_hexrays.qlist_cinsn_t_push_back(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.qlist_cinsn_t_clear(self, *args)
    def pop_front(self, *args):
        """
        pop_front(self)
        """
        return _ida_hexrays.qlist_cinsn_t_pop_front(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.qlist_cinsn_t_pop_back(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, x) -> bool
        """
        return _ida_hexrays.qlist_cinsn_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, x) -> bool
        """
        return _ida_hexrays.qlist_cinsn_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> qlist_cinsn_t_iterator
        """
        return _ida_hexrays.qlist_cinsn_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> qlist_cinsn_t_iterator
        """
        return _ida_hexrays.qlist_cinsn_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, p, x) -> qlist< cinsn_t >::iterator
        insert(self, p) -> qlist< cinsn_t >::iterator
        insert(self, p, x) -> qlist_cinsn_t_iterator
        """
        return _ida_hexrays.qlist_cinsn_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, p) -> qlist< cinsn_t >::iterator
        erase(self, p1, p2)
        erase(self, p)
        """
        return _ida_hexrays.qlist_cinsn_t_erase(self, *args)
# Register the proxy class with the SWIG runtime type system.
qlist_cinsn_t_swigregister = _ida_hexrays.qlist_cinsn_t_swigregister
qlist_cinsn_t_swigregister(qlist_cinsn_t)
class qvector_carg_t(object):
    """
    Proxy of C++ qvector<(carg_t)> class

    SWIG-generated wrapper; behaves like a mutable Python sequence of
    carg_t items (supports len(), indexing, and iteration).  Every
    method delegates to the _ida_hexrays C extension.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> qvector_carg_t
        __init__(self, x) -> qvector_carg_t
        """
        this = _ida_hexrays.new_qvector_carg_t(*args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_qvector_carg_t
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> carg_t
        """
        return _ida_hexrays.qvector_carg_t_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.qvector_carg_t_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.qvector_carg_t_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.qvector_carg_t_empty(self, *args)
    def at(self, *args):
        """
        at(self, _idx) -> carg_t
        """
        return _ida_hexrays.qvector_carg_t_at(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.qvector_carg_t_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.qvector_carg_t_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.qvector_carg_t_resize(self, *args)
    def grow(self, *args):
        """
        grow(self, x=carg_t())
        """
        return _ida_hexrays.qvector_carg_t_grow(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.qvector_carg_t_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.qvector_carg_t_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.qvector_carg_t_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.qvector_carg_t_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> carg_t
        """
        return _ida_hexrays.qvector_carg_t_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.qvector_carg_t_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.qvector_carg_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.qvector_carg_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> carg_t
        begin(self) -> carg_t
        """
        return _ida_hexrays.qvector_carg_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> carg_t
        end(self) -> carg_t
        """
        return _ida_hexrays.qvector_carg_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> carg_t
        """
        return _ida_hexrays.qvector_carg_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> carg_t
        erase(self, first, last) -> carg_t
        """
        return _ida_hexrays.qvector_carg_t_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> carg_t
        find(self, x) -> carg_t
        """
        return _ida_hexrays.qvector_carg_t_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.qvector_carg_t_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.qvector_carg_t_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.qvector_carg_t__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.qvector_carg_t___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> carg_t
        """
        return _ida_hexrays.qvector_carg_t___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.qvector_carg_t___setitem__(self, *args)
    # List-like conveniences shared by all qvector proxies (ida_idaapi).
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
# Register the proxy class with the SWIG runtime type system.
qvector_carg_t_swigregister = _ida_hexrays.qvector_carg_t_swigregister
qvector_carg_t_swigregister(qvector_carg_t)
class qvector_ccase_t(object):
    """
    Proxy of C++ qvector<(ccase_t)> class

    SWIG-generated wrapper; behaves like a mutable Python sequence of
    ccase_t items (supports len(), indexing, and iteration).  Every
    method delegates to the _ida_hexrays C extension.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> qvector_ccase_t
        __init__(self, x) -> qvector_ccase_t
        """
        this = _ida_hexrays.new_qvector_ccase_t(*args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_qvector_ccase_t
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> ccase_t
        """
        return _ida_hexrays.qvector_ccase_t_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.qvector_ccase_t_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.qvector_ccase_t_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.qvector_ccase_t_empty(self, *args)
    def at(self, *args):
        """
        at(self, _idx) -> ccase_t
        """
        return _ida_hexrays.qvector_ccase_t_at(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.qvector_ccase_t_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.qvector_ccase_t_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.qvector_ccase_t_resize(self, *args)
    def grow(self, *args):
        """
        grow(self, x=ccase_t())
        """
        return _ida_hexrays.qvector_ccase_t_grow(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.qvector_ccase_t_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.qvector_ccase_t_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.qvector_ccase_t_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.qvector_ccase_t_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> ccase_t
        """
        return _ida_hexrays.qvector_ccase_t_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.qvector_ccase_t_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.qvector_ccase_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.qvector_ccase_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> ccase_t
        begin(self) -> ccase_t
        """
        return _ida_hexrays.qvector_ccase_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> ccase_t
        end(self) -> ccase_t
        """
        return _ida_hexrays.qvector_ccase_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> ccase_t
        """
        return _ida_hexrays.qvector_ccase_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> ccase_t
        erase(self, first, last) -> ccase_t
        """
        return _ida_hexrays.qvector_ccase_t_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> ccase_t
        find(self, x) -> ccase_t
        """
        return _ida_hexrays.qvector_ccase_t_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.qvector_ccase_t_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.qvector_ccase_t_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.qvector_ccase_t__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.qvector_ccase_t___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> ccase_t
        """
        return _ida_hexrays.qvector_ccase_t___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.qvector_ccase_t___setitem__(self, *args)
    # List-like conveniences shared by all qvector proxies (ida_idaapi).
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
# Register the proxy class with the SWIG runtime type system.
qvector_ccase_t_swigregister = _ida_hexrays.qvector_ccase_t_swigregister
qvector_ccase_t_swigregister(qvector_ccase_t)
class lvar_saved_infos_t(object):
    """
    Proxy of C++ qvector<(lvar_saved_info_t)> class

    SWIG-generated wrapper; behaves like a mutable Python sequence of
    lvar_saved_info_t items (supports len(), indexing, and iteration).
    Every method delegates to the _ida_hexrays C extension.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> lvar_saved_infos_t
        __init__(self, x) -> lvar_saved_infos_t
        """
        this = _ida_hexrays.new_lvar_saved_infos_t(*args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_lvar_saved_infos_t
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_saved_infos_t_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.lvar_saved_infos_t_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.lvar_saved_infos_t_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.lvar_saved_infos_t_empty(self, *args)
    def at(self, *args):
        """
        at(self, _idx) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_saved_infos_t_at(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.lvar_saved_infos_t_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.lvar_saved_infos_t_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.lvar_saved_infos_t_resize(self, *args)
    def grow(self, *args):
        """
        grow(self, x=lvar_saved_info_t())
        """
        return _ida_hexrays.lvar_saved_infos_t_grow(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.lvar_saved_infos_t_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.lvar_saved_infos_t_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.lvar_saved_infos_t_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.lvar_saved_infos_t_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_saved_infos_t_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.lvar_saved_infos_t_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.lvar_saved_infos_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.lvar_saved_infos_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> lvar_saved_info_t
        begin(self) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_saved_infos_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> lvar_saved_info_t
        end(self) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_saved_infos_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_saved_infos_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> lvar_saved_info_t
        erase(self, first, last) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_saved_infos_t_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> lvar_saved_info_t
        find(self, x) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_saved_infos_t_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.lvar_saved_infos_t_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.lvar_saved_infos_t_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.lvar_saved_infos_t__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.lvar_saved_infos_t___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_saved_infos_t___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.lvar_saved_infos_t___setitem__(self, *args)
    # List-like conveniences shared by all qvector proxies (ida_idaapi).
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
# Register the proxy class with the SWIG runtime type system.
lvar_saved_infos_t_swigregister = _ida_hexrays.lvar_saved_infos_t_swigregister
lvar_saved_infos_t_swigregister(lvar_saved_infos_t)
class ui_stroff_ops_t(object):
    """
    Proxy of C++ qvector<(ui_stroff_op_t)> class

    SWIG-generated wrapper; behaves like a mutable Python sequence of
    ui_stroff_op_t items (supports len(), indexing, and iteration).
    Every method delegates to the _ida_hexrays C extension.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> ui_stroff_ops_t
        __init__(self, x) -> ui_stroff_ops_t
        """
        this = _ida_hexrays.new_ui_stroff_ops_t(*args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_ui_stroff_ops_t
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> ui_stroff_op_t
        """
        return _ida_hexrays.ui_stroff_ops_t_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.ui_stroff_ops_t_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.ui_stroff_ops_t_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.ui_stroff_ops_t_empty(self, *args)
    def at(self, *args):
        """
        at(self, _idx) -> ui_stroff_op_t
        """
        return _ida_hexrays.ui_stroff_ops_t_at(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.ui_stroff_ops_t_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.ui_stroff_ops_t_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.ui_stroff_ops_t_resize(self, *args)
    def grow(self, *args):
        """
        grow(self, x=ui_stroff_op_t())
        """
        return _ida_hexrays.ui_stroff_ops_t_grow(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.ui_stroff_ops_t_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.ui_stroff_ops_t_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.ui_stroff_ops_t_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.ui_stroff_ops_t_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> ui_stroff_op_t
        """
        return _ida_hexrays.ui_stroff_ops_t_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.ui_stroff_ops_t_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.ui_stroff_ops_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.ui_stroff_ops_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> ui_stroff_op_t
        begin(self) -> ui_stroff_op_t
        """
        return _ida_hexrays.ui_stroff_ops_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> ui_stroff_op_t
        end(self) -> ui_stroff_op_t
        """
        return _ida_hexrays.ui_stroff_ops_t_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> ui_stroff_op_t
        """
        return _ida_hexrays.ui_stroff_ops_t_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> ui_stroff_op_t
        erase(self, first, last) -> ui_stroff_op_t
        """
        return _ida_hexrays.ui_stroff_ops_t_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> ui_stroff_op_t
        find(self, x) -> ui_stroff_op_t
        """
        return _ida_hexrays.ui_stroff_ops_t_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.ui_stroff_ops_t_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.ui_stroff_ops_t_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.ui_stroff_ops_t__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.ui_stroff_ops_t___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> ui_stroff_op_t
        """
        return _ida_hexrays.ui_stroff_ops_t___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.ui_stroff_ops_t___setitem__(self, *args)
    # List-like conveniences shared by all qvector proxies (ida_idaapi).
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
# Register the proxy class with the SWIG runtime type system.
ui_stroff_ops_t_swigregister = _ida_hexrays.ui_stroff_ops_t_swigregister
ui_stroff_ops_t_swigregister(ui_stroff_ops_t)
def qswap(*args):
    """
    qswap(a, b)

    Thin wrapper over the C++ qswap specialization; forwards its
    arguments unchanged to the _ida_hexrays C extension.
    """
    return _ida_hexrays.qswap(*args)
class fnum_array(object):
    """
    Proxy of C++ wrapped_array_t<(uint16,6)> class

    Fixed-size array of six uint16 values; supports len(), indexing,
    item assignment, and iteration.  Delegates to _ida_hexrays.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Read-only access to the wrapped C array.
    data = _swig_property(_ida_hexrays.fnum_array_data_get)
    def __init__(self, *args):
        """
        __init__(self, data) -> fnum_array
        """
        this = _ida_hexrays.new_fnum_array(*args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.fnum_array___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> unsigned short const &
        """
        return _ida_hexrays.fnum_array___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.fnum_array___setitem__(self, *args)
    # Iteration helper bounded by __len__ (from ida_idaapi).
    __iter__ = ida_idaapi._bounded_getitem_iterator
    __swig_destroy__ = _ida_hexrays.delete_fnum_array
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime type system.
fnum_array_swigregister = _ida_hexrays.fnum_array_swigregister
fnum_array_swigregister(fnum_array)
def debug_hexrays_ctree(*args):
    """
    debug_hexrays_ctree(msg)

    Debugging helper; forwards its arguments unchanged to the
    _ida_hexrays C extension.
    """
    return _ida_hexrays.debug_hexrays_ctree(*args)
def init_hexrays_plugin(*args):
    """
    init_hexrays_plugin(flags=0) -> bool

    Initialize your plugin for hex-rays decompiler. This function must be
    called before calling any other decompiler function. It initializes
    the pointer to the dispatcher.
    @param flags: reserved, must be 0 (C++: int)
    @return: true if the decompiler exists and the dispatcher pointer is
             ready to use.
    """
    return _ida_hexrays.init_hexrays_plugin(*args)
def get_widget_vdui(*args):
    """
    get_widget_vdui(f) -> vdui_t

    Get the 'vdui_t' instance associated to the TWidget
    @param f: pointer to window (C++: TWidget *)
    @return: a vdui_t *, or NULL
    """
    return _ida_hexrays.get_widget_vdui(*args)
def boundaries_find(*args):
    """
    boundaries_find(map, key) -> boundaries_iterator_t

    Find the specified key in boundaries_t.
    @param map (C++: const boundaries_t *)
    @param key (C++: const cinsn_t *&)
    """
    return _ida_hexrays.boundaries_find(*args)
def boundaries_insert(*args):
    """
    boundaries_insert(map, key, val) -> boundaries_iterator_t

    Insert new ( 'cinsn_t' *, 'rangeset_t' ) pair into boundaries_t.
    @param map (C++: boundaries_t *)
    @param key (C++: const cinsn_t *&)
    @param val (C++: const rangeset_t &)
    """
    return _ida_hexrays.boundaries_insert(*args)
def term_hexrays_plugin(*args):
    """
    term_hexrays_plugin()

    Stop working with hex-rays decompiler.
    """
    return _ida_hexrays.term_hexrays_plugin(*args)
class Hexrays_Hooks(object):
    """
    Proxy of C++ Hexrays_Hooks class

    Subclass this and override the callback methods of interest, then
    call hook() to start receiving decompiler events and unhook() to
    stop.  Each callback delegates to the _ida_hexrays C extension.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, _flags=0) -> Hexrays_Hooks
        """
        # SWIG director idiom: pass None when instantiating the plain
        # base class so no virtual dispatch back into Python is set up;
        # subclasses pass self so their overrides are called from C++.
        if self.__class__ == Hexrays_Hooks:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_Hexrays_Hooks(_self, *args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_Hexrays_Hooks
    __del__ = lambda self : None;
    def hook(self, *args):
        """
        hook(self) -> bool
        """
        return _ida_hexrays.Hexrays_Hooks_hook(self, *args)
    def unhook(self, *args):
        """
        unhook(self) -> bool
        """
        return _ida_hexrays.Hexrays_Hooks_unhook(self, *args)
    def flowchart(self, *args):
        """
        flowchart(self, fc) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_flowchart(self, *args)
    def stkpnts(self, *args):
        """
        stkpnts(self, mba, _sps) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_stkpnts(self, *args)
    def prolog(self, *args):
        """
        prolog(self, mba, fc, reachable_blocks) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_prolog(self, *args)
    def microcode(self, *args):
        """
        microcode(self, mba) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_microcode(self, *args)
    def preoptimized(self, *args):
        """
        preoptimized(self, mba) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_preoptimized(self, *args)
    def locopt(self, *args):
        """
        locopt(self, mba) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_locopt(self, *args)
    def prealloc(self, *args):
        """
        prealloc(self, mba) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_prealloc(self, *args)
    def glbopt(self, *args):
        """
        glbopt(self, mba) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_glbopt(self, *args)
    def structural(self, *args):
        """
        structural(self, ct) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_structural(self, *args)
    def maturity(self, *args):
        """
        maturity(self, cfunc, new_maturity) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_maturity(self, *args)
    def interr(self, *args):
        """
        interr(self, errcode) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_interr(self, *args)
    def combine(self, *args):
        """
        combine(self, blk, insn) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_combine(self, *args)
    def print_func(self, *args):
        """
        print_func(self, cfunc, vp) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_print_func(self, *args)
    def func_printed(self, *args):
        """
        func_printed(self, cfunc) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_func_printed(self, *args)
    def resolve_stkaddrs(self, *args):
        """
        resolve_stkaddrs(self, mba) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_resolve_stkaddrs(self, *args)
    def open_pseudocode(self, *args):
        """
        open_pseudocode(self, vu) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_open_pseudocode(self, *args)
    def switch_pseudocode(self, *args):
        """
        switch_pseudocode(self, vu) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_switch_pseudocode(self, *args)
    def refresh_pseudocode(self, *args):
        """
        refresh_pseudocode(self, vu) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_refresh_pseudocode(self, *args)
    def close_pseudocode(self, *args):
        """
        close_pseudocode(self, vu) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_close_pseudocode(self, *args)
    def keyboard(self, *args):
        """
        keyboard(self, vu, key_code, shift_state) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_keyboard(self, *args)
    def right_click(self, *args):
        """
        right_click(self, vu) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_right_click(self, *args)
    def double_click(self, *args):
        """
        double_click(self, vu, shift_state) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_double_click(self, *args)
    def curpos(self, *args):
        """
        curpos(self, vu) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_curpos(self, *args)
    def create_hint(self, *args):
        """
        create_hint(self, vu) -> PyObject *
        """
        return _ida_hexrays.Hexrays_Hooks_create_hint(self, *args)
    def text_ready(self, *args):
        """
        text_ready(self, vu) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_text_ready(self, *args)
    def populating_popup(self, *args):
        """
        populating_popup(self, widget, popup_handle, vu) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_populating_popup(self, *args)
    def lvar_name_changed(self, *args):
        """
        lvar_name_changed(self, vu, v, name, is_user_name) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_lvar_name_changed(self, *args)
    def lvar_type_changed(self, *args):
        """
        lvar_type_changed(self, vu, v, tinfo) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_lvar_type_changed(self, *args)
    def lvar_cmt_changed(self, *args):
        """
        lvar_cmt_changed(self, vu, v, cmt) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_lvar_cmt_changed(self, *args)
    def lvar_mapping_changed(self, *args):
        """
        lvar_mapping_changed(self, vu, frm, to) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_lvar_mapping_changed(self, *args)
    def cmt_changed(self, *args):
        """
        cmt_changed(self, cfunc, loc, cmt) -> int
        """
        return _ida_hexrays.Hexrays_Hooks_cmt_changed(self, *args)
    def __disown__(self):
        # Transfer ownership of the C++ object to the SWIG runtime and
        # return a weak proxy so Python no longer keeps it alive.
        self.this.disown()
        _ida_hexrays.disown_Hexrays_Hooks(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime type system.
Hexrays_Hooks_swigregister = _ida_hexrays.Hexrays_Hooks_swigregister
Hexrays_Hooks_swigregister(Hexrays_Hooks)
class uval_ivl_t(object):
    """
    Proxy of C++ ivl_tpl<(uval_t)> class

    An interval described by its starting offset and size.  Delegates
    to the _ida_hexrays C extension.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Interval start offset and length, exposed as read/write properties.
    off = _swig_property(_ida_hexrays.uval_ivl_t_off_get, _ida_hexrays.uval_ivl_t_off_set)
    size = _swig_property(_ida_hexrays.uval_ivl_t_size_get, _ida_hexrays.uval_ivl_t_size_set)
    def __init__(self, *args):
        """
        __init__(self, _off, _size) -> uval_ivl_t
        """
        this = _ida_hexrays.new_uval_ivl_t(*args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    def valid(self, *args):
        """
        valid(self) -> bool
        """
        return _ida_hexrays.uval_ivl_t_valid(self, *args)
    def end(self, *args):
        """
        end(self) -> unsigned long long
        """
        return _ida_hexrays.uval_ivl_t_end(self, *args)
    def last(self, *args):
        """
        last(self) -> unsigned long long
        """
        return _ida_hexrays.uval_ivl_t_last(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_uval_ivl_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime type system.
uval_ivl_t_swigregister = _ida_hexrays.uval_ivl_t_swigregister
uval_ivl_t_swigregister(uval_ivl_t)
class uval_ivl_ivlset_t(object):
    """
    Proxy of C++ ivlset_tpl<(ivl_t,uval_t)> class

    A set of intervals (ivl_t) over uval_t offsets.  Delegates to the
    _ida_hexrays C extension.
    """
    # True when Python owns the underlying C++ object and must free it.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> uval_ivl_ivlset_t
        __init__(self, ivl) -> uval_ivl_ivlset_t
        """
        this = _ida_hexrays.new_uval_ivl_ivlset_t(*args)
        # SWIG pointer-storage idiom: newer runtimes keep a list of C
        # pointers in self.this; older ones store the pointer directly.
        try: self.this.append(this)
        except: self.this = this
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.uval_ivl_ivlset_t_swap(self, *args)
    def getivl(self, *args):
        """
        getivl(self, idx) -> ivl_t
        """
        return _ida_hexrays.uval_ivl_ivlset_t_getivl(self, *args)
    def lastivl(self, *args):
        """
        lastivl(self) -> ivl_t
        """
        return _ida_hexrays.uval_ivl_ivlset_t_lastivl(self, *args)
    def nivls(self, *args):
        """
        nivls(self) -> size_t
        """
        return _ida_hexrays.uval_ivl_ivlset_t_nivls(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.uval_ivl_ivlset_t_empty(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.uval_ivl_ivlset_t_clear(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.uval_ivl_ivlset_t_qclear(self, *args)
    def all_values(self, *args):
        """
        all_values(self) -> bool
        """
        return _ida_hexrays.uval_ivl_ivlset_t_all_values(self, *args)
    def set_all_values(self, *args):
        """
        set_all_values(self)
        """
        return _ida_hexrays.uval_ivl_ivlset_t_set_all_values(self, *args)
    def single_value(self, *args):
        """
        single_value(self, v) -> bool
        """
        return _ida_hexrays.uval_ivl_ivlset_t_single_value(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, v) -> bool
        """
        return _ida_hexrays.uval_ivl_ivlset_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, v) -> bool
        """
        return _ida_hexrays.uval_ivl_ivlset_t___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> ivlset_tpl< ivl_t,unsigned long long >::const_iterator
        begin(self) -> ivlset_tpl< ivl_t,unsigned long long >::iterator
        """
        return _ida_hexrays.uval_ivl_ivlset_t_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> ivlset_tpl< ivl_t,unsigned long long >::const_iterator
        end(self) -> ivlset_tpl< ivl_t,unsigned long long >::iterator
        """
        return _ida_hexrays.uval_ivl_ivlset_t_end(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_uval_ivl_ivlset_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime type system.
uval_ivl_ivlset_t_swigregister = _ida_hexrays.uval_ivl_ivlset_t_swigregister
uval_ivl_ivlset_t_swigregister(uval_ivl_ivlset_t)
# SWIG-generated proxy for qvector<ivlset_t>: a growable array of interval
# sets with a qvector-style C++ API plus Python sequence protocol support
# (__len__/__getitem__/__setitem__/__iter__). All calls delegate to the
# _ida_hexrays C extension.
class array_of_ivlsets(object):
    """
    Proxy of C++ qvector<(ivlset_t)> class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> array_of_ivlsets
        __init__(self, x) -> array_of_ivlsets
        """
        this = _ida_hexrays.new_array_of_ivlsets(*args)
        # Standard SWIG ownership dance: attach the new C++ object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_array_of_ivlsets
    __del__ = lambda self : None;
    def push_back(self, *args):
        """
        push_back(self, x)
        push_back(self) -> ivlset_t
        """
        return _ida_hexrays.array_of_ivlsets_push_back(self, *args)
    def pop_back(self, *args):
        """
        pop_back(self)
        """
        return _ida_hexrays.array_of_ivlsets_pop_back(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.array_of_ivlsets_size(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.array_of_ivlsets_empty(self, *args)
    def at(self, *args):
        """
        at(self, _idx) -> ivlset_t
        """
        return _ida_hexrays.array_of_ivlsets_at(self, *args)
    def qclear(self, *args):
        """
        qclear(self)
        """
        return _ida_hexrays.array_of_ivlsets_qclear(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.array_of_ivlsets_clear(self, *args)
    def resize(self, *args):
        """
        resize(self, _newsize, x)
        resize(self, _newsize)
        """
        return _ida_hexrays.array_of_ivlsets_resize(self, *args)
    def grow(self, *args):
        """
        grow(self, x=ivlset_t())
        """
        return _ida_hexrays.array_of_ivlsets_grow(self, *args)
    def capacity(self, *args):
        """
        capacity(self) -> size_t
        """
        return _ida_hexrays.array_of_ivlsets_capacity(self, *args)
    def reserve(self, *args):
        """
        reserve(self, cnt)
        """
        return _ida_hexrays.array_of_ivlsets_reserve(self, *args)
    def truncate(self, *args):
        """
        truncate(self)
        """
        return _ida_hexrays.array_of_ivlsets_truncate(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.array_of_ivlsets_swap(self, *args)
    def extract(self, *args):
        """
        extract(self) -> ivlset_t
        """
        return _ida_hexrays.array_of_ivlsets_extract(self, *args)
    def inject(self, *args):
        """
        inject(self, s, len)
        """
        return _ida_hexrays.array_of_ivlsets_inject(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.array_of_ivlsets___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.array_of_ivlsets___ne__(self, *args)
    def begin(self, *args):
        """
        begin(self) -> ivlset_t
        begin(self) -> ivlset_t
        """
        return _ida_hexrays.array_of_ivlsets_begin(self, *args)
    def end(self, *args):
        """
        end(self) -> ivlset_t
        end(self) -> ivlset_t
        """
        return _ida_hexrays.array_of_ivlsets_end(self, *args)
    def insert(self, *args):
        """
        insert(self, it, x) -> ivlset_t
        """
        return _ida_hexrays.array_of_ivlsets_insert(self, *args)
    def erase(self, *args):
        """
        erase(self, it) -> ivlset_t
        erase(self, first, last) -> ivlset_t
        """
        return _ida_hexrays.array_of_ivlsets_erase(self, *args)
    def find(self, *args):
        """
        find(self, x) -> ivlset_t
        find(self, x) -> ivlset_t
        """
        return _ida_hexrays.array_of_ivlsets_find(self, *args)
    def has(self, *args):
        """
        has(self, x) -> bool
        """
        return _ida_hexrays.array_of_ivlsets_has(self, *args)
    def add_unique(self, *args):
        """
        add_unique(self, x) -> bool
        """
        return _ida_hexrays.array_of_ivlsets_add_unique(self, *args)
    def _del(self, *args):
        """
        _del(self, x) -> bool
        """
        return _ida_hexrays.array_of_ivlsets__del(self, *args)
    def __len__(self, *args):
        """
        __len__(self) -> size_t
        """
        return _ida_hexrays.array_of_ivlsets___len__(self, *args)
    def __getitem__(self, *args):
        """
        __getitem__(self, i) -> ivlset_t
        """
        return _ida_hexrays.array_of_ivlsets___getitem__(self, *args)
    def __setitem__(self, *args):
        """
        __setitem__(self, i, v)
        """
        return _ida_hexrays.array_of_ivlsets___setitem__(self, *args)
    # Python conveniences borrowed from ida_idaapi: front()/back() accessors
    # and an index-bounded iterator built on __getitem__/__len__.
    front = ida_idaapi._qvector_front
    back = ida_idaapi._qvector_back
    __iter__ = ida_idaapi._bounded_getitem_iterator
# Register the proxy class with the SWIG runtime.
array_of_ivlsets_swigregister = _ida_hexrays.array_of_ivlsets_swigregister
array_of_ivlsets_swigregister(array_of_ivlsets)
# Re-exported constant from the C extension.
MAX_SUPPORTED_STACK_SIZE = _ida_hexrays.MAX_SUPPORTED_STACK_SIZE
# Raw allocator pair exposed by the Hex-Rays runtime; memory returned by
# hexrays_alloc must be released with hexrays_free, not Python's allocator.
def hexrays_alloc(*args):
    """
    hexrays_alloc(size) -> void *
    """
    return _ida_hexrays.hexrays_alloc(*args)
def hexrays_free(*args):
    """
    hexrays_free(ptr)
    """
    return _ida_hexrays.hexrays_free(*args)
MAX_VLR_SIZE = _ida_hexrays.MAX_VLR_SIZE
# Comparison codes used by valrng_t.set_cmp() and friends
# (not-zero, zero, unsigned above/below, signed greater/less, etc.).
CMP_NZ = _ida_hexrays.CMP_NZ
CMP_Z = _ida_hexrays.CMP_Z
CMP_AE = _ida_hexrays.CMP_AE
CMP_B = _ida_hexrays.CMP_B
CMP_A = _ida_hexrays.CMP_A
CMP_BE = _ida_hexrays.CMP_BE
CMP_GT = _ida_hexrays.CMP_GT
CMP_GE = _ida_hexrays.CMP_GE
CMP_LT = _ida_hexrays.CMP_LT
CMP_LE = _ida_hexrays.CMP_LE
# SWIG-generated proxy for the C++ valrng_t (value range) class: represents
# the set of possible values of an expression/register, supporting set
# algebra (intersect/unite/inverse) and comparisons. All methods delegate
# to the _ida_hexrays C extension.
class valrng_t(object):
    """
    Proxy of C++ valrng_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, size_=MAX_VLR_SIZE) -> valrng_t
        __init__(self, r) -> valrng_t
        """
        this = _ida_hexrays.new_valrng_t(*args)
        # Standard SWIG ownership dance: attach the new C++ object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_valrng_t
    __del__ = lambda self : None;
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.valrng_t_swap(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.valrng_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.valrng_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.valrng_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.valrng_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.valrng_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.valrng_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.valrng_t_compare(self, *args)
    # State setters: empty set, full set, unknown, exact value, or a
    # comparison-defined range (see CMP_* constants above).
    def set_none(self, *args):
        """
        set_none(self)
        """
        return _ida_hexrays.valrng_t_set_none(self, *args)
    def set_all(self, *args):
        """
        set_all(self)
        """
        return _ida_hexrays.valrng_t_set_all(self, *args)
    def set_unk(self, *args):
        """
        set_unk(self)
        """
        return _ida_hexrays.valrng_t_set_unk(self, *args)
    def set_eq(self, *args):
        """
        set_eq(self, v)
        """
        return _ida_hexrays.valrng_t_set_eq(self, *args)
    def set_cmp(self, *args):
        """
        set_cmp(self, cmp, _value)
        """
        return _ida_hexrays.valrng_t_set_cmp(self, *args)
    def reduce_size(self, *args):
        """
        reduce_size(self, new_size) -> bool
        """
        return _ida_hexrays.valrng_t_reduce_size(self, *args)
    # Set algebra: both return bool (per the SWIG signatures) — presumably
    # whether the range changed; confirm against the Hex-Rays SDK.
    def intersect_with(self, *args):
        """
        intersect_with(self, r) -> bool
        """
        return _ida_hexrays.valrng_t_intersect_with(self, *args)
    def unite_with(self, *args):
        """
        unite_with(self, r) -> bool
        """
        return _ida_hexrays.valrng_t_unite_with(self, *args)
    def inverse(self, *args):
        """
        inverse(self)
        """
        return _ida_hexrays.valrng_t_inverse(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.valrng_t_empty(self, *args)
    def all_values(self, *args):
        """
        all_values(self) -> bool
        """
        return _ida_hexrays.valrng_t_all_values(self, *args)
    def is_unknown(self, *args):
        """
        is_unknown(self) -> bool
        """
        return _ida_hexrays.valrng_t_is_unknown(self, *args)
    def has(self, *args):
        """
        has(self, v) -> bool
        """
        return _ida_hexrays.valrng_t_has(self, *args)
    def _print(self, *args):
        """
        _print(self)
        """
        return _ida_hexrays.valrng_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.valrng_t_dstr(self, *args)
    # Conversions back to simpler representations, when possible.
    def cvt_to_single_value(self, *args):
        """
        cvt_to_single_value(self) -> bool
        """
        return _ida_hexrays.valrng_t_cvt_to_single_value(self, *args)
    def cvt_to_cmp(self, *args):
        """
        cvt_to_cmp(self, strict) -> bool
        """
        return _ida_hexrays.valrng_t_cvt_to_cmp(self, *args)
    def get_size(self, *args):
        """
        get_size(self) -> int
        """
        return _ida_hexrays.valrng_t_get_size(self, *args)
    def max_value(self, *args):
        """
        max_value(self, size_) -> uvlr_t
        max_value(self) -> uvlr_t
        """
        return _ida_hexrays.valrng_t_max_value(self, *args)
    def min_svalue(self, *args):
        """
        min_svalue(self, size_) -> uvlr_t
        min_svalue(self) -> uvlr_t
        """
        return _ida_hexrays.valrng_t_min_svalue(self, *args)
    def max_svalue(self, *args):
        """
        max_svalue(self, size_) -> uvlr_t
        max_svalue(self) -> uvlr_t
        """
        return _ida_hexrays.valrng_t_max_svalue(self, *args)
    # Internal SWIG director bookkeeping helpers.
    def _register(self, *args):
        """
        _register(self)
        """
        return _ida_hexrays.valrng_t__register(self, *args)
    def _deregister(self, *args):
        """
        _deregister(self)
        """
        return _ida_hexrays.valrng_t__deregister(self, *args)
# Register the proxy class with the SWIG runtime.
valrng_t_swigregister = _ida_hexrays.valrng_t_swigregister
valrng_t_swigregister(valrng_t)
# Global C variables exposed by the extension; individual limits re-exported
# below for convenience.
cvar = _ida_hexrays.cvar
MAX_VALUE = cvar.MAX_VALUE
MAX_SVALUE = cvar.MAX_SVALUE
MIN_SVALUE = cvar.MIN_SVALUE
# Access-kind bit flags used by the use/def (maymust) analysis API.
NO_ACCESS = _ida_hexrays.NO_ACCESS
WRITE_ACCESS = _ida_hexrays.WRITE_ACCESS
READ_ACCESS = _ida_hexrays.READ_ACCESS
RW_ACCESS = _ida_hexrays.RW_ACCESS
def is_may_access(*args):
    """
    is_may_access(maymust) -> bool
    """
    return _ida_hexrays.is_may_access(*args)
# Microcode generation error codes (merror_t). MERR_OK means success;
# see get_merror_desc() below for textual descriptions.
MERR_OK = _ida_hexrays.MERR_OK
MERR_BLOCK = _ida_hexrays.MERR_BLOCK
MERR_INTERR = _ida_hexrays.MERR_INTERR
MERR_INSN = _ida_hexrays.MERR_INSN
MERR_MEM = _ida_hexrays.MERR_MEM
MERR_BADBLK = _ida_hexrays.MERR_BADBLK
MERR_BADSP = _ida_hexrays.MERR_BADSP
MERR_PROLOG = _ida_hexrays.MERR_PROLOG
MERR_SWITCH = _ida_hexrays.MERR_SWITCH
MERR_EXCEPTION = _ida_hexrays.MERR_EXCEPTION
MERR_HUGESTACK = _ida_hexrays.MERR_HUGESTACK
MERR_LVARS = _ida_hexrays.MERR_LVARS
MERR_BITNESS = _ida_hexrays.MERR_BITNESS
MERR_BADCALL = _ida_hexrays.MERR_BADCALL
MERR_BADFRAME = _ida_hexrays.MERR_BADFRAME
MERR_UNKTYPE = _ida_hexrays.MERR_UNKTYPE
MERR_BADIDB = _ida_hexrays.MERR_BADIDB
MERR_SIZEOF = _ida_hexrays.MERR_SIZEOF
MERR_REDO = _ida_hexrays.MERR_REDO
MERR_CANCELED = _ida_hexrays.MERR_CANCELED
MERR_RECDEPTH = _ida_hexrays.MERR_RECDEPTH
MERR_OVERLAP = _ida_hexrays.MERR_OVERLAP
MERR_PARTINIT = _ida_hexrays.MERR_PARTINIT
MERR_COMPLEX = _ida_hexrays.MERR_COMPLEX
MERR_LICENSE = _ida_hexrays.MERR_LICENSE
MERR_ONLY32 = _ida_hexrays.MERR_ONLY32
MERR_ONLY64 = _ida_hexrays.MERR_ONLY64
MERR_BUSY = _ida_hexrays.MERR_BUSY
MERR_FARPTR = _ida_hexrays.MERR_FARPTR
MERR_EXTERN = _ida_hexrays.MERR_EXTERN
MERR_FUNCSIZE = _ida_hexrays.MERR_FUNCSIZE
MERR_BADRANGES = _ida_hexrays.MERR_BADRANGES
MERR_STOP = _ida_hexrays.MERR_STOP
MERR_MAX_ERR = _ida_hexrays.MERR_MAX_ERR
MERR_LOOP = _ida_hexrays.MERR_LOOP
# Error-description and register-mapping helpers, delegating to the C wrapper.
def get_merror_desc(*args):
    """
    get_merror_desc(code, mba) -> ea_t
    Get textual description of an error code
    @param code: Microcode error codes (C++: merror_t)
    @param mba: the microcode array (C++: mbl_array_t *)
    @return: the error address
    """
    return _ida_hexrays.get_merror_desc(*args)
def reg2mreg(*args):
    """
    reg2mreg(reg) -> mreg_t
    Map a processor register to microregister.
    @param reg: processor register number (C++: int)
    @return: microregister register id or mr_none
    """
    return _ida_hexrays.reg2mreg(*args)
def mreg2reg(*args):
    """
    mreg2reg(reg, width) -> int
    Map a microregister to processor register.
    @param reg: microregister number (C++: mreg_t)
    @param width: size of microregister in bytes (C++: int)
    @return: processor register id or -1
    """
    return _ida_hexrays.mreg2reg(*args)
# SWIG director class for instruction-level optimizers: subclass this in
# Python and override func() to plug a custom optimization rule into the
# decompiler; install()/remove() manage registration with the engine.
class optinsn_t(object):
    """
    Proxy of C++ optinsn_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def func(self, *args):
        """
        func(self, blk, ins) -> int
        """
        return _ida_hexrays.optinsn_t_func(self, *args)
    def install(self, *args):
        """
        install(self)
        """
        return _ida_hexrays.optinsn_t_install(self, *args)
    def remove(self, *args):
        """
        remove(self) -> bool
        """
        return _ida_hexrays.optinsn_t_remove(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_optinsn_t
    __del__ = lambda self : None;
    def __init__(self, *args):
        """
        __init__(self) -> optinsn_t
        """
        # Director pattern: pass None when instantiated directly (no Python
        # overrides), or self when subclassed so C++ can call back into Python.
        if self.__class__ == optinsn_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_optinsn_t(_self, *args)
        try: self.this.append(this)
        except: self.this = this
    def __disown__(self):
        # Transfer ownership of the C++ object to the C++ side; keep only a
        # weak proxy on the Python side.
        self.this.disown()
        _ida_hexrays.disown_optinsn_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
optinsn_t_swigregister = _ida_hexrays.optinsn_t_swigregister
optinsn_t_swigregister(optinsn_t)
# Flags controlling the use/def (maymust) list calculation, re-exported
# from the extension's global variables.
MUST_ACCESS = cvar.MUST_ACCESS
MAY_ACCESS = cvar.MAY_ACCESS
MAYMUST_ACCESS_MASK = cvar.MAYMUST_ACCESS_MASK
ONE_ACCESS_TYPE = cvar.ONE_ACCESS_TYPE
INCLUDE_SPOILED_REGS = cvar.INCLUDE_SPOILED_REGS
EXCLUDE_PASS_REGS = cvar.EXCLUDE_PASS_REGS
FULL_XDSU = cvar.FULL_XDSU
WITH_ASSERTS = cvar.WITH_ASSERTS
EXCLUDE_VOLATILE = cvar.EXCLUDE_VOLATILE
INCLUDE_UNUSED_SRC = cvar.INCLUDE_UNUSED_SRC
INCLUDE_DEAD_RETREGS = cvar.INCLUDE_DEAD_RETREGS
INCLUDE_RESTRICTED = cvar.INCLUDE_RESTRICTED
CALL_SPOILS_ONLY_ARGS = cvar.CALL_SPOILS_ONLY_ARGS
# SWIG director class for block-level optimizers: subclass this in Python
# and override func() to plug a custom per-block optimization rule into the
# decompiler; install()/remove() manage registration with the engine.
class optblock_t(object):
    """
    Proxy of C++ optblock_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def func(self, *args):
        """
        func(self, blk) -> int
        """
        return _ida_hexrays.optblock_t_func(self, *args)
    def install(self, *args):
        """
        install(self)
        """
        return _ida_hexrays.optblock_t_install(self, *args)
    def remove(self, *args):
        """
        remove(self) -> bool
        """
        return _ida_hexrays.optblock_t_remove(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_optblock_t
    __del__ = lambda self : None;
    def __init__(self, *args):
        """
        __init__(self) -> optblock_t
        """
        # Director pattern: pass None when instantiated directly (no Python
        # overrides), or self when subclassed so C++ can call back into Python.
        if self.__class__ == optblock_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_optblock_t(_self, *args)
        try: self.this.append(this)
        except: self.this = this
    def __disown__(self):
        # Transfer ownership of the C++ object to the C++ side; keep only a
        # weak proxy on the Python side.
        self.this.disown()
        _ida_hexrays.disown_optblock_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
optblock_t_swigregister = _ida_hexrays.optblock_t_swigregister
optblock_t_swigregister(optblock_t)
# Microcode instruction opcodes (mcode_t), re-exported from the extension.
# No-op and data movement:
m_nop = _ida_hexrays.m_nop
m_stx = _ida_hexrays.m_stx
m_ldx = _ida_hexrays.m_ldx
m_ldc = _ida_hexrays.m_ldc
m_mov = _ida_hexrays.m_mov
# Unary operations:
m_neg = _ida_hexrays.m_neg
m_lnot = _ida_hexrays.m_lnot
m_bnot = _ida_hexrays.m_bnot
# Size conversions (sign/zero extend, low/high part):
m_xds = _ida_hexrays.m_xds
m_xdu = _ida_hexrays.m_xdu
m_low = _ida_hexrays.m_low
m_high = _ida_hexrays.m_high
# Integer arithmetic:
m_add = _ida_hexrays.m_add
m_sub = _ida_hexrays.m_sub
m_mul = _ida_hexrays.m_mul
m_udiv = _ida_hexrays.m_udiv
m_sdiv = _ida_hexrays.m_sdiv
m_umod = _ida_hexrays.m_umod
m_smod = _ida_hexrays.m_smod
# Bitwise operations and shifts:
m_or = _ida_hexrays.m_or
m_and = _ida_hexrays.m_and
m_xor = _ida_hexrays.m_xor
m_shl = _ida_hexrays.m_shl
m_shr = _ida_hexrays.m_shr
m_sar = _ida_hexrays.m_sar
# Flag-producing arithmetic (carry/overflow results):
m_cfadd = _ida_hexrays.m_cfadd
m_ofadd = _ida_hexrays.m_ofadd
m_cfshl = _ida_hexrays.m_cfshl
m_cfshr = _ida_hexrays.m_cfshr
# Condition-to-value instructions (set*):
m_sets = _ida_hexrays.m_sets
m_seto = _ida_hexrays.m_seto
m_setp = _ida_hexrays.m_setp
m_setnz = _ida_hexrays.m_setnz
m_setz = _ida_hexrays.m_setz
m_setae = _ida_hexrays.m_setae
m_setb = _ida_hexrays.m_setb
m_seta = _ida_hexrays.m_seta
m_setbe = _ida_hexrays.m_setbe
m_setg = _ida_hexrays.m_setg
m_setge = _ida_hexrays.m_setge
m_setl = _ida_hexrays.m_setl
m_setle = _ida_hexrays.m_setle
# Conditional and unconditional control transfer:
m_jcnd = _ida_hexrays.m_jcnd
m_jnz = _ida_hexrays.m_jnz
m_jz = _ida_hexrays.m_jz
m_jae = _ida_hexrays.m_jae
m_jb = _ida_hexrays.m_jb
m_ja = _ida_hexrays.m_ja
m_jbe = _ida_hexrays.m_jbe
m_jg = _ida_hexrays.m_jg
m_jge = _ida_hexrays.m_jge
m_jl = _ida_hexrays.m_jl
m_jle = _ida_hexrays.m_jle
m_jtbl = _ida_hexrays.m_jtbl
m_ijmp = _ida_hexrays.m_ijmp
m_goto = _ida_hexrays.m_goto
m_call = _ida_hexrays.m_call
m_icall = _ida_hexrays.m_icall
m_ret = _ida_hexrays.m_ret
m_push = _ida_hexrays.m_push
m_pop = _ida_hexrays.m_pop
# Undefined effect / external instruction:
m_und = _ida_hexrays.m_und
m_ext = _ida_hexrays.m_ext
# Floating point conversions and arithmetic:
m_f2i = _ida_hexrays.m_f2i
m_f2u = _ida_hexrays.m_f2u
m_i2f = _ida_hexrays.m_i2f
m_u2f = _ida_hexrays.m_u2f
m_f2f = _ida_hexrays.m_f2f
m_fneg = _ida_hexrays.m_fneg
m_fadd = _ida_hexrays.m_fadd
m_fsub = _ida_hexrays.m_fsub
m_fmul = _ida_hexrays.m_fmul
m_fdiv = _ida_hexrays.m_fdiv
# Opcode classification and conversion helpers for mcode_t values.
# All are thin SWIG wrappers over the C implementations.
def must_mcode_close_block(*args):
    """
    must_mcode_close_block(mcode, including_calls) -> bool
    Must an instruction with the given opcode be the last one in a block?
    Such opcodes are called closing opcodes.
    @param mcode: instruction opcode (C++: mcode_t)
    @param including_calls: should m_call/m_icall be considered as the
                            closing opcodes? If this function returns
                            true, the opcode cannot appear in the middle
                            of a block. Calls are a special case because
                            before MMAT_CALLS they are closing opcodes.
                            Afteer MMAT_CALLS that are not considered as
                            closing opcodes. (C++: bool)
    """
    return _ida_hexrays.must_mcode_close_block(*args)
def is_mcode_propagatable(*args):
    """
    is_mcode_propagatable(mcode) -> bool
    May opcode be propagated? Such opcodes can be used in sub-instructions
    (nested instructions) There is a handful of non-propagatable opcodes,
    like jumps, ret, nop, etc All other regular opcodes are propagatable
    and may appear in a nested instruction.
    @param mcode (C++: mcode_t)
    """
    return _ida_hexrays.is_mcode_propagatable(*args)
# --- Opcode category predicates ---
def is_mcode_addsub(*args):
    """
    is_mcode_addsub(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_addsub(*args)
def is_mcode_xdsu(*args):
    """
    is_mcode_xdsu(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_xdsu(*args)
def is_mcode_set(*args):
    """
    is_mcode_set(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_set(*args)
def is_mcode_set1(*args):
    """
    is_mcode_set1(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_set1(*args)
def is_mcode_j1(*args):
    """
    is_mcode_j1(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_j1(*args)
def is_mcode_jcond(*args):
    """
    is_mcode_jcond(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_jcond(*args)
def is_mcode_convertible_to_jmp(*args):
    """
    is_mcode_convertible_to_jmp(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_convertible_to_jmp(*args)
def is_mcode_convertible_to_set(*args):
    """
    is_mcode_convertible_to_set(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_convertible_to_set(*args)
def is_mcode_call(*args):
    """
    is_mcode_call(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_call(*args)
def is_mcode_fpu(*args):
    """
    is_mcode_fpu(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_fpu(*args)
def is_mcode_commutative(*args):
    """
    is_mcode_commutative(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_commutative(*args)
def is_mcode_shift(*args):
    """
    is_mcode_shift(mcode) -> bool
    """
    return _ida_hexrays.is_mcode_shift(*args)
def is_mcode_divmod(*args):
    """
    is_mcode_divmod(op) -> bool
    """
    return _ida_hexrays.is_mcode_divmod(*args)
# --- Opcode conversions (set <-> conditional jump, relation negation/swap,
# --- signed <-> unsigned variants) ---
def set2jcnd(*args):
    """
    set2jcnd(code) -> mcode_t
    """
    return _ida_hexrays.set2jcnd(*args)
def jcnd2set(*args):
    """
    jcnd2set(code) -> mcode_t
    """
    return _ida_hexrays.jcnd2set(*args)
def negate_mcode_relation(*args):
    """
    negate_mcode_relation(code) -> mcode_t
    """
    return _ida_hexrays.negate_mcode_relation(*args)
def swap_mcode_relation(*args):
    """
    swap_mcode_relation(code) -> mcode_t
    """
    return _ida_hexrays.swap_mcode_relation(*args)
def get_signed_mcode(*args):
    """
    get_signed_mcode(code) -> mcode_t
    """
    return _ida_hexrays.get_signed_mcode(*args)
def get_unsigned_mcode(*args):
    """
    get_unsigned_mcode(code) -> mcode_t
    """
    return _ida_hexrays.get_unsigned_mcode(*args)
def is_signed_mcode(*args):
    """
    is_signed_mcode(code) -> bool
    """
    return _ida_hexrays.is_signed_mcode(*args)
def is_unsigned_mcode(*args):
    """
    is_unsigned_mcode(code) -> bool
    """
    return _ida_hexrays.is_unsigned_mcode(*args)
def mcode_modifies_d(*args):
    """
    mcode_modifies_d(mcode) -> bool
    """
    return _ida_hexrays.mcode_modifies_d(*args)
# SWIG-generated proxy for operand_locator_t: identifies one operand of one
# instruction by (address, operand number); fully ordered and comparable.
class operand_locator_t(object):
    """
    Proxy of C++ operand_locator_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # ea: address of the instruction; opnum: operand ordinal within it.
    ea = _swig_property(_ida_hexrays.operand_locator_t_ea_get, _ida_hexrays.operand_locator_t_ea_set)
    opnum = _swig_property(_ida_hexrays.operand_locator_t_opnum_get, _ida_hexrays.operand_locator_t_opnum_set)
    def __init__(self, *args):
        """
        __init__(self, _ea, _opnum) -> operand_locator_t
        """
        this = _ida_hexrays.new_operand_locator_t(*args)
        # Standard SWIG ownership dance: attach the new C++ object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.operand_locator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.operand_locator_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.operand_locator_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.operand_locator_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.operand_locator_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.operand_locator_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.operand_locator_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_operand_locator_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
operand_locator_t_swigregister = _ida_hexrays.operand_locator_t_swigregister
operand_locator_t_swigregister(operand_locator_t)
# Microregister ids (mreg_t): condition-code flag registers and sentinels.
mr_none = cvar.mr_none
mr_cf = cvar.mr_cf
mr_zf = cvar.mr_zf
mr_sf = cvar.mr_sf
mr_of = cvar.mr_of
mr_pf = cvar.mr_pf
cc_count = cvar.cc_count
mr_cc = cvar.mr_cc
mr_first = cvar.mr_first
# SWIG-generated proxy for number_format_t: describes how a numeric constant
# should be displayed in the decompiler output (radix, char/enum/stroff, etc.;
# see the NF_* flag constants defined after this class).
class number_format_t(object):
    """
    Proxy of C++ number_format_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Raw fields mirrored from the C++ struct.
    flags = _swig_property(_ida_hexrays.number_format_t_flags_get, _ida_hexrays.number_format_t_flags_set)
    opnum = _swig_property(_ida_hexrays.number_format_t_opnum_get, _ida_hexrays.number_format_t_opnum_set)
    props = _swig_property(_ida_hexrays.number_format_t_props_get, _ida_hexrays.number_format_t_props_set)
    serial = _swig_property(_ida_hexrays.number_format_t_serial_get, _ida_hexrays.number_format_t_serial_set)
    org_nbytes = _swig_property(_ida_hexrays.number_format_t_org_nbytes_get, _ida_hexrays.number_format_t_org_nbytes_set)
    type_name = _swig_property(_ida_hexrays.number_format_t_type_name_get, _ida_hexrays.number_format_t_type_name_set)
    def __init__(self, *args):
        """
        __init__(self, _opnum=0) -> number_format_t
        """
        this = _ida_hexrays.new_number_format_t(*args)
        # Standard SWIG ownership dance: attach the new C++ object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def get_radix(self, *args):
        """
        get_radix(self) -> int
        """
        return _ida_hexrays.number_format_t_get_radix(self, *args)
    # Format-kind predicates:
    def is_fixed(self, *args):
        """
        is_fixed(self) -> bool
        """
        return _ida_hexrays.number_format_t_is_fixed(self, *args)
    def is_hex(self, *args):
        """
        is_hex(self) -> bool
        """
        return _ida_hexrays.number_format_t_is_hex(self, *args)
    def is_dec(self, *args):
        """
        is_dec(self) -> bool
        """
        return _ida_hexrays.number_format_t_is_dec(self, *args)
    def is_oct(self, *args):
        """
        is_oct(self) -> bool
        """
        return _ida_hexrays.number_format_t_is_oct(self, *args)
    def is_enum(self, *args):
        """
        is_enum(self) -> bool
        """
        return _ida_hexrays.number_format_t_is_enum(self, *args)
    def is_char(self, *args):
        """
        is_char(self) -> bool
        """
        return _ida_hexrays.number_format_t_is_char(self, *args)
    def is_stroff(self, *args):
        """
        is_stroff(self) -> bool
        """
        return _ida_hexrays.number_format_t_is_stroff(self, *args)
    def is_numop(self, *args):
        """
        is_numop(self) -> bool
        """
        return _ida_hexrays.number_format_t_is_numop(self, *args)
    def needs_to_be_inverted(self, *args):
        """
        needs_to_be_inverted(self) -> bool
        """
        return _ida_hexrays.number_format_t_needs_to_be_inverted(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_number_format_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
number_format_t_swigregister = _ida_hexrays.number_format_t_swigregister
number_format_t_swigregister(number_format_t)
# number_format_t.flags bits. The bare triple-quoted strings below are
# SWIG-generated module documentation for each constant; keep as-is.
NF_FIXED = _ida_hexrays.NF_FIXED
"""
number format has been defined by the user
"""
NF_NEGDONE = _ida_hexrays.NF_NEGDONE
"""
temporary internal bit: negation has been performed
"""
NF_BINVDONE = _ida_hexrays.NF_BINVDONE
"""
temporary internal bit: inverting bits is done
"""
NF_NEGATE = _ida_hexrays.NF_NEGATE
"""
The user asked to negate the constant.
"""
NF_BITNOT = _ida_hexrays.NF_BITNOT
"""
The user asked to invert bits of the constant.
"""
NF_STROFF = _ida_hexrays.NF_STROFF
"""
internal bit: used as stroff, valid iff 'is_stroff()'
"""
# SWIG director base class for decompiler output sinks: subclass and
# override _print() to receive generated text lines.
class vd_printer_t(object):
    """
    Proxy of C++ vd_printer_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    tmpbuf = _swig_property(_ida_hexrays.vd_printer_t_tmpbuf_get, _ida_hexrays.vd_printer_t_tmpbuf_set)
    hdrlines = _swig_property(_ida_hexrays.vd_printer_t_hdrlines_get, _ida_hexrays.vd_printer_t_hdrlines_set)
    def _print(self, *args):
        """
        _print(self, indent, format) -> int
        """
        return _ida_hexrays.vd_printer_t__print(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> vd_printer_t
        """
        # Director pattern: pass None when instantiated directly (no Python
        # overrides), or self when subclassed so C++ can call back into Python.
        if self.__class__ == vd_printer_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_vd_printer_t(_self, *args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_vd_printer_t
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the C++ object to the C++ side; keep only a
        # weak proxy on the Python side.
        self.this.disown()
        _ida_hexrays.disown_vd_printer_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
vd_printer_t_swigregister = _ida_hexrays.vd_printer_t_swigregister
vd_printer_t_swigregister(vd_printer_t)
# SWIG director class: printer bound to a decompiled function (cfunc);
# extends vd_printer_t with the target function and last-printed-char state.
class vc_printer_t(vd_printer_t):
    """
    Proxy of C++ vc_printer_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    func = _swig_property(_ida_hexrays.vc_printer_t_func_get, _ida_hexrays.vc_printer_t_func_set)
    lastchar = _swig_property(_ida_hexrays.vc_printer_t_lastchar_get, _ida_hexrays.vc_printer_t_lastchar_set)
    def __init__(self, *args):
        """
        __init__(self, f) -> vc_printer_t
        """
        # Director pattern: pass None when instantiated directly (no Python
        # overrides), or self when subclassed so C++ can call back into Python.
        if self.__class__ == vc_printer_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_vc_printer_t(_self, *args)
        try: self.this.append(this)
        except: self.this = this
    def oneliner(self, *args):
        """
        oneliner(self) -> bool
        """
        return _ida_hexrays.vc_printer_t_oneliner(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_vc_printer_t
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the C++ object to the C++ side; keep only a
        # weak proxy on the Python side.
        self.this.disown()
        _ida_hexrays.disown_vc_printer_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
vc_printer_t_swigregister = _ida_hexrays.vc_printer_t_swigregister
vc_printer_t_swigregister(vc_printer_t)
# SWIG-generated printer that accumulates decompiler output into a qstring,
# optionally keeping color tags; read the result via get_s() / the `s` property.
class qstring_printer_t(vc_printer_t):
    """
    Proxy of C++ qstring_printer_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    with_tags = _swig_property(_ida_hexrays.qstring_printer_t_with_tags_get, _ida_hexrays.qstring_printer_t_with_tags_set)
    # NOTE(review): this `s` binding is shadowed by the property defined at
    # the end of the class body, which routes reads through get_s().
    s = _swig_property(_ida_hexrays.qstring_printer_t_s_get, _ida_hexrays.qstring_printer_t_s_set)
    def _print(self, *args):
        """
        _print(self, indent, format) -> int
        """
        return _ida_hexrays.qstring_printer_t__print(self, *args)
    def __init__(self, *args):
        """
        __init__(self, f, tags) -> qstring_printer_t
        """
        this = _ida_hexrays.new_qstring_printer_t(*args)
        # Standard SWIG ownership dance: attach the new C++ object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def get_s(self, *args):
        """
        get_s(self) -> qstring
        """
        return _ida_hexrays.qstring_printer_t_get_s(self, *args)
    # Read-only property overriding the earlier _swig_property for `s`.
    s = property(lambda self: self.get_s())
# Register the proxy class with the SWIG runtime.
qstring_printer_t_swigregister = _ida_hexrays.qstring_printer_t_swigregister
qstring_printer_t_swigregister(qstring_printer_t)
def dstr(*args):
"""
dstr(tif) -> char const *
Print the specified type info. This function can be used from a
debugger by typing "tif->dstr()"
@param tif (C++: const tinfo_t *)
"""
return _ida_hexrays.dstr(*args)
def is_type_correct(*args):
"""
is_type_correct(ptr) -> bool
Verify a type string.
@param ptr (C++: const type_t *)
@return: true if type string is correct
"""
return _ida_hexrays.is_type_correct(*args)
def is_small_udt(*args):
"""
is_small_udt(tif) -> bool
Is a small structure or union?
@param tif (C++: const tinfo_t &)
@return: true if the type is a small UDT (user defined type). Small
UDTs fit into a register (or pair or registers) as a rule.
"""
return _ida_hexrays.is_small_udt(*args)
def is_nonbool_type(*args):
"""
is_nonbool_type(type) -> bool
Is definitely a non-boolean type?
@param type (C++: const tinfo_t &)
@return: true if the type is a non-boolean type (non bool and well
defined)
"""
return _ida_hexrays.is_nonbool_type(*args)
def is_bool_type(*args):
"""
is_bool_type(type) -> bool
Is a boolean type?
@param type (C++: const tinfo_t &)
@return: true if the type is a boolean type
"""
return _ida_hexrays.is_bool_type(*args)
def is_ptr_or_array(*args):
"""
is_ptr_or_array(t) -> bool
Is a pointer or array type?
@param t (C++: type_t)
"""
return _ida_hexrays.is_ptr_or_array(*args)
def is_paf(*args):
"""
is_paf(t) -> bool
Is a pointer, array, or function type?
@param t (C++: type_t)
"""
return _ida_hexrays.is_paf(*args)
def is_inplace_def(*args):
"""
is_inplace_def(type) -> bool
Is struct/union/enum definition (not declaration)?
@param type (C++: const tinfo_t &)
"""
return _ida_hexrays.is_inplace_def(*args)
def partial_type_num(*args):
"""
partial_type_num(type) -> int
Calculate number of partial subtypes.
@param type (C++: const tinfo_t &)
@return: number of partial subtypes. The bigger is this number, the
uglier is the type.
"""
return _ida_hexrays.partial_type_num(*args)
def get_float_type(*args):
"""
get_float_type(width) -> tinfo_t
Get a type of a floating point value with the specified width
@param width: width of the desired type (C++: int)
@return: type info object
"""
return _ida_hexrays.get_float_type(*args)
def get_int_type_by_width_and_sign(*args):
"""
get_int_type_by_width_and_sign(srcwidth, sign) -> tinfo_t
Create a type info by width and sign. Returns a simple type (examples:
int, short) with the given width and sign.
@param srcwidth: size of the type in bytes (C++: int)
@param sign: sign of the type (C++: type_sign_t)
"""
return _ida_hexrays.get_int_type_by_width_and_sign(*args)
def get_unk_type(*args):
"""
get_unk_type(size) -> tinfo_t
Create a partial type info by width. Returns a partially defined type
(examples: _DWORD, _BYTE) with the given width.
@param size: size of the type in bytes (C++: int)
"""
return _ida_hexrays.get_unk_type(*args)
def dummy_ptrtype(*args):
"""
dummy_ptrtype(ptrsize, isfp) -> tinfo_t
Generate a dummy pointer type
@param ptrsize: size of pointed object (C++: int)
@param isfp: is floating point object? (C++: bool)
"""
return _ida_hexrays.dummy_ptrtype(*args)
def get_member_type(*args):
"""
get_member_type(mptr, type) -> bool
Get type of a structure field. This function performs validity checks
of the field type. Wrong types are rejected.
@param mptr: structure field (C++: const member_t *)
@param type: pointer to the variable where the type is returned. This
parameter can be NULL. (C++: tinfo_t *)
@return: false if failed
"""
return _ida_hexrays.get_member_type(*args)
def make_pointer(*args):
"""
make_pointer(type) -> tinfo_t
Create a pointer type. This function performs the following
conversion: "type" -> "type*"
@param type: object type. (C++: const tinfo_t &)
@return: "type*". for example, if 'char' is passed as the argument,
"""
return _ida_hexrays.make_pointer(*args)
def create_typedef(*args):
    """
    create_typedef(name) -> tinfo_t
    create_typedef(n) -> tinfo_t

    Create a reference to a named type (e.g. passing "DWORD" yields a
    type info referring to "DWORD").

    @param name: type name (C++: const char *)
    @return: type which refers to the specified name
    """
    tif = _ida_hexrays.create_typedef(*args)
    return tif
# type_source_t values: what kind of type information to consider
# (used by get_type()/set_type() below).
GUESSED_NONE = _ida_hexrays.GUESSED_NONE
GUESSED_WEAK = _ida_hexrays.GUESSED_WEAK
GUESSED_FUNC = _ida_hexrays.GUESSED_FUNC
GUESSED_DATA = _ida_hexrays.GUESSED_DATA
# TS_* flag bits re-exported from the native module.
TS_NOELL = _ida_hexrays.TS_NOELL
TS_SHRINK = _ida_hexrays.TS_SHRINK
TS_DONTREF = _ida_hexrays.TS_DONTREF
TS_MASK = _ida_hexrays.TS_MASK
def get_type(*args):
    """
    get_type(id, tif, guess) -> bool

    Get a global type. Global types are the types of addressable
    objects and struct/union/enum types.

    @param id: address or id of the object (C++: uval_t)
    @param tif: buffer for the answer (C++: tinfo_t *)
    @param guess: what kind of types to consider (C++: type_source_t)
    @return: success
    """
    ok = _ida_hexrays.get_type(*args)
    return ok
def set_type(*args):
    """
    set_type(id, tif, source, force=False) -> bool

    Set a global type.

    @param id: address or id of the object (C++: uval_t)
    @param tif: new type info (C++: const tinfo_t &)
    @param source: where the type comes from (C++: type_source_t)
    @param force: true means to set the type as is, false means to
                  merge the new type with the possibly existing old
                  type info. (C++: bool)
    @return: success
    """
    ok = _ida_hexrays.set_type(*args)
    return ok
class vdloc_t(ida_typeinf.argloc_t):
    """
    Proxy of C++ vdloc_t class.

    SWIG-generated wrapper for the Hex-Rays variable-location type
    (an ``argloc_t`` subclass); every method delegates to the native
    ``_ida_hexrays`` module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def reg1(self, *args):
        """
        reg1(self) -> int
        """
        return _ida_hexrays.vdloc_t_reg1(self, *args)
    def _set_reg1(self, *args):
        """
        _set_reg1(self, r1)
        """
        return _ida_hexrays.vdloc_t__set_reg1(self, *args)
    def set_reg1(self, *args):
        """
        set_reg1(self, r1)
        """
        return _ida_hexrays.vdloc_t_set_reg1(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.vdloc_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.vdloc_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.vdloc_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.vdloc_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.vdloc_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.vdloc_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.vdloc_t_compare(self, *args)
    def is_aliasable(self, *args):
        """
        is_aliasable(self, mb, size) -> bool
        """
        return _ida_hexrays.vdloc_t_is_aliasable(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> vdloc_t
        """
        this = _ida_hexrays.new_vdloc_t(*args)
        # SWIG boilerplate: attach the freshly created C++ object to
        # this proxy instance.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_vdloc_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
vdloc_t_swigregister = _ida_hexrays.vdloc_t_swigregister
vdloc_t_swigregister(vdloc_t)
def print_vdloc(*args):
    """
    print_vdloc(loc, nbytes)

    Print a vdloc. A vdloc does not always carry the size info, so it
    is passed separately as NBYTES.

    @param loc (C++: const vdloc_t &)
    @param nbytes (C++: int)
    """
    result = _ida_hexrays.print_vdloc(*args)
    return result
def arglocs_overlap(*args):
    """
    arglocs_overlap(loc1, w1, loc2, w2) -> bool

    Check whether two arglocs overlap.

    @param loc1 (C++: const vdloc_t &)
    @param w1 (C++: size_t)
    @param loc2 (C++: const vdloc_t &)
    @param w2 (C++: size_t)
    """
    overlap = _ida_hexrays.arglocs_overlap(*args)
    return overlap
class lvar_locator_t(object):
    """
    Proxy of C++ lvar_locator_t class.

    SWIG-generated wrapper describing where a local variable lives
    (its ``location``) and its definition address (``defea``); every
    method delegates to the native ``_ida_hexrays`` module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    location = _swig_property(_ida_hexrays.lvar_locator_t_location_get, _ida_hexrays.lvar_locator_t_location_set)
    defea = _swig_property(_ida_hexrays.lvar_locator_t_defea_get, _ida_hexrays.lvar_locator_t_defea_set)
    def __init__(self, *args):
        """
        __init__(self) -> lvar_locator_t
        __init__(self, loc, ea) -> lvar_locator_t
        """
        this = _ida_hexrays.new_lvar_locator_t(*args)
        # SWIG boilerplate: attach the freshly created C++ object.
        try: self.this.append(this)
        except: self.this = this
    def get_stkoff(self, *args):
        """
        get_stkoff(self) -> sval_t
        """
        return _ida_hexrays.lvar_locator_t_get_stkoff(self, *args)
    def is_reg1(self, *args):
        """
        is_reg1(self) -> bool
        """
        return _ida_hexrays.lvar_locator_t_is_reg1(self, *args)
    def is_reg2(self, *args):
        """
        is_reg2(self) -> bool
        """
        return _ida_hexrays.lvar_locator_t_is_reg2(self, *args)
    def is_reg_var(self, *args):
        """
        is_reg_var(self) -> bool
        """
        return _ida_hexrays.lvar_locator_t_is_reg_var(self, *args)
    def is_stk_var(self, *args):
        """
        is_stk_var(self) -> bool
        """
        return _ida_hexrays.lvar_locator_t_is_stk_var(self, *args)
    def is_scattered(self, *args):
        """
        is_scattered(self) -> bool
        """
        return _ida_hexrays.lvar_locator_t_is_scattered(self, *args)
    def get_reg1(self, *args):
        """
        get_reg1(self) -> mreg_t
        """
        return _ida_hexrays.lvar_locator_t_get_reg1(self, *args)
    def get_reg2(self, *args):
        """
        get_reg2(self) -> mreg_t
        """
        return _ida_hexrays.lvar_locator_t_get_reg2(self, *args)
    def get_scattered(self, *args):
        """
        get_scattered(self) -> scattered_aloc_t
        get_scattered(self) -> scattered_aloc_t
        """
        return _ida_hexrays.lvar_locator_t_get_scattered(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.lvar_locator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.lvar_locator_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.lvar_locator_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.lvar_locator_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.lvar_locator_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.lvar_locator_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.lvar_locator_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_lvar_locator_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
lvar_locator_t_swigregister = _ida_hexrays.lvar_locator_t_swigregister
lvar_locator_t_swigregister(lvar_locator_t)
class lvar_t(lvar_locator_t):
    """
    Proxy of C++ lvar_t class.

    SWIG-generated wrapper for a decompiler local variable: name,
    comment, type info, width, and a large set of flag accessors
    (is_*/set_*/clr_* pairs). Instances are created by the decompiler
    core, not by Python code (the constructor is disabled below);
    every method delegates to the native ``_ida_hexrays`` module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    # Instances cannot be constructed from Python.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    name = _swig_property(_ida_hexrays.lvar_t_name_get, _ida_hexrays.lvar_t_name_set)
    cmt = _swig_property(_ida_hexrays.lvar_t_cmt_get, _ida_hexrays.lvar_t_cmt_set)
    tif = _swig_property(_ida_hexrays.lvar_t_tif_get, _ida_hexrays.lvar_t_tif_set)
    width = _swig_property(_ida_hexrays.lvar_t_width_get, _ida_hexrays.lvar_t_width_set)
    defblk = _swig_property(_ida_hexrays.lvar_t_defblk_get, _ida_hexrays.lvar_t_defblk_set)
    divisor = _swig_property(_ida_hexrays.lvar_t_divisor_get, _ida_hexrays.lvar_t_divisor_set)
    def used(self, *args):
        """
        used(self) -> bool
        """
        return _ida_hexrays.lvar_t_used(self, *args)
    def typed(self, *args):
        """
        typed(self) -> bool
        """
        return _ida_hexrays.lvar_t_typed(self, *args)
    def mreg_done(self, *args):
        """
        mreg_done(self) -> bool
        """
        return _ida_hexrays.lvar_t_mreg_done(self, *args)
    def has_nice_name(self, *args):
        """
        has_nice_name(self) -> bool
        """
        return _ida_hexrays.lvar_t_has_nice_name(self, *args)
    def is_unknown_width(self, *args):
        """
        is_unknown_width(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_unknown_width(self, *args)
    def has_user_info(self, *args):
        """
        has_user_info(self) -> bool
        """
        return _ida_hexrays.lvar_t_has_user_info(self, *args)
    def has_user_name(self, *args):
        """
        has_user_name(self) -> bool
        """
        return _ida_hexrays.lvar_t_has_user_name(self, *args)
    def has_user_type(self, *args):
        """
        has_user_type(self) -> bool
        """
        return _ida_hexrays.lvar_t_has_user_type(self, *args)
    def is_result_var(self, *args):
        """
        is_result_var(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_result_var(self, *args)
    def is_arg_var(self, *args):
        """
        is_arg_var(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_arg_var(self, *args)
    def is_fake_var(self, *args):
        """
        is_fake_var(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_fake_var(self, *args)
    def is_overlapped_var(self, *args):
        """
        is_overlapped_var(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_overlapped_var(self, *args)
    def is_floating_var(self, *args):
        """
        is_floating_var(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_floating_var(self, *args)
    def is_spoiled_var(self, *args):
        """
        is_spoiled_var(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_spoiled_var(self, *args)
    def is_noptr_var(self, *args):
        """
        is_noptr_var(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_noptr_var(self, *args)
    def is_mapdst_var(self, *args):
        """
        is_mapdst_var(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_mapdst_var(self, *args)
    def is_thisarg(self, *args):
        """
        is_thisarg(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_thisarg(self, *args)
    def is_forced_var(self, *args):
        """
        is_forced_var(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_forced_var(self, *args)
    def has_regname(self, *args):
        """
        has_regname(self) -> bool
        """
        return _ida_hexrays.lvar_t_has_regname(self, *args)
    def is_dummy_arg(self, *args):
        """
        is_dummy_arg(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_dummy_arg(self, *args)
    def is_notarg(self, *args):
        """
        is_notarg(self) -> bool
        """
        return _ida_hexrays.lvar_t_is_notarg(self, *args)
    def set_used(self, *args):
        """
        set_used(self)
        """
        return _ida_hexrays.lvar_t_set_used(self, *args)
    def clear_used(self, *args):
        """
        clear_used(self)
        """
        return _ida_hexrays.lvar_t_clear_used(self, *args)
    def set_typed(self, *args):
        """
        set_typed(self)
        """
        return _ida_hexrays.lvar_t_set_typed(self, *args)
    def set_non_typed(self, *args):
        """
        set_non_typed(self)
        """
        return _ida_hexrays.lvar_t_set_non_typed(self, *args)
    def clr_user_info(self, *args):
        """
        clr_user_info(self)
        """
        return _ida_hexrays.lvar_t_clr_user_info(self, *args)
    def set_user_name(self, *args):
        """
        set_user_name(self)
        """
        return _ida_hexrays.lvar_t_set_user_name(self, *args)
    def set_user_type(self, *args):
        """
        set_user_type(self)
        """
        return _ida_hexrays.lvar_t_set_user_type(self, *args)
    def clr_user_type(self, *args):
        """
        clr_user_type(self)
        """
        return _ida_hexrays.lvar_t_clr_user_type(self, *args)
    def clr_user_name(self, *args):
        """
        clr_user_name(self)
        """
        return _ida_hexrays.lvar_t_clr_user_name(self, *args)
    def set_mreg_done(self, *args):
        """
        set_mreg_done(self)
        """
        return _ida_hexrays.lvar_t_set_mreg_done(self, *args)
    def clr_mreg_done(self, *args):
        """
        clr_mreg_done(self)
        """
        return _ida_hexrays.lvar_t_clr_mreg_done(self, *args)
    def set_unknown_width(self, *args):
        """
        set_unknown_width(self)
        """
        return _ida_hexrays.lvar_t_set_unknown_width(self, *args)
    def clr_unknown_width(self, *args):
        """
        clr_unknown_width(self)
        """
        return _ida_hexrays.lvar_t_clr_unknown_width(self, *args)
    def set_arg_var(self, *args):
        """
        set_arg_var(self)
        """
        return _ida_hexrays.lvar_t_set_arg_var(self, *args)
    def clr_arg_var(self, *args):
        """
        clr_arg_var(self)
        """
        return _ida_hexrays.lvar_t_clr_arg_var(self, *args)
    def set_fake_var(self, *args):
        """
        set_fake_var(self)
        """
        return _ida_hexrays.lvar_t_set_fake_var(self, *args)
    def clr_fake_var(self, *args):
        """
        clr_fake_var(self)
        """
        return _ida_hexrays.lvar_t_clr_fake_var(self, *args)
    def set_overlapped_var(self, *args):
        """
        set_overlapped_var(self)
        """
        return _ida_hexrays.lvar_t_set_overlapped_var(self, *args)
    def clr_overlapped_var(self, *args):
        """
        clr_overlapped_var(self)
        """
        return _ida_hexrays.lvar_t_clr_overlapped_var(self, *args)
    def set_floating_var(self, *args):
        """
        set_floating_var(self)
        """
        return _ida_hexrays.lvar_t_set_floating_var(self, *args)
    def clr_floating_var(self, *args):
        """
        clr_floating_var(self)
        """
        return _ida_hexrays.lvar_t_clr_floating_var(self, *args)
    def set_spoiled_var(self, *args):
        """
        set_spoiled_var(self)
        """
        return _ida_hexrays.lvar_t_set_spoiled_var(self, *args)
    def clr_spoiled_var(self, *args):
        """
        clr_spoiled_var(self)
        """
        return _ida_hexrays.lvar_t_clr_spoiled_var(self, *args)
    def set_mapdst_var(self, *args):
        """
        set_mapdst_var(self)
        """
        return _ida_hexrays.lvar_t_set_mapdst_var(self, *args)
    def clr_mapdst_var(self, *args):
        """
        clr_mapdst_var(self)
        """
        return _ida_hexrays.lvar_t_clr_mapdst_var(self, *args)
    def set_noptr_var(self, *args):
        """
        set_noptr_var(self)
        """
        return _ida_hexrays.lvar_t_set_noptr_var(self, *args)
    def clr_noptr_var(self, *args):
        """
        clr_noptr_var(self)
        """
        return _ida_hexrays.lvar_t_clr_noptr_var(self, *args)
    def set_thisarg(self, *args):
        """
        set_thisarg(self)
        """
        return _ida_hexrays.lvar_t_set_thisarg(self, *args)
    def clr_thisarg(self, *args):
        """
        clr_thisarg(self)
        """
        return _ida_hexrays.lvar_t_clr_thisarg(self, *args)
    def set_forced_var(self, *args):
        """
        set_forced_var(self)
        """
        return _ida_hexrays.lvar_t_set_forced_var(self, *args)
    def clr_forced_var(self, *args):
        """
        clr_forced_var(self)
        """
        return _ida_hexrays.lvar_t_clr_forced_var(self, *args)
    def set_dummy_arg(self, *args):
        """
        set_dummy_arg(self)
        """
        return _ida_hexrays.lvar_t_set_dummy_arg(self, *args)
    def clr_dummy_arg(self, *args):
        """
        clr_dummy_arg(self)
        """
        return _ida_hexrays.lvar_t_clr_dummy_arg(self, *args)
    def set_notarg(self, *args):
        """
        set_notarg(self)
        """
        return _ida_hexrays.lvar_t_set_notarg(self, *args)
    def clr_notarg(self, *args):
        """
        clr_notarg(self)
        """
        return _ida_hexrays.lvar_t_clr_notarg(self, *args)
    def has_common(self, *args):
        """
        has_common(self, v) -> bool
        """
        return _ida_hexrays.lvar_t_has_common(self, *args)
    def has_common_bit(self, *args):
        """
        has_common_bit(self, loc, width2) -> bool
        """
        return _ida_hexrays.lvar_t_has_common_bit(self, *args)
    def type(self, *args):
        """
        type(self) -> tinfo_t
        type(self) -> tinfo_t
        """
        return _ida_hexrays.lvar_t_type(self, *args)
    def accepts_type(self, *args):
        """
        accepts_type(self, t, may_change_thisarg=False) -> bool
        """
        return _ida_hexrays.lvar_t_accepts_type(self, *args)
    def set_lvar_type(self, *args):
        """
        set_lvar_type(self, t, may_fail=False) -> bool
        """
        return _ida_hexrays.lvar_t_set_lvar_type(self, *args)
    def set_final_lvar_type(self, *args):
        """
        set_final_lvar_type(self, t)
        """
        return _ida_hexrays.lvar_t_set_final_lvar_type(self, *args)
    def set_width(self, *args):
        """
        set_width(self, w, svw_flags=0) -> bool
        """
        return _ida_hexrays.lvar_t_set_width(self, *args)
    def append_list(self, *args):
        """
        append_list(self, lst, pad_if_scattered=False)
        """
        return _ida_hexrays.lvar_t_append_list(self, *args)
    def is_aliasable(self, *args):
        """
        is_aliasable(self, mba) -> bool
        """
        return _ida_hexrays.lvar_t_is_aliasable(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_lvar_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
lvar_t_swigregister = _ida_hexrays.lvar_t_swigregister
lvar_t_swigregister(lvar_t)
# SVW_* flags for lvar_t.set_width() (svw_flags argument).
SVW_INT = _ida_hexrays.SVW_INT
SVW_FLOAT = _ida_hexrays.SVW_FLOAT
SVW_SOFT = _ida_hexrays.SVW_SOFT
class lvars_t(qvector_lvar_t):
    """
    Proxy of C++ lvars_t class.

    SWIG-generated wrapper for a vector of local variables
    (a ``qvector_lvar_t`` subclass) with lookup helpers; every method
    delegates to the native ``_ida_hexrays`` module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def find_input_lvar(self, *args):
        """
        find_input_lvar(self, argloc, _size) -> int
        """
        return _ida_hexrays.lvars_t_find_input_lvar(self, *args)
    def find_stkvar(self, *args):
        """
        find_stkvar(self, spoff, width) -> int
        """
        return _ida_hexrays.lvars_t_find_stkvar(self, *args)
    def find(self, *args):
        """
        find(self, ll) -> lvar_t
        """
        return _ida_hexrays.lvars_t_find(self, *args)
    def find_lvar(self, *args):
        """
        find_lvar(self, location, width, defblk=-1) -> int
        """
        return _ida_hexrays.lvars_t_find_lvar(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> lvars_t
        """
        this = _ida_hexrays.new_lvars_t(*args)
        # SWIG boilerplate: attach the freshly created C++ object.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_lvars_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
lvars_t_swigregister = _ida_hexrays.lvars_t_swigregister
lvars_t_swigregister(lvars_t)
class lvar_saved_info_t(object):
    """
    Proxy of C++ lvar_saved_info_t class.

    SWIG-generated wrapper for user-specified information saved about
    a local variable (name, type, comment, size, flags); every method
    delegates to the native ``_ida_hexrays`` module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    ll = _swig_property(_ida_hexrays.lvar_saved_info_t_ll_get, _ida_hexrays.lvar_saved_info_t_ll_set)
    name = _swig_property(_ida_hexrays.lvar_saved_info_t_name_get, _ida_hexrays.lvar_saved_info_t_name_set)
    type = _swig_property(_ida_hexrays.lvar_saved_info_t_type_get, _ida_hexrays.lvar_saved_info_t_type_set)
    cmt = _swig_property(_ida_hexrays.lvar_saved_info_t_cmt_get, _ida_hexrays.lvar_saved_info_t_cmt_set)
    size = _swig_property(_ida_hexrays.lvar_saved_info_t_size_get, _ida_hexrays.lvar_saved_info_t_size_set)
    flags = _swig_property(_ida_hexrays.lvar_saved_info_t_flags_get, _ida_hexrays.lvar_saved_info_t_flags_set)
    def __init__(self, *args):
        """
        __init__(self) -> lvar_saved_info_t
        """
        this = _ida_hexrays.new_lvar_saved_info_t(*args)
        # SWIG boilerplate: attach the freshly created C++ object.
        try: self.this.append(this)
        except: self.this = this
    def has_info(self, *args):
        """
        has_info(self) -> bool
        """
        return _ida_hexrays.lvar_saved_info_t_has_info(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.lvar_saved_info_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.lvar_saved_info_t___ne__(self, *args)
    def is_kept(self, *args):
        """
        is_kept(self) -> bool
        """
        return _ida_hexrays.lvar_saved_info_t_is_kept(self, *args)
    def clear_keep(self, *args):
        """
        clear_keep(self)
        """
        return _ida_hexrays.lvar_saved_info_t_clear_keep(self, *args)
    def set_keep(self, *args):
        """
        set_keep(self)
        """
        return _ida_hexrays.lvar_saved_info_t_set_keep(self, *args)
    def is_forced_lvar(self, *args):
        """
        is_forced_lvar(self) -> bool
        """
        return _ida_hexrays.lvar_saved_info_t_is_forced_lvar(self, *args)
    def set_forced_lvar(self, *args):
        """
        set_forced_lvar(self)
        """
        return _ida_hexrays.lvar_saved_info_t_set_forced_lvar(self, *args)
    def clr_forced_lvar(self, *args):
        """
        clr_forced_lvar(self)
        """
        return _ida_hexrays.lvar_saved_info_t_clr_forced_lvar(self, *args)
    def is_noptr_lvar(self, *args):
        """
        is_noptr_lvar(self) -> bool
        """
        return _ida_hexrays.lvar_saved_info_t_is_noptr_lvar(self, *args)
    def set_noptr_lvar(self, *args):
        """
        set_noptr_lvar(self)
        """
        return _ida_hexrays.lvar_saved_info_t_set_noptr_lvar(self, *args)
    def clr_noptr_lvar(self, *args):
        """
        clr_noptr_lvar(self)
        """
        return _ida_hexrays.lvar_saved_info_t_clr_noptr_lvar(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_lvar_saved_info_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
lvar_saved_info_t_swigregister = _ida_hexrays.lvar_saved_info_t_swigregister
lvar_saved_info_t_swigregister(lvar_saved_info_t)
# LVINF_* flag bits for lvar_saved_info_t.flags.
LVINF_KEEP = _ida_hexrays.LVINF_KEEP
"""
preserve saved user settings regardless of vars for example, if a var
loses all its user-defined attributes or even gets destroyed, keep its
'lvar_saved_info_t' . this is used for ephemeral variables that get
destroyed by macro recognition.
"""
LVINF_FORCE = _ida_hexrays.LVINF_FORCE
"""
force allocation of a new variable. forces the decompiler to create a
new variable at ll.defea
"""
LVINF_NOPTR = _ida_hexrays.LVINF_NOPTR
"""
variable type should not be a pointer
"""
class lvar_uservec_t(object):
    """
    Proxy of C++ lvar_uservec_t class.

    SWIG-generated wrapper collecting user-specified local-variable
    information (saved-info vector, mappings, stack-offset delta and
    flags); every method delegates to the native ``_ida_hexrays``
    module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    lvvec = _swig_property(_ida_hexrays.lvar_uservec_t_lvvec_get, _ida_hexrays.lvar_uservec_t_lvvec_set)
    lmaps = _swig_property(_ida_hexrays.lvar_uservec_t_lmaps_get, _ida_hexrays.lvar_uservec_t_lmaps_set)
    stkoff_delta = _swig_property(_ida_hexrays.lvar_uservec_t_stkoff_delta_get, _ida_hexrays.lvar_uservec_t_stkoff_delta_set)
    ulv_flags = _swig_property(_ida_hexrays.lvar_uservec_t_ulv_flags_get, _ida_hexrays.lvar_uservec_t_ulv_flags_set)
    def __init__(self, *args):
        """
        __init__(self) -> lvar_uservec_t
        """
        this = _ida_hexrays.new_lvar_uservec_t(*args)
        # SWIG boilerplate: attach the freshly created C++ object.
        try: self.this.append(this)
        except: self.this = this
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.lvar_uservec_t_swap(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.lvar_uservec_t_clear(self, *args)
    def find_info(self, *args):
        """
        find_info(self, vloc) -> lvar_saved_info_t
        """
        return _ida_hexrays.lvar_uservec_t_find_info(self, *args)
    def keep_info(self, *args):
        """
        keep_info(self, v)
        """
        return _ida_hexrays.lvar_uservec_t_keep_info(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_lvar_uservec_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
lvar_uservec_t_swigregister = _ida_hexrays.lvar_uservec_t_swigregister
lvar_uservec_t_swigregister(lvar_uservec_t)
ULV_PRECISE_DEFEA = _ida_hexrays.ULV_PRECISE_DEFEA
"""
Use precise defea's for lvar locations.
"""
def restore_user_lvar_settings(*args):
    """
    restore_user_lvar_settings(lvinf, func_ea) -> bool

    Restore user-defined local variable settings from the database.

    @param lvinf: ptr to output buffer (C++: lvar_uservec_t *)
    @param func_ea: entry address of the function (C++: ea_t)
    @return: success
    """
    ok = _ida_hexrays.restore_user_lvar_settings(*args)
    return ok
def save_user_lvar_settings(*args):
    """
    save_user_lvar_settings(func_ea, lvinf)

    Save user-defined local variable settings into the database.

    @param func_ea: entry address of the function (C++: ea_t)
    @param lvinf: user-specified info about local variables
                  (C++: const lvar_uservec_t &)
    """
    result = _ida_hexrays.save_user_lvar_settings(*args)
    return result
class user_lvar_modifier_t(object):
    """
    Proxy of C++ user_lvar_modifier_t class.

    SWIG-generated "director" class: subclass it in Python and
    override modify_lvars() to edit saved local-variable settings via
    modify_user_lvars(); delegates to the native ``_ida_hexrays``
    module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def modify_lvars(self, *args):
        """
        modify_lvars(self, lvinf) -> bool
        """
        return _ida_hexrays.user_lvar_modifier_t_modify_lvars(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_lvar_modifier_t
        """
        # Director pattern: pass self only for Python subclasses so
        # overridden methods can be called back from C++.
        if self.__class__ == user_lvar_modifier_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_user_lvar_modifier_t(_self, *args)
        # SWIG boilerplate: attach the freshly created C++ object.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_user_lvar_modifier_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the C++ object to the native side.
        self.this.disown()
        _ida_hexrays.disown_user_lvar_modifier_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
user_lvar_modifier_t_swigregister = _ida_hexrays.user_lvar_modifier_t_swigregister
user_lvar_modifier_t_swigregister(user_lvar_modifier_t)
def modify_user_lvars(*args):
    """
    modify_user_lvars(entry_ea, mlv) -> bool

    Modify saved local variable settings.

    @param entry_ea: function start address (C++: ea_t)
    @param mlv: local variable modifier (C++: user_lvar_modifier_t &)
    @return: true if modified variables
    """
    modified = _ida_hexrays.modify_user_lvars(*args)
    return modified
class udcall_t(object):
    """
    Proxy of C++ udcall_t class.

    SWIG-generated wrapper for a user-defined call description
    (a name and a type info); every method delegates to the native
    ``_ida_hexrays`` module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    name = _swig_property(_ida_hexrays.udcall_t_name_get, _ida_hexrays.udcall_t_name_set)
    tif = _swig_property(_ida_hexrays.udcall_t_tif_get, _ida_hexrays.udcall_t_tif_set)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.udcall_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.udcall_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.udcall_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.udcall_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.udcall_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.udcall_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.udcall_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> udcall_t
        """
        this = _ida_hexrays.new_udcall_t(*args)
        # SWIG boilerplate: attach the freshly created C++ object.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_udcall_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
udcall_t_swigregister = _ida_hexrays.udcall_t_swigregister
udcall_t_swigregister(udcall_t)
def restore_user_defined_calls(*args):
    """
    restore_user_defined_calls(udcalls, func_ea) -> bool

    Restore user-defined function calls from the database.

    @param udcalls: ptr to output buffer (C++: udcall_map_t *)
    @param func_ea: entry address of the function (C++: ea_t)
    @return: success
    """
    ok = _ida_hexrays.restore_user_defined_calls(*args)
    return ok
def save_user_defined_calls(*args):
    """
    save_user_defined_calls(func_ea, udcalls)

    Save user-defined function calls into the database.

    @param func_ea: entry address of the function (C++: ea_t)
    @param udcalls: user-specified info about user defined function
                    calls (C++: const udcall_map_t &)
    """
    result = _ida_hexrays.save_user_defined_calls(*args)
    return result
def parse_user_call(*args):
    """
    parse_user_call(udc, decl, silent) -> bool

    Convert a function type declaration into the internal structure.

    @param udc: pointer to output structure (C++: udcall_t *)
    @param decl: function type declaration (C++: const char *)
    @param silent: if TRUE: do not show a warning in case of an
                   incorrect type (C++: bool)
    @return: success
    """
    ok = _ida_hexrays.parse_user_call(*args)
    return ok
def convert_to_user_call(*args):
    """
    convert_to_user_call(udc, cdg) -> merror_t

    Try to generate a user-defined call for an instruction.

    @param udc (C++: const udcall_t &)
    @param cdg (C++: codegen_t &)
    @return: Microcode error code: MERR_OK - user-defined call
             generated; otherwise an error (MERR_INSN == unacceptable
             udc.tif)
    """
    code = _ida_hexrays.convert_to_user_call(*args)
    return code
class microcode_filter_t(object):
    """
    Proxy of C++ microcode_filter_t class.

    SWIG-generated "director" base class for non-standard microcode
    generators: subclass in Python and override match()/apply(), then
    register with install_microcode_filter(); delegates to the native
    ``_ida_hexrays`` module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def match(self, *args):
        """
        match(self, cdg) -> bool
        """
        return _ida_hexrays.microcode_filter_t_match(self, *args)
    def apply(self, *args):
        """
        apply(self, cdg) -> merror_t
        """
        return _ida_hexrays.microcode_filter_t_apply(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> microcode_filter_t
        """
        # Director pattern: pass self only for Python subclasses so
        # overridden methods can be called back from C++.
        if self.__class__ == microcode_filter_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_microcode_filter_t(_self, *args)
        # SWIG boilerplate: attach the freshly created C++ object.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_microcode_filter_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the C++ object to the native side.
        self.this.disown()
        _ida_hexrays.disown_microcode_filter_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
microcode_filter_t_swigregister = _ida_hexrays.microcode_filter_t_swigregister
microcode_filter_t_swigregister(microcode_filter_t)
def install_microcode_filter(*args):
    """
    install_microcode_filter(filter, install=True)

    Register or unregister a non-standard microcode generator.

    @param filter: microcode generator object
                   (C++: microcode_filter_t *)
    @param install: TRUE - register the object, FALSE - unregister
                    (C++: bool)
    """
    result = _ida_hexrays.install_microcode_filter(*args)
    return result
class udc_filter_t(microcode_filter_t):
    """
    Proxy of C++ udc_filter_t class.

    SWIG-generated microcode filter specialized for user-defined
    calls: init() takes a declaration, match()/apply() drive code
    generation; delegates to the native ``_ida_hexrays`` module.
    """
    # SWIG memory-ownership flag for the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def match(self, *args):
        """
        match(self, cdg) -> bool
        """
        return _ida_hexrays.udc_filter_t_match(self, *args)
    def init(self, *args):
        """
        init(self, decl) -> bool
        """
        return _ida_hexrays.udc_filter_t_init(self, *args)
    def apply(self, *args):
        """
        apply(self, cdg) -> merror_t
        """
        return _ida_hexrays.udc_filter_t_apply(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> udc_filter_t
        """
        # Director pattern: pass self only for Python subclasses so
        # overridden methods can be called back from C++.
        if self.__class__ == udc_filter_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_udc_filter_t(_self, *args)
        # SWIG boilerplate: attach the freshly created C++ object.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_udc_filter_t
    # Destruction is handled by SWIG through __swig_destroy__.
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the C++ object to the native side.
        self.this.disown()
        _ida_hexrays.disown_udc_filter_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
udc_filter_t_swigregister = _ida_hexrays.udc_filter_t_swigregister
udc_filter_t_swigregister(udc_filter_t)
class bitset_t(object):
"""
Proxy of C++ bitset_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> bitset_t
__init__(self, m) -> bitset_t
"""
this = _ida_hexrays.new_bitset_t(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_bitset_t
__del__ = lambda self : None;
def swap(self, *args):
"""
swap(self, r)
"""
return _ida_hexrays.bitset_t_swap(self, *args)
def copy(self, *args):
"""
copy(self, m) -> bitset_t
"""
return _ida_hexrays.bitset_t_copy(self, *args)
def add(self, *args):
"""
add(self, bit) -> bool
add(self, bit, width) -> bool
add(self, ml) -> bool
"""
return _ida_hexrays.bitset_t_add(self, *args)
def sub(self, *args):
"""
sub(self, bit) -> bool
sub(self, bit, width) -> bool
sub(self, ml) -> bool
"""
return _ida_hexrays.bitset_t_sub(self, *args)
def cut_at(self, *args):
"""
cut_at(self, maxbit) -> bool
"""
return _ida_hexrays.bitset_t_cut_at(self, *args)
def shift_down(self, *args):
"""
shift_down(self, shift)
"""
return _ida_hexrays.bitset_t_shift_down(self, *args)
def has(self, *args):
"""
has(self, bit) -> bool
"""
return _ida_hexrays.bitset_t_has(self, *args)
def has_all(self, *args):
"""
has_all(self, bit, width) -> bool
"""
return _ida_hexrays.bitset_t_has_all(self, *args)
def has_any(self, *args):
"""
has_any(self, bit, width) -> bool
"""
return _ida_hexrays.bitset_t_has_any(self, *args)
def dstr(self, *args):
"""
dstr(self) -> char const *
"""
return _ida_hexrays.bitset_t_dstr(self, *args)
def empty(self, *args):
"""
empty(self) -> bool
"""
return _ida_hexrays.bitset_t_empty(self, *args)
def count(self, *args):
"""
count(self) -> int
count(self, bit) -> int
"""
return _ida_hexrays.bitset_t_count(self, *args)
def last(self, *args):
"""
last(self) -> int
"""
return _ida_hexrays.bitset_t_last(self, *args)
def clear(self, *args):
"""
clear(self)
"""
return _ida_hexrays.bitset_t_clear(self, *args)
def fill_with_ones(self, *args):
"""
fill_with_ones(self, maxbit)
"""
return _ida_hexrays.bitset_t_fill_with_ones(self, *args)
def has_common(self, *args):
"""
has_common(self, ml) -> bool
"""
return _ida_hexrays.bitset_t_has_common(self, *args)
def intersect(self, *args):
"""
intersect(self, ml) -> bool
"""
return _ida_hexrays.bitset_t_intersect(self, *args)
def is_subset_of(self, *args):
"""
is_subset_of(self, ml) -> bool
"""
return _ida_hexrays.bitset_t_is_subset_of(self, *args)
def includes(self, *args):
"""
includes(self, ml) -> bool
"""
return _ida_hexrays.bitset_t_includes(self, *args)
def __eq__(self, *args):
"""
__eq__(self, r) -> bool
"""
return _ida_hexrays.bitset_t___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, r) -> bool
"""
return _ida_hexrays.bitset_t___ne__(self, *args)
def __lt__(self, *args):
"""
__lt__(self, r) -> bool
"""
return _ida_hexrays.bitset_t___lt__(self, *args)
def __gt__(self, *args):
"""
__gt__(self, r) -> bool
"""
return _ida_hexrays.bitset_t___gt__(self, *args)
def __le__(self, *args):
"""
__le__(self, r) -> bool
"""
return _ida_hexrays.bitset_t___le__(self, *args)
def __ge__(self, *args):
"""
__ge__(self, r) -> bool
"""
return _ida_hexrays.bitset_t___ge__(self, *args)
def compare(self, *args):
"""
compare(self, r) -> int
"""
return _ida_hexrays.bitset_t_compare(self, *args)
def itat(self, *args):
"""
itat(self, n) -> bitset_t::iterator
"""
return _ida_hexrays.bitset_t_itat(self, *args)
def begin(self, *args):
"""
begin(self) -> bitset_t::iterator
"""
return _ida_hexrays.bitset_t_begin(self, *args)
def end(self, *args):
"""
end(self) -> bitset_t::iterator
"""
return _ida_hexrays.bitset_t_end(self, *args)
def front(self, *args):
"""
front(self) -> int
"""
return _ida_hexrays.bitset_t_front(self, *args)
def back(self, *args):
"""
back(self) -> int
"""
return _ida_hexrays.bitset_t_back(self, *args)
def inc(self, *args):
"""
inc(self, p, n=1)
"""
return _ida_hexrays.bitset_t_inc(self, *args)
    def itv(self, *args):
        """
        itv(self, it) -> int

        Return the value at iterator position it (native implementation).
        """
        return _ida_hexrays.bitset_t_itv(self, *args)
__len__ = count
def __iter__(self):
it = self.begin()
for i in xrange(self.count()):
yield self.itv(it)
self.inc(it)
# Register the Python proxy class with the SWIG runtime so native
# bitset_t objects returned by _ida_hexrays are wrapped as bitset_t.
bitset_t_swigregister = _ida_hexrays.bitset_t_swigregister
bitset_t_swigregister(bitset_t)
# Module-level constants re-exported from the native cvar namespace.
bitset_width = cvar.bitset_width
bitset_align = cvar.bitset_align
bitset_shift = cvar.bitset_shift
class ivl_t(uval_ivl_t):
    """
    Proxy of C++ ivl_t class

    SWIG-generated interval wrapper (constructed from an offset and a
    size, per __init__); every method is a thin delegate to the native
    _ida_hexrays module.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, _off, _size) -> ivl_t
        """
        this = _ida_hexrays.new_ivl_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.ivl_t_empty(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.ivl_t_clear(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.ivl_t_dstr(self, *args)
    def extend_to_cover(self, *args):
        """
        extend_to_cover(self, r) -> bool
        """
        return _ida_hexrays.ivl_t_extend_to_cover(self, *args)
    def intersect(self, *args):
        """
        intersect(self, r)
        """
        return _ida_hexrays.ivl_t_intersect(self, *args)
    def overlap(self, *args):
        """
        overlap(self, ivl) -> bool
        """
        return _ida_hexrays.ivl_t_overlap(self, *args)
    def includes(self, *args):
        """
        includes(self, ivl) -> bool
        """
        return _ida_hexrays.ivl_t_includes(self, *args)
    def contains(self, *args):
        """
        contains(self, off2) -> bool
        """
        return _ida_hexrays.ivl_t_contains(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.ivl_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.ivl_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.ivl_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.ivl_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.ivl_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.ivl_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.ivl_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_ivl_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
ivl_t_swigregister = _ida_hexrays.ivl_t_swigregister
ivl_t_swigregister(ivl_t)
class ivl_with_name_t(object):
    """
    Proxy of C++ ivl_with_name_t class

    SWIG-generated record pairing an interval (ivl) with two name
    strings (whole, part); attribute access delegates to the native
    _ida_hexrays getters/setters.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    ivl = _swig_property(_ida_hexrays.ivl_with_name_t_ivl_get, _ida_hexrays.ivl_with_name_t_ivl_set)
    whole = _swig_property(_ida_hexrays.ivl_with_name_t_whole_get, _ida_hexrays.ivl_with_name_t_whole_set)
    part = _swig_property(_ida_hexrays.ivl_with_name_t_part_get, _ida_hexrays.ivl_with_name_t_part_set)
    def __init__(self, *args):
        """
        __init__(self) -> ivl_with_name_t
        """
        this = _ida_hexrays.new_ivl_with_name_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_ivl_with_name_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
ivl_with_name_t_swigregister = _ida_hexrays.ivl_with_name_t_swigregister
ivl_with_name_t_swigregister(ivl_with_name_t)
class ivlset_t(uval_ivl_ivlset_t):
    """
    Proxy of C++ ivlset_t class

    SWIG-generated set of intervals with set-algebra operations
    (add/sub/intersect/has_common/includes); every method is a thin
    delegate to the native _ida_hexrays module.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> ivlset_t
        __init__(self, ivl) -> ivlset_t
        """
        this = _ida_hexrays.new_ivlset_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def add(self, *args):
        """
        add(self, ivl) -> bool
        add(self, ea, size) -> bool
        add(self, ivs) -> bool
        """
        return _ida_hexrays.ivlset_t_add(self, *args)
    def addmasked(self, *args):
        """
        addmasked(self, ivs, mask) -> bool
        """
        return _ida_hexrays.ivlset_t_addmasked(self, *args)
    def sub(self, *args):
        """
        sub(self, ivl) -> bool
        sub(self, ea, size) -> bool
        sub(self, ivs) -> bool
        """
        return _ida_hexrays.ivlset_t_sub(self, *args)
    def _print(self, *args):
        """
        _print(self)
        """
        return _ida_hexrays.ivlset_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.ivlset_t_dstr(self, *args)
    def count(self, *args):
        """
        count(self) -> asize_t
        """
        return _ida_hexrays.ivlset_t_count(self, *args)
    def has_common(self, *args):
        """
        has_common(self, ivl, strict=False) -> bool
        has_common(self, ivs) -> bool
        """
        return _ida_hexrays.ivlset_t_has_common(self, *args)
    def contains(self, *args):
        """
        contains(self, off) -> bool
        """
        return _ida_hexrays.ivlset_t_contains(self, *args)
    def includes(self, *args):
        """
        includes(self, ivs) -> bool
        """
        return _ida_hexrays.ivlset_t_includes(self, *args)
    def intersect(self, *args):
        """
        intersect(self, ivs) -> bool
        """
        return _ida_hexrays.ivlset_t_intersect(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.ivlset_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.ivlset_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.ivlset_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.ivlset_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.ivlset_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.ivlset_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.ivlset_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_ivlset_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
ivlset_t_swigregister = _ida_hexrays.ivlset_t_swigregister
ivlset_t_swigregister(ivlset_t)
def get_mreg_name(*args):
    """
    get_mreg_name(bit, width, ud=None) -> int

    NOTE(review): despite the name, the generated signature reports an
    int return; the name itself is likely produced via an output
    parameter handled by the SWIG wrapper -- confirm against the
    Hex-Rays SDK documentation.
    """
    return _ida_hexrays.get_mreg_name(*args)
class rlist_t(bitset_t):
    """
    Proxy of C++ rlist_t class

    SWIG-generated register list; a bitset_t constructible from another
    list or a (reg, width) pair, per __init__.  Methods delegate to the
    native _ida_hexrays module.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> rlist_t
        __init__(self, m) -> rlist_t
        __init__(self, reg, width) -> rlist_t
        """
        this = _ida_hexrays.new_rlist_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_rlist_t
    __del__ = lambda self : None;
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.rlist_t_dstr(self, *args)
# Register the proxy class with the SWIG runtime.
rlist_t_swigregister = _ida_hexrays.rlist_t_swigregister
rlist_t_swigregister(rlist_t)
class mlist_t(object):
    """
    Proxy of C++ mlist_t class

    SWIG-generated location list combining a register list (self.reg,
    an rlist_t) and a memory interval set (self.mem, an ivlset_t), with
    set-algebra operations over both.  Every method is a thin delegate
    to the native _ida_hexrays module.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    reg = _swig_property(_ida_hexrays.mlist_t_reg_get, _ida_hexrays.mlist_t_reg_set)
    mem = _swig_property(_ida_hexrays.mlist_t_mem_get, _ida_hexrays.mlist_t_mem_set)
    def __init__(self, *args):
        """
        __init__(self) -> mlist_t
        __init__(self, ivl) -> mlist_t
        __init__(self, r, size) -> mlist_t
        """
        this = _ida_hexrays.new_mlist_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.mlist_t_swap(self, *args)
    def addmem(self, *args):
        """
        addmem(self, ea, size) -> bool
        """
        return _ida_hexrays.mlist_t_addmem(self, *args)
    def add(self, *args):
        """
        add(self, r, size) -> bool
        add(self, r) -> bool
        add(self, ivl) -> bool
        add(self, lst) -> bool
        """
        return _ida_hexrays.mlist_t_add(self, *args)
    def sub(self, *args):
        """
        sub(self, r, size) -> bool
        sub(self, ivl) -> bool
        sub(self, lst) -> bool
        """
        return _ida_hexrays.mlist_t_sub(self, *args)
    def count(self, *args):
        """
        count(self) -> asize_t
        """
        return _ida_hexrays.mlist_t_count(self, *args)
    def _print(self, *args):
        """
        _print(self)
        """
        return _ida_hexrays.mlist_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.mlist_t_dstr(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.mlist_t_empty(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.mlist_t_clear(self, *args)
    def has(self, *args):
        """
        has(self, r) -> bool
        """
        return _ida_hexrays.mlist_t_has(self, *args)
    def has_all(self, *args):
        """
        has_all(self, r, size) -> bool
        """
        return _ida_hexrays.mlist_t_has_all(self, *args)
    def has_any(self, *args):
        """
        has_any(self, r, size) -> bool
        """
        return _ida_hexrays.mlist_t_has_any(self, *args)
    def has_memory(self, *args):
        """
        has_memory(self) -> bool
        """
        return _ida_hexrays.mlist_t_has_memory(self, *args)
    def has_common(self, *args):
        """
        has_common(self, lst) -> bool
        """
        return _ida_hexrays.mlist_t_has_common(self, *args)
    def includes(self, *args):
        """
        includes(self, lst) -> bool
        """
        return _ida_hexrays.mlist_t_includes(self, *args)
    def intersect(self, *args):
        """
        intersect(self, lst) -> bool
        """
        return _ida_hexrays.mlist_t_intersect(self, *args)
    def is_subset_of(self, *args):
        """
        is_subset_of(self, lst) -> bool
        """
        return _ida_hexrays.mlist_t_is_subset_of(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.mlist_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.mlist_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.mlist_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.mlist_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.mlist_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.mlist_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.mlist_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_mlist_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
mlist_t_swigregister = _ida_hexrays.mlist_t_swigregister
mlist_t_swigregister(mlist_t)
class simple_graph_t(object):
    """
    Proxy of C++ simple_graph_t class

    SWIG-generated wrapper exposing only the title and
    colored_gdl_edges attributes; instances cannot be created from
    Python (no constructor is defined).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    # Instantiation is disabled: objects of this type are only obtained
    # from the native module.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    title = _swig_property(_ida_hexrays.simple_graph_t_title_get, _ida_hexrays.simple_graph_t_title_set)
    colored_gdl_edges = _swig_property(_ida_hexrays.simple_graph_t_colored_gdl_edges_get, _ida_hexrays.simple_graph_t_colored_gdl_edges_set)
# Register the proxy class with the SWIG runtime.
simple_graph_t_swigregister = _ida_hexrays.simple_graph_t_swigregister
simple_graph_t_swigregister(simple_graph_t)
class op_parent_info_t(object):
    """
    Proxy of C++ op_parent_info_t class

    SWIG-generated context record (mba, blk, topins, curins) used as
    the base class of the visitor classes below; attribute access
    delegates to the native _ida_hexrays getters/setters.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    mba = _swig_property(_ida_hexrays.op_parent_info_t_mba_get, _ida_hexrays.op_parent_info_t_mba_set)
    blk = _swig_property(_ida_hexrays.op_parent_info_t_blk_get, _ida_hexrays.op_parent_info_t_blk_set)
    topins = _swig_property(_ida_hexrays.op_parent_info_t_topins_get, _ida_hexrays.op_parent_info_t_topins_set)
    curins = _swig_property(_ida_hexrays.op_parent_info_t_curins_get, _ida_hexrays.op_parent_info_t_curins_set)
    def __init__(self, *args):
        """
        __init__(self, _mba=None, _blk=None, _topins=None) -> op_parent_info_t
        """
        this = _ida_hexrays.new_op_parent_info_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_op_parent_info_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
op_parent_info_t_swigregister = _ida_hexrays.op_parent_info_t_swigregister
op_parent_info_t_swigregister(op_parent_info_t)
class minsn_visitor_t(op_parent_info_t):
    """
    Proxy of C++ minsn_visitor_t class

    SWIG-generated visitor base: subclass and override visit_minsn() in
    Python; the native side calls back into the override.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, _mba=None, _blk=None, _topins=None) -> minsn_visitor_t
        """
        # Only subclasses pass themselves to the native constructor so
        # their Python overrides can be called back; presumably the SWIG
        # "director" mechanism -- confirm against the SWIG docs.
        if self.__class__ == minsn_visitor_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_minsn_visitor_t(_self, *args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def visit_minsn(self, *args):
        """
        visit_minsn(self) -> int
        """
        return _ida_hexrays.minsn_visitor_t_visit_minsn(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_minsn_visitor_t
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the native object to C++; return a weak
        # proxy so Python no longer keeps it alive.
        self.this.disown()
        _ida_hexrays.disown_minsn_visitor_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
minsn_visitor_t_swigregister = _ida_hexrays.minsn_visitor_t_swigregister
minsn_visitor_t_swigregister(minsn_visitor_t)
class mop_visitor_t(op_parent_info_t):
    """
    Proxy of C++ mop_visitor_t class

    SWIG-generated visitor base: subclass and override visit_mop() in
    Python; the native side calls back into the override.  Setting
    self.prune is exposed via the native getter/setter pair.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, _mba=None, _blk=None, _topins=None) -> mop_visitor_t
        """
        # Only subclasses pass themselves to the native constructor so
        # their Python overrides can be called back; presumably the SWIG
        # "director" mechanism -- confirm against the SWIG docs.
        if self.__class__ == mop_visitor_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_mop_visitor_t(_self, *args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    prune = _swig_property(_ida_hexrays.mop_visitor_t_prune_get, _ida_hexrays.mop_visitor_t_prune_set)
    def visit_mop(self, *args):
        """
        visit_mop(self, op, type, is_target) -> int
        """
        return _ida_hexrays.mop_visitor_t_visit_mop(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_mop_visitor_t
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the native object to C++; return a weak
        # proxy so Python no longer keeps it alive.
        self.this.disown()
        _ida_hexrays.disown_mop_visitor_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
mop_visitor_t_swigregister = _ida_hexrays.mop_visitor_t_swigregister
mop_visitor_t_swigregister(mop_visitor_t)
class scif_visitor_t(object):
    """
    Proxy of C++ scif_visitor_t class

    SWIG-generated visitor base: subclass and override visit_scif_mop()
    in Python; the native side calls back into the override.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def visit_scif_mop(self, *args):
        """
        visit_scif_mop(self, r, off) -> int
        """
        return _ida_hexrays.scif_visitor_t_visit_scif_mop(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> scif_visitor_t
        """
        # Only subclasses pass themselves to the native constructor so
        # their Python overrides can be called back; presumably the SWIG
        # "director" mechanism -- confirm against the SWIG docs.
        if self.__class__ == scif_visitor_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_scif_visitor_t(_self, *args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_scif_visitor_t
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the native object to C++; return a weak
        # proxy so Python no longer keeps it alive.
        self.this.disown()
        _ida_hexrays.disown_scif_visitor_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
scif_visitor_t_swigregister = _ida_hexrays.scif_visitor_t_swigregister
scif_visitor_t_swigregister(scif_visitor_t)
class mlist_mop_visitor_t(object):
    """
    Proxy of C++ mlist_mop_visitor_t class

    SWIG-generated visitor base with state attributes (topins, curins,
    changed, list): subclass and override visit_mop() in Python; the
    native side calls back into the override.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    topins = _swig_property(_ida_hexrays.mlist_mop_visitor_t_topins_get, _ida_hexrays.mlist_mop_visitor_t_topins_set)
    curins = _swig_property(_ida_hexrays.mlist_mop_visitor_t_curins_get, _ida_hexrays.mlist_mop_visitor_t_curins_set)
    changed = _swig_property(_ida_hexrays.mlist_mop_visitor_t_changed_get, _ida_hexrays.mlist_mop_visitor_t_changed_set)
    list = _swig_property(_ida_hexrays.mlist_mop_visitor_t_list_get, _ida_hexrays.mlist_mop_visitor_t_list_set)
    def __init__(self, *args):
        """
        __init__(self) -> mlist_mop_visitor_t
        """
        # Only subclasses pass themselves to the native constructor so
        # their Python overrides can be called back; presumably the SWIG
        # "director" mechanism -- confirm against the SWIG docs.
        if self.__class__ == mlist_mop_visitor_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_mlist_mop_visitor_t(_self, *args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def visit_mop(self, *args):
        """
        visit_mop(self, op) -> int
        """
        return _ida_hexrays.mlist_mop_visitor_t_visit_mop(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_mlist_mop_visitor_t
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the native object to C++; return a weak
        # proxy so Python no longer keeps it alive.
        self.this.disown()
        _ida_hexrays.disown_mlist_mop_visitor_t(self)
        return weakref_proxy(self)
# Register the proxy class with the SWIG runtime.
mlist_mop_visitor_t_swigregister = _ida_hexrays.mlist_mop_visitor_t_swigregister
mlist_mop_visitor_t_swigregister(mlist_mop_visitor_t)
class lvar_ref_t(object):
    """
    Proxy of C++ lvar_ref_t class

    SWIG-generated reference to a local variable, identified by its
    owning mba (read-only), an index (idx) and an offset (off); var()
    resolves the reference to the lvar_t itself.  Every method is a
    thin delegate to the native _ida_hexrays module.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # mba is exposed read-only: no setter is generated.
    mba = _swig_property(_ida_hexrays.lvar_ref_t_mba_get)
    off = _swig_property(_ida_hexrays.lvar_ref_t_off_get, _ida_hexrays.lvar_ref_t_off_set)
    idx = _swig_property(_ida_hexrays.lvar_ref_t_idx_get, _ida_hexrays.lvar_ref_t_idx_set)
    def __init__(self, *args):
        """
        __init__(self, m, i, o=0) -> lvar_ref_t
        __init__(self, r) -> lvar_ref_t
        """
        this = _ida_hexrays.new_lvar_ref_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.lvar_ref_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.lvar_ref_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.lvar_ref_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.lvar_ref_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.lvar_ref_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.lvar_ref_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.lvar_ref_t_compare(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.lvar_ref_t_swap(self, *args)
    def var(self, *args):
        """
        var(self) -> lvar_t
        """
        return _ida_hexrays.lvar_ref_t_var(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_lvar_ref_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
lvar_ref_t_swigregister = _ida_hexrays.lvar_ref_t_swigregister
lvar_ref_t_swigregister(lvar_ref_t)
# Operand type constants re-exported from the native cvar namespace;
# presumably one per mop_t.t operand kind (register, number, string,
# nested instruction, etc.) -- see the Hex-Rays SDK for exact meanings.
mop_z = cvar.mop_z
mop_r = cvar.mop_r
mop_n = cvar.mop_n
mop_str = cvar.mop_str
mop_d = cvar.mop_d
mop_S = cvar.mop_S
mop_v = cvar.mop_v
mop_b = cvar.mop_b
mop_f = cvar.mop_f
mop_l = cvar.mop_l
mop_a = cvar.mop_a
mop_h = cvar.mop_h
mop_c = cvar.mop_c
mop_fn = cvar.mop_fn
mop_p = cvar.mop_p
mop_sc = cvar.mop_sc
NOSIZE = cvar.NOSIZE
class stkvar_ref_t(object):
    """
    Proxy of C++ stkvar_ref_t class

    SWIG-generated reference to a stack variable, identified by its
    owning mba (read-only) and an offset (off); get_stkvar() resolves
    it to a member_t.  Every method is a thin delegate to the native
    _ida_hexrays module.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # mba is exposed read-only: no setter is generated.
    mba = _swig_property(_ida_hexrays.stkvar_ref_t_mba_get)
    off = _swig_property(_ida_hexrays.stkvar_ref_t_off_get, _ida_hexrays.stkvar_ref_t_off_set)
    def __init__(self, *args):
        """
        __init__(self, m, o) -> stkvar_ref_t
        """
        this = _ida_hexrays.new_stkvar_ref_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.stkvar_ref_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.stkvar_ref_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.stkvar_ref_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.stkvar_ref_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.stkvar_ref_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.stkvar_ref_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.stkvar_ref_t_compare(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.stkvar_ref_t_swap(self, *args)
    def get_stkvar(self, *args):
        """
        get_stkvar(self, p_off=None) -> member_t *
        """
        return _ida_hexrays.stkvar_ref_t_get_stkvar(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_stkvar_ref_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
stkvar_ref_t_swigregister = _ida_hexrays.stkvar_ref_t_swigregister
stkvar_ref_t_swigregister(stkvar_ref_t)
class scif_t(vdloc_t):
    """
    Proxy of C++ scif_t class

    SWIG-generated vdloc_t subclass carrying an owning mba, a name and
    a type (constructed from (_mba, n, tif), per __init__); attribute
    access delegates to the native _ida_hexrays getters/setters.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    mba = _swig_property(_ida_hexrays.scif_t_mba_get, _ida_hexrays.scif_t_mba_set)
    name = _swig_property(_ida_hexrays.scif_t_name_get, _ida_hexrays.scif_t_name_set)
    type = _swig_property(_ida_hexrays.scif_t_type_get, _ida_hexrays.scif_t_type_set)
    def __init__(self, *args):
        """
        __init__(self, _mba, n, tif) -> scif_t
        """
        this = _ida_hexrays.new_scif_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_scif_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
scif_t_swigregister = _ida_hexrays.scif_t_swigregister
scif_t_swigregister(scif_t)
class mnumber_t(operand_locator_t):
    """
    Proxy of C++ mnumber_t class

    SWIG-generated constant-number operand carrying the current value
    and the original value (org_value); update_value() changes the
    stored value.  Every method is a thin delegate to the native
    _ida_hexrays module.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    value = _swig_property(_ida_hexrays.mnumber_t_value_get, _ida_hexrays.mnumber_t_value_set)
    org_value = _swig_property(_ida_hexrays.mnumber_t_org_value_get, _ida_hexrays.mnumber_t_org_value_set)
    def __init__(self, *args):
        """
        __init__(self, v, _ea=BADADDR, n=0) -> mnumber_t
        """
        this = _ida_hexrays.new_mnumber_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.mnumber_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.mnumber_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.mnumber_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.mnumber_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.mnumber_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.mnumber_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.mnumber_t_compare(self, *args)
    def update_value(self, *args):
        """
        update_value(self, val64)
        """
        return _ida_hexrays.mnumber_t_update_value(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_mnumber_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
mnumber_t_swigregister = _ida_hexrays.mnumber_t_swigregister
mnumber_t_swigregister(mnumber_t)
class fnumber_t(object):
    """
    Proxy of C++ fnumber_t class

    SWIG-generated floating-point constant holding the raw value (fnum)
    and its size in bytes (nbytes).  Every method is a thin delegate to
    the native _ida_hexrays module.  Note: the initial fnum
    _swig_property below is deliberately shadowed further down by a
    read-only property built on __get_fnum (returning an fnum_array).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    fnum = _swig_property(_ida_hexrays.fnumber_t_fnum_get, _ida_hexrays.fnumber_t_fnum_set)
    nbytes = _swig_property(_ida_hexrays.fnumber_t_nbytes_get, _ida_hexrays.fnumber_t_nbytes_set)
    def dereference_uint16(self, *args):
        """
        dereference_uint16(self) -> uint16 *
        """
        return _ida_hexrays.fnumber_t_dereference_uint16(self, *args)
    def dereference_const_uint16(self, *args):
        """
        dereference_const_uint16(self) -> uint16 const *
        """
        return _ida_hexrays.fnumber_t_dereference_const_uint16(self, *args)
    def _print(self, *args):
        """
        _print(self)
        """
        return _ida_hexrays.fnumber_t__print(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.fnumber_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.fnumber_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.fnumber_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.fnumber_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.fnumber_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.fnumber_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.fnumber_t_compare(self, *args)
    def __get_fnum(self, *args):
        """
        __get_fnum(self) -> fnum_array
        """
        return _ida_hexrays.fnumber_t___get_fnum(self, *args)
    # Shadows the earlier fnum _swig_property with a read-only accessor.
    fnum = property(__get_fnum)
    def __init__(self, *args):
        """
        __init__(self) -> fnumber_t
        """
        this = _ida_hexrays.new_fnumber_t(*args)
        # SWIG ownership handoff: attach the new native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_fnumber_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
fnumber_t_swigregister = _ida_hexrays.fnumber_t_swigregister
fnumber_t_swigregister(fnumber_t)
# Instruction-printing flags (SHINS_*) and side-effect / register-size
# selectors below are re-exported from the native module.
SHINS_NUMADDR = _ida_hexrays.SHINS_NUMADDR
"""
display definition addresses for numbers
"""
SHINS_VALNUM = _ida_hexrays.SHINS_VALNUM
"""
display value numbers
"""
SHINS_SHORT = _ida_hexrays.SHINS_SHORT
"""
do not display use-def chains and other attrs
"""
SHINS_LDXEA = _ida_hexrays.SHINS_LDXEA
"""
display address of ldx expressions (not used)
"""
NO_SIDEFF = _ida_hexrays.NO_SIDEFF
WITH_SIDEFF = _ida_hexrays.WITH_SIDEFF
ONLY_SIDEFF = _ida_hexrays.ONLY_SIDEFF
ANY_REGSIZE = _ida_hexrays.ANY_REGSIZE
class mop_t(object):
"""
Proxy of C++ mop_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
t = _swig_property(_ida_hexrays.mop_t_t_get, _ida_hexrays.mop_t_t_set)
oprops = _swig_property(_ida_hexrays.mop_t_oprops_get, _ida_hexrays.mop_t_oprops_set)
valnum = _swig_property(_ida_hexrays.mop_t_valnum_get, _ida_hexrays.mop_t_valnum_set)
size = _swig_property(_ida_hexrays.mop_t_size_get, _ida_hexrays.mop_t_size_set)
def set_impptr_done(self, *args):
"""
set_impptr_done(self)
"""
return _ida_hexrays.mop_t_set_impptr_done(self, *args)
def set_udt(self, *args):
"""
set_udt(self)
"""
return _ida_hexrays.mop_t_set_udt(self, *args)
def set_undef_val(self, *args):
"""
set_undef_val(self)
"""
return _ida_hexrays.mop_t_set_undef_val(self, *args)
def is_impptr_done(self, *args):
"""
is_impptr_done(self) -> bool
"""
return _ida_hexrays.mop_t_is_impptr_done(self, *args)
def is_udt(self, *args):
"""
is_udt(self) -> bool
"""
return _ida_hexrays.mop_t_is_udt(self, *args)
def probably_floating(self, *args):
"""
probably_floating(self) -> bool
"""
return _ida_hexrays.mop_t_probably_floating(self, *args)
def is_ccflags(self, *args):
"""
is_ccflags(self) -> bool
"""
return _ida_hexrays.mop_t_is_ccflags(self, *args)
def is_undef_val(self, *args):
"""
is_undef_val(self) -> bool
"""
return _ida_hexrays.mop_t_is_undef_val(self, *args)
def __init__(self, *args):
"""
__init__(self) -> mop_t
__init__(self, rop) -> mop_t
__init__(self, _r, _s) -> mop_t
"""
this = _ida_hexrays.new_mop_t(*args)
try: self.this.append(this)
except: self.this = this
def assign(self, *args):
"""
assign(self, rop) -> mop_t
"""
return _ida_hexrays.mop_t_assign(self, *args)
__swig_destroy__ = _ida_hexrays.delete_mop_t
__del__ = lambda self : None;
def zero(self, *args):
"""
zero(self)
"""
return _ida_hexrays.mop_t_zero(self, *args)
def swap(self, *args):
"""
swap(self, rop)
"""
return _ida_hexrays.mop_t_swap(self, *args)
def erase(self, *args):
"""
erase(self)
"""
return _ida_hexrays.mop_t_erase(self, *args)
def erase_but_keep_size(self, *args):
"""
erase_but_keep_size(self)
"""
return _ida_hexrays.mop_t_erase_but_keep_size(self, *args)
def _print(self, *args):
"""
_print(self, shins_flags=0x04|0x02)
"""
return _ida_hexrays.mop_t__print(self, *args)
def dstr(self, *args):
"""
dstr(self) -> char const *
"""
return _ida_hexrays.mop_t_dstr(self, *args)
def create_from_mlist(self, *args):
"""
create_from_mlist(self, mba, lst, fullsize) -> bool
"""
return _ida_hexrays.mop_t_create_from_mlist(self, *args)
def create_from_ivlset(self, *args):
"""
create_from_ivlset(self, mba, ivs, fullsize) -> bool
"""
return _ida_hexrays.mop_t_create_from_ivlset(self, *args)
def create_from_vdloc(self, *args):
"""
create_from_vdloc(self, mba, loc, _size)
"""
return _ida_hexrays.mop_t_create_from_vdloc(self, *args)
def create_from_scattered_vdloc(self, *args):
"""
create_from_scattered_vdloc(self, mba, name, type, loc)
"""
return _ida_hexrays.mop_t_create_from_scattered_vdloc(self, *args)
def create_from_insn(self, *args):
"""
create_from_insn(self, m)
"""
return _ida_hexrays.mop_t_create_from_insn(self, *args)
def make_number(self, *args):
"""
make_number(self, _value, _size, _ea=BADADDR, opnum=0)
"""
return _ida_hexrays.mop_t_make_number(self, *args)
def make_fpnum(self, *args):
"""
make_fpnum(self, bytes, _size) -> bool
"""
return _ida_hexrays.mop_t_make_fpnum(self, *args)
def _make_reg(self, *args):
"""
_make_reg(self, reg)
_make_reg(self, reg, _size)
"""
return _ida_hexrays.mop_t__make_reg(self, *args)
def make_reg(self, *args):
"""
make_reg(self, reg)
make_reg(self, reg, _size)
"""
return _ida_hexrays.mop_t_make_reg(self, *args)
def _make_lvar(self, *args):
"""
_make_lvar(self, mba, idx, off=0)
"""
return _ida_hexrays.mop_t__make_lvar(self, *args)
def _make_gvar(self, *args):
"""
_make_gvar(self, ea)
"""
return _ida_hexrays.mop_t__make_gvar(self, *args)
def make_gvar(self, *args):
"""
make_gvar(self, ea)
"""
return _ida_hexrays.mop_t_make_gvar(self, *args)
def _make_stkvar(self, *args):
"""
_make_stkvar(self, mba, off)
"""
return _ida_hexrays.mop_t__make_stkvar(self, *args)
def make_reg_pair(self, *args):
"""
make_reg_pair(self, loreg, hireg, halfsize)
"""
return _ida_hexrays.mop_t_make_reg_pair(self, *args)
def _make_insn(self, *args):
"""
_make_insn(self, ins)
"""
return _ida_hexrays.mop_t__make_insn(self, *args)
def make_insn(self, *args):
"""
make_insn(self, ins)
"""
return _ida_hexrays.mop_t_make_insn(self, *args)
def _make_blkref(self, *args):
"""
_make_blkref(self, blknum)
"""
return _ida_hexrays.mop_t__make_blkref(self, *args)
def make_blkref(self, *args):
"""
make_blkref(self, blknum)
"""
return _ida_hexrays.mop_t_make_blkref(self, *args)
def make_helper(self, *args):
"""
make_helper(self, name)
"""
return _ida_hexrays.mop_t_make_helper(self, *args)
def _make_strlit(self, *args):
"""
_make_strlit(self, str)
"""
return _ida_hexrays.mop_t__make_strlit(self, *args)
def _make_callinfo(self, *args):
"""
_make_callinfo(self, fi)
"""
return _ida_hexrays.mop_t__make_callinfo(self, *args)
def _make_cases(self, *args):
"""
_make_cases(self, _cases)
"""
return _ida_hexrays.mop_t__make_cases(self, *args)
def _make_pair(self, *args):
"""
_make_pair(self, _pair)
"""
return _ida_hexrays.mop_t__make_pair(self, *args)
def is_reg(self, *args):
"""
is_reg(self) -> bool
is_reg(self, _r) -> bool
is_reg(self, _r, _size) -> bool
"""
return _ida_hexrays.mop_t_is_reg(self, *args)
    # --- mop_t predicate / comparison wrappers (SWIG-generated) ---
    # Each method forwards *args unchanged to the flat C function in the
    # _ida_hexrays extension; overload resolution happens on the C side,
    # which is why multiple signatures appear in one docstring.
    def is_cc(self, *args):
        """
        is_cc(self) -> bool
        """
        return _ida_hexrays.mop_t_is_cc(self, *args)
    def is_bit_reg(self, *args):
        """
        is_bit_reg(self, reg) -> bool
        is_bit_reg(self) -> bool
        """
        return _ida_hexrays.mop_t_is_bit_reg(self, *args)
    def is_kreg(self, *args):
        """
        is_kreg(self) -> bool
        """
        return _ida_hexrays.mop_t_is_kreg(self, *args)
    def is_mob(self, *args):
        """
        is_mob(self, serial) -> bool
        """
        return _ida_hexrays.mop_t_is_mob(self, *args)
    def is_scattered(self, *args):
        """
        is_scattered(self) -> bool
        """
        return _ida_hexrays.mop_t_is_scattered(self, *args)
    def is_glbaddr(self, *args):
        """
        is_glbaddr(self) -> bool
        is_glbaddr(self, ea) -> bool
        """
        return _ida_hexrays.mop_t_is_glbaddr(self, *args)
    def is_stkaddr(self, *args):
        """
        is_stkaddr(self) -> bool
        """
        return _ida_hexrays.mop_t_is_stkaddr(self, *args)
    def is_insn(self, *args):
        """
        is_insn(self) -> bool
        is_insn(self, code) -> bool
        """
        return _ida_hexrays.mop_t_is_insn(self, *args)
    def has_side_effects(self, *args):
        """
        has_side_effects(self, include_ldx_and_divs=False) -> bool
        """
        return _ida_hexrays.mop_t_has_side_effects(self, *args)
    def may_use_aliased_memory(self, *args):
        """
        may_use_aliased_memory(self) -> bool
        """
        return _ida_hexrays.mop_t_may_use_aliased_memory(self, *args)
    def is01(self, *args):
        """
        is01(self) -> bool
        """
        return _ida_hexrays.mop_t_is01(self, *args)
    def is_sign_extended_from(self, *args):
        """
        is_sign_extended_from(self, nbytes) -> bool
        """
        return _ida_hexrays.mop_t_is_sign_extended_from(self, *args)
    def is_zero_extended_from(self, *args):
        """
        is_zero_extended_from(self, nbytes) -> bool
        """
        return _ida_hexrays.mop_t_is_zero_extended_from(self, *args)
    def is_extended_from(self, *args):
        """
        is_extended_from(self, nbytes, is_signed) -> bool
        """
        return _ida_hexrays.mop_t_is_extended_from(self, *args)
    def equal_mops(self, *args):
        """
        equal_mops(self, rop, eqflags) -> bool
        """
        return _ida_hexrays.mop_t_equal_mops(self, *args)
    # Rich-comparison operators also delegate to C; note that only
    # __eq__/__ne__/__lt__ are generated here (no __gt__/__le__/__ge__).
    def __eq__(self, *args):
        """
        __eq__(self, rop) -> bool
        """
        return _ida_hexrays.mop_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, rop) -> bool
        """
        return _ida_hexrays.mop_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, rop) -> bool
        """
        return _ida_hexrays.mop_t___lt__(self, *args)
    def lexcompare(self, *args):
        """
        lexcompare(self, rop) -> int
        """
        return _ida_hexrays.mop_t_lexcompare(self, *args)
    # --- mop_t traversal, value-extraction and in-place transform wrappers ---
    # All delegate to the C side; see the flat-function docstrings for the
    # accepted overloads.
    def for_all_ops(self, *args):
        """
        for_all_ops(self, mv, type=None, is_target=False) -> int
        """
        return _ida_hexrays.mop_t_for_all_ops(self, *args)
    def for_all_scattered_submops(self, *args):
        """
        for_all_scattered_submops(self, sv) -> int
        """
        return _ida_hexrays.mop_t_for_all_scattered_submops(self, *args)
    def value(self, *args):
        """
        value(self, is_signed) -> uint64
        """
        return _ida_hexrays.mop_t_value(self, *args)
    def signed_value(self, *args):
        """
        signed_value(self) -> int64
        """
        return _ida_hexrays.mop_t_signed_value(self, *args)
    def unsigned_value(self, *args):
        """
        unsigned_value(self) -> uint64
        """
        return _ida_hexrays.mop_t_unsigned_value(self, *args)
    def is_constant(self, *args):
        """
        is_constant(self, is_signed=True) -> bool
        """
        return _ida_hexrays.mop_t_is_constant(self, *args)
    def is_equal_to(self, *args):
        """
        is_equal_to(self, n, is_signed=True) -> bool
        """
        return _ida_hexrays.mop_t_is_equal_to(self, *args)
    def is_zero(self, *args):
        """
        is_zero(self) -> bool
        """
        return _ida_hexrays.mop_t_is_zero(self, *args)
    def is_one(self, *args):
        """
        is_one(self) -> bool
        """
        return _ida_hexrays.mop_t_is_one(self, *args)
    def is_positive_constant(self, *args):
        """
        is_positive_constant(self) -> bool
        """
        return _ida_hexrays.mop_t_is_positive_constant(self, *args)
    def is_negative_constant(self, *args):
        """
        is_negative_constant(self) -> bool
        """
        return _ida_hexrays.mop_t_is_negative_constant(self, *args)
    def get_stkvar(self, *args):
        """
        get_stkvar(self, p_off) -> member_t *
        """
        return _ida_hexrays.mop_t_get_stkvar(self, *args)
    def get_stkoff(self, *args):
        """
        get_stkoff(self, p_off) -> bool
        """
        return _ida_hexrays.mop_t_get_stkoff(self, *args)
    def get_insn(self, *args):
        """
        get_insn(self, code) -> minsn_t
        get_insn(self, code) -> minsn_t
        """
        return _ida_hexrays.mop_t_get_insn(self, *args)
    # make_*/shift/change_size mutate the operand in place and return
    # success as bool.
    def make_low_half(self, *args):
        """
        make_low_half(self, width) -> bool
        """
        return _ida_hexrays.mop_t_make_low_half(self, *args)
    def make_high_half(self, *args):
        """
        make_high_half(self, width) -> bool
        """
        return _ida_hexrays.mop_t_make_high_half(self, *args)
    def make_first_half(self, *args):
        """
        make_first_half(self, width) -> bool
        """
        return _ida_hexrays.mop_t_make_first_half(self, *args)
    def make_second_half(self, *args):
        """
        make_second_half(self, width) -> bool
        """
        return _ida_hexrays.mop_t_make_second_half(self, *args)
    def shift_mop(self, *args):
        """
        shift_mop(self, offset) -> bool
        """
        return _ida_hexrays.mop_t_shift_mop(self, *args)
    def change_size(self, *args):
        """
        change_size(self, nsize, sideff=WITH_SIDEFF) -> bool
        """
        return _ida_hexrays.mop_t_change_size(self, *args)
    def double_size(self, *args):
        """
        double_size(self, sideff=WITH_SIDEFF) -> bool
        """
        return _ida_hexrays.mop_t_double_size(self, *args)
    def preserve_side_effects(self, *args):
        """
        preserve_side_effects(self, blk, top, moved_calls=None) -> bool
        """
        return _ida_hexrays.mop_t_preserve_side_effects(self, *args)
    def apply_ld_mcode(self, *args):
        """
        apply_ld_mcode(self, mcode, ea, newsize)
        """
        return _ida_hexrays.mop_t_apply_ld_mcode(self, *args)
    def apply_xdu(self, *args):
        """
        apply_xdu(self, ea, newsize)
        """
        return _ida_hexrays.mop_t_apply_xdu(self, *args)
    def apply_xds(self, *args):
        """
        apply_xds(self, ea, newsize)
        """
        return _ida_hexrays.mop_t_apply_xds(self, *args)
    # _register/_deregister tie this proxy into IDAPython's object-lifetime
    # tracking (see the ownership helpers further below).
    def _register(self, *args):
        """
        _register(self)
        """
        return _ida_hexrays.mop_t__register(self, *args)
    def _deregister(self, *args):
        """
        _deregister(self)
        """
        return _ida_hexrays.mop_t__deregister(self, *args)
    # --- mop_t tagged-union accessors ---
    # A mop_t stores one of several payloads selected by its type tag
    # `self.t` (mop_r register, mop_n number, mop_str string, mop_d nested
    # instruction, ...).  For each payload X the generator emits a
    # _get_X/_set_X pair plus a `X = property(...)` that:
    #   * reads:  returns the payload only when the tag matches, else None;
    #   * writes: asserts the tag matches (_ensure_cond), for owned object
    #     payloads also checks no object is already attached
    #     (_ensure_no_obj) and transfers ownership (_acquire_ownership)
    #     before storing.  The `and`-chaining relies on each helper
    #     returning True or raising.
    def _get_r(self, *args):
        """
        _get_r(self) -> mreg_t
        """
        return _ida_hexrays.mop_t__get_r(self, *args)
    def _set_r(self, *args):
        """
        _set_r(self, _v)
        """
        return _ida_hexrays.mop_t__set_r(self, *args)
    # r: register number (plain value, no ownership transfer).
    r = property( lambda self: self._get_r() if self.t == mop_r else None, lambda self, v: self._ensure_cond(self.t == mop_r,"self.t == mop_r") and self._set_r(v))
    def _get_nnn(self, *args):
        """
        _get_nnn(self) -> mnumber_t
        """
        return _ida_hexrays.mop_t__get_nnn(self, *args)
    def _set_nnn(self, *args):
        """
        _set_nnn(self, _v)
        """
        return _ida_hexrays.mop_t__set_nnn(self, *args)
    # nnn: immediate number object (owned).
    nnn = property( lambda self: self._get_nnn() if self.t == mop_n else None, lambda self, v: self._ensure_cond(self.t == mop_n,"self.t == mop_n") and self._ensure_no_obj(self._get_nnn(),"nnn", True) and self._acquire_ownership(v, True) and self._set_nnn(v))
    def _get_cstr(self, *args):
        """
        _get_cstr(self) -> char const *
        """
        return _ida_hexrays.mop_t__get_cstr(self, *args)
    def _set_cstr(self, *args):
        """
        _set_cstr(self, _v)
        """
        return _ida_hexrays.mop_t__set_cstr(self, *args)
    # cstr: string constant (not owned as a Python object).
    cstr = property( lambda self: self._get_cstr() if self.t == mop_str else None, lambda self, v: self._ensure_cond(self.t == mop_str,"self.t == mop_str") and self._ensure_no_obj(self._get_cstr(),"cstr", False) and self._acquire_ownership(v, False) and self._set_cstr(v))
    def _get_d(self, *args):
        """
        _get_d(self) -> minsn_t
        """
        return _ida_hexrays.mop_t__get_d(self, *args)
    def _set_d(self, *args):
        """
        _set_d(self, _v)
        """
        return _ida_hexrays.mop_t__set_d(self, *args)
    # d: nested instruction (owned).
    d = property( lambda self: self._get_d() if self.t == mop_d else None, lambda self, v: self._ensure_cond(self.t == mop_d,"self.t == mop_d") and self._ensure_no_obj(self._get_d(),"d", True) and self._acquire_ownership(v, True) and self._set_d(v))
    def _get_s(self, *args):
        """
        _get_s(self) -> stkvar_ref_t
        """
        return _ida_hexrays.mop_t__get_s(self, *args)
    def _set_s(self, *args):
        """
        _set_s(self, _v)
        """
        return _ida_hexrays.mop_t__set_s(self, *args)
    # s: stack variable reference (owned).
    s = property( lambda self: self._get_s() if self.t == mop_S else None, lambda self, v: self._ensure_cond(self.t == mop_S,"self.t == mop_S") and self._ensure_no_obj(self._get_s(),"s", True) and self._acquire_ownership(v, True) and self._set_s(v))
    def _get_g(self, *args):
        """
        _get_g(self) -> ea_t
        """
        return _ida_hexrays.mop_t__get_g(self, *args)
    def _set_g(self, *args):
        """
        _set_g(self, _v)
        """
        return _ida_hexrays.mop_t__set_g(self, *args)
    # g: global address (plain value).
    g = property( lambda self: self._get_g() if self.t == mop_v else None, lambda self, v: self._ensure_cond(self.t == mop_v,"self.t == mop_v") and self._set_g(v))
    def _get_b(self, *args):
        """
        _get_b(self) -> int
        """
        return _ida_hexrays.mop_t__get_b(self, *args)
    def _set_b(self, *args):
        """
        _set_b(self, _v)
        """
        return _ida_hexrays.mop_t__set_b(self, *args)
    # b: block number (plain value).
    b = property( lambda self: self._get_b() if self.t == mop_b else None, lambda self, v: self._ensure_cond(self.t == mop_b,"self.t == mop_b") and self._set_b(v))
    def _get_f(self, *args):
        """
        _get_f(self) -> mcallinfo_t
        """
        return _ida_hexrays.mop_t__get_f(self, *args)
    def _set_f(self, *args):
        """
        _set_f(self, _v)
        """
        return _ida_hexrays.mop_t__set_f(self, *args)
    # f: call information (owned).
    f = property( lambda self: self._get_f() if self.t == mop_f else None, lambda self, v: self._ensure_cond(self.t == mop_f,"self.t == mop_f") and self._ensure_no_obj(self._get_f(),"f", True) and self._acquire_ownership(v, True) and self._set_f(v))
    def _get_l(self, *args):
        """
        _get_l(self) -> lvar_ref_t
        """
        return _ida_hexrays.mop_t__get_l(self, *args)
    def _set_l(self, *args):
        """
        _set_l(self, _v)
        """
        return _ida_hexrays.mop_t__set_l(self, *args)
    # l: local variable reference (owned).
    l = property( lambda self: self._get_l() if self.t == mop_l else None, lambda self, v: self._ensure_cond(self.t == mop_l,"self.t == mop_l") and self._ensure_no_obj(self._get_l(),"l", True) and self._acquire_ownership(v, True) and self._set_l(v))
    def _get_a(self, *args):
        """
        _get_a(self) -> mop_addr_t
        """
        return _ida_hexrays.mop_t__get_a(self, *args)
    def _set_a(self, *args):
        """
        _set_a(self, _v)
        """
        return _ida_hexrays.mop_t__set_a(self, *args)
    # a: address of another operand (owned).
    a = property( lambda self: self._get_a() if self.t == mop_a else None, lambda self, v: self._ensure_cond(self.t == mop_a,"self.t == mop_a") and self._ensure_no_obj(self._get_a(),"a", True) and self._acquire_ownership(v, True) and self._set_a(v))
    def _get_helper(self, *args):
        """
        _get_helper(self) -> char const *
        """
        return _ida_hexrays.mop_t__get_helper(self, *args)
    def _set_helper(self, *args):
        """
        _set_helper(self, _v)
        """
        return _ida_hexrays.mop_t__set_helper(self, *args)
    # helper: helper function name (string payload, not owned).
    helper = property( lambda self: self._get_helper() if self.t == mop_h else None, lambda self, v: self._ensure_cond(self.t == mop_h,"self.t == mop_h") and self._ensure_no_obj(self._get_helper(),"helper", False) and self._acquire_ownership(v, False) and self._set_helper(v))
    def _get_c(self, *args):
        """
        _get_c(self) -> mcases_t
        """
        return _ida_hexrays.mop_t__get_c(self, *args)
    def _set_c(self, *args):
        """
        _set_c(self, _v)
        """
        return _ida_hexrays.mop_t__set_c(self, *args)
    # c: switch cases (owned).
    c = property( lambda self: self._get_c() if self.t == mop_c else None, lambda self, v: self._ensure_cond(self.t == mop_c,"self.t == mop_c") and self._ensure_no_obj(self._get_c(),"c", True) and self._acquire_ownership(v, True) and self._set_c(v))
    def _get_fpc(self, *args):
        """
        _get_fpc(self) -> fnumber_t
        """
        return _ida_hexrays.mop_t__get_fpc(self, *args)
    def _set_fpc(self, *args):
        """
        _set_fpc(self, _v)
        """
        return _ida_hexrays.mop_t__set_fpc(self, *args)
    # fpc: floating point constant (owned).
    fpc = property( lambda self: self._get_fpc() if self.t == mop_fn else None, lambda self, v: self._ensure_cond(self.t == mop_fn,"self.t == mop_fn") and self._ensure_no_obj(self._get_fpc(),"fpc", True) and self._acquire_ownership(v, True) and self._set_fpc(v))
    def _get_pair(self, *args):
        """
        _get_pair(self) -> mop_pair_t
        """
        return _ida_hexrays.mop_t__get_pair(self, *args)
    def _set_pair(self, *args):
        """
        _set_pair(self, _v)
        """
        return _ida_hexrays.mop_t__set_pair(self, *args)
    # pair: operand pair (owned).
    pair = property( lambda self: self._get_pair() if self.t == mop_p else None, lambda self, v: self._ensure_cond(self.t == mop_p,"self.t == mop_p") and self._ensure_no_obj(self._get_pair(),"pair", True) and self._acquire_ownership(v, True) and self._set_pair(v))
    def _get_scif(self, *args):
        """
        _get_scif(self) -> scif_t
        """
        return _ida_hexrays.mop_t__get_scif(self, *args)
    def _set_scif(self, *args):
        """
        _set_scif(self, _v)
        """
        return _ida_hexrays.mop_t__set_scif(self, *args)
    # scif: scattered operand info (owned).
    scif = property( lambda self: self._get_scif() if self.t == mop_sc else None, lambda self, v: self._ensure_cond(self.t == mop_sc,"self.t == mop_sc") and self._ensure_no_obj(self._get_scif(),"scif", True) and self._acquire_ownership(v, True) and self._set_scif(v))
    def _get_t(self, *args):
        """
        _get_t(self) -> mopt_t
        """
        return _ida_hexrays.mop_t__get_t(self, *args)
    def _set_t(self, *args):
        """
        _set_t(self, v)
        """
        return _ida_hexrays.mop_t__set_t(self, *args)
    def _ensure_no_t(self):
        # The tag may only be assigned while the operand is still empty
        # (mop_z); once a payload type is set it cannot be switched from
        # Python, to avoid leaking/corrupting the C++ union.
        if self.t not in [mop_z]:
            raise Exception("%s has type %s; cannot be modified" % (self, self.t))
        return True
    t = property(
        _get_t,
        lambda self, v: self._ensure_no_t() and self._set_t(v))
    # --- debug / ownership helpers (template shared by several proxy
    # classes in this generated module) ---
    def __dbg_get_meminfo(self, *args):
        """
        __dbg_get_meminfo(self) -> qstring
        """
        return _ida_hexrays.mop_t___dbg_get_meminfo(self, *args)
    def __dbg_get_registered_kind(self, *args):
        """
        __dbg_get_registered_kind(self) -> int
        """
        return _ida_hexrays.mop_t___dbg_get_registered_kind(self, *args)
    def _obj_id(self, *args):
        """
        _obj_id(self) -> PyObject *
        """
        return _ida_hexrays.mop_t__obj_id(self, *args)
    obj_id = property(_obj_id)
    def _ensure_cond(self, ok, cond_str):
        # Raise (with the failed condition text) unless `ok`; returns True
        # so it can be chained with `and` inside the property setters.
        if not ok:
            raise Exception("Condition \"%s\" not verified" % cond_str)
        return True
    def _ensure_no_obj(self, o, attr, attr_is_acquired):
        # Refuse to overwrite an attribute that already owns a C++ object.
        if attr_is_acquired and o is not None:
            raise Exception("%s already owns attribute \"%s\" (%s); cannot be modified" % (self, attr, o))
        return True
    def _acquire_ownership(self, v, acquire):
        # Transfer C++ ownership of `v` to this object: `v` must currently
        # own its wrapped object (thisown), and is deregistered from
        # IDAPython's tracking since it no longer owns it afterwards.
        # NOTE(review): `long` implies this module targets Python 2; under
        # Python 3 this name is undefined -- verify the interpreter target.
        if acquire and (v is not None) and not isinstance(v, (int, long)):
            if not v.thisown:
                raise Exception("%s is already owned, and cannot be reused" % v)
            v.thisown = False
            dereg = getattr(v, "_deregister", None)
            if dereg:
                dereg()
        return True
    def _maybe_disown_and_deregister(self):
        if self.thisown:
            self.thisown = False
            self._deregister()
    def _own_and_register(self):
        assert(not self.thisown)
        self.thisown = True
        self._register()
    def replace_by(self, o):
        # NOTE(review): the isinstance check names ctree types
        # (cexpr_t/cinsn_t) although this method sits in a microcode
        # class -- this looks like a generator-shared template; confirm
        # against the SDK before relying on it here.
        assert(isinstance(o, (cexpr_t, cinsn_t)))
        o._maybe_disown_and_deregister()
        self._replace_by(o)
    def _meminfo(self):
        # Human-readable ownership summary for debugging; rkind indexes
        # into the list of kinds IDAPython tracks.
        cpp = self.__dbg_get_meminfo()
        rkind = self.__dbg_get_registered_kind()
        rkind_str = [
            "(not owned)",
            "cfuncptr_t",
            "cinsn_t",
            "cexpr_t",
            "cblock_t",
            "mbl_array_t",
            "mop_t",
            "minsn_t",
            "optinsn_t",
            "optblock_t",
            "valrng_t"][rkind]
        return "%s [thisown=%s, owned by IDAPython as=%s]" % (
            cpp,
            self.thisown,
            rkind_str)
    meminfo = property(_meminfo)
# Register the mop_t proxy class with SWIG so C++ results are wrapped in it.
mop_t_swigregister = _ida_hexrays.mop_t_swigregister
mop_t_swigregister(mop_t)
MAX_OPSIZE = cvar.MAX_OPSIZE
DOUBLE_OPSIZE = cvar.DOUBLE_OPSIZE
# OPROP_*: operand property bits (the bare strings below are doc-tool
# docstrings emitted by the generator).
OPROP_IMPDONE = _ida_hexrays.OPROP_IMPDONE
"""
imported operand (a pointer) has been dereferenced
"""
OPROP_UDT = _ida_hexrays.OPROP_UDT
"""
a struct or union
"""
OPROP_FLOAT = _ida_hexrays.OPROP_FLOAT
"""
possibly floating value
"""
OPROP_CCFLAGS = _ida_hexrays.OPROP_CCFLAGS
"""
condition codes register value
"""
OPROP_UDEFVAL = _ida_hexrays.OPROP_UDEFVAL
"""
uses undefined value
"""
def lexcompare(*args):
    """
    lexcompare(a, b) -> int

    Module-level lexical comparison; delegates to the C implementation.
    """
    return _ida_hexrays.lexcompare(*args)
# SWIG proxy for the C++ mop_pair_t (an operand pair).
class mop_pair_t(object):
    """
    Proxy of C++ mop_pair_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # lop/hop: the two halves of the pair -- presumably low/high mop_t
    # operands; confirm against the Hex-Rays SDK.
    lop = _swig_property(_ida_hexrays.mop_pair_t_lop_get, _ida_hexrays.mop_pair_t_lop_set)
    hop = _swig_property(_ida_hexrays.mop_pair_t_hop_get, _ida_hexrays.mop_pair_t_hop_set)
    def __init__(self, *args):
        """
        __init__(self) -> mop_pair_t
        """
        this = _ida_hexrays.new_mop_pair_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_mop_pair_t
    __del__ = lambda self : None;
mop_pair_t_swigregister = _ida_hexrays.mop_pair_t_swigregister
mop_pair_t_swigregister(mop_pair_t)
# SWIG proxy for C++ mop_addr_t: an "address of operand" operand
# (extends mop_t with in/out size information).
class mop_addr_t(mop_t):
    """
    Proxy of C++ mop_addr_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    insize = _swig_property(_ida_hexrays.mop_addr_t_insize_get, _ida_hexrays.mop_addr_t_insize_set)
    outsize = _swig_property(_ida_hexrays.mop_addr_t_outsize_get, _ida_hexrays.mop_addr_t_outsize_set)
    def __init__(self, *args):
        """
        __init__(self) -> mop_addr_t
        __init__(self, ra) -> mop_addr_t
        __init__(self, ra, isz, osz) -> mop_addr_t
        """
        this = _ida_hexrays.new_mop_addr_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def lexcompare(self, *args):
        """
        lexcompare(self, ra) -> int
        """
        return _ida_hexrays.mop_addr_t_lexcompare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_mop_addr_t
    __del__ = lambda self : None;
mop_addr_t_swigregister = _ida_hexrays.mop_addr_t_swigregister
mop_addr_t_swigregister(mop_addr_t)
# SWIG proxy for C++ mcallarg_t: one argument of a microcode call
# (an operand plus its type, name and argument location).
class mcallarg_t(mop_t):
    """
    Proxy of C++ mcallarg_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    ea = _swig_property(_ida_hexrays.mcallarg_t_ea_get, _ida_hexrays.mcallarg_t_ea_set)
    type = _swig_property(_ida_hexrays.mcallarg_t_type_get, _ida_hexrays.mcallarg_t_type_set)
    name = _swig_property(_ida_hexrays.mcallarg_t_name_get, _ida_hexrays.mcallarg_t_name_set)
    argloc = _swig_property(_ida_hexrays.mcallarg_t_argloc_get, _ida_hexrays.mcallarg_t_argloc_set)
    def __init__(self, *args):
        """
        __init__(self) -> mcallarg_t
        __init__(self, rarg) -> mcallarg_t
        """
        this = _ida_hexrays.new_mcallarg_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def copy_mop(self, *args):
        """
        copy_mop(self, op)
        """
        return _ida_hexrays.mcallarg_t_copy_mop(self, *args)
    def _print(self, *args):
        """
        _print(self, shins_flags=0x04|0x02)
        """
        return _ida_hexrays.mcallarg_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.mcallarg_t_dstr(self, *args)
    def set_regarg(self, *args):
        """
        set_regarg(self, mr, sz, tif)
        set_regarg(self, mr, tif)
        set_regarg(self, mr, dt, sign=type_unsigned)
        """
        return _ida_hexrays.mcallarg_t_set_regarg(self, *args)
    def make_int(self, *args):
        """
        make_int(self, val, val_ea, opno=0)
        """
        return _ida_hexrays.mcallarg_t_make_int(self, *args)
    def make_uint(self, *args):
        """
        make_uint(self, val, val_ea, opno=0)
        """
        return _ida_hexrays.mcallarg_t_make_uint(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_mcallarg_t
    __del__ = lambda self : None;
mcallarg_t_swigregister = _ida_hexrays.mcallarg_t_swigregister
mcallarg_t_swigregister(mcallarg_t)
# ROLE_*: recognized roles of helper/intrinsic calls -- presumably
# mirrors the SDK's funcrole_t enum; confirm against hexrays.hpp.
ROLE_UNK = _ida_hexrays.ROLE_UNK
ROLE_EMPTY = _ida_hexrays.ROLE_EMPTY
ROLE_MEMSET = _ida_hexrays.ROLE_MEMSET
ROLE_MEMSET32 = _ida_hexrays.ROLE_MEMSET32
ROLE_MEMSET64 = _ida_hexrays.ROLE_MEMSET64
ROLE_MEMCPY = _ida_hexrays.ROLE_MEMCPY
ROLE_STRCPY = _ida_hexrays.ROLE_STRCPY
ROLE_STRLEN = _ida_hexrays.ROLE_STRLEN
ROLE_STRCAT = _ida_hexrays.ROLE_STRCAT
ROLE_TAIL = _ida_hexrays.ROLE_TAIL
ROLE_BUG = _ida_hexrays.ROLE_BUG
ROLE_ALLOCA = _ida_hexrays.ROLE_ALLOCA
ROLE_BSWAP = _ida_hexrays.ROLE_BSWAP
ROLE_PRESENT = _ida_hexrays.ROLE_PRESENT
ROLE_CONTAINING_RECORD = _ida_hexrays.ROLE_CONTAINING_RECORD
ROLE_FASTFAIL = _ida_hexrays.ROLE_FASTFAIL
ROLE_READFLAGS = _ida_hexrays.ROLE_READFLAGS
ROLE_IS_MUL_OK = _ida_hexrays.ROLE_IS_MUL_OK
ROLE_SATURATED_MUL = _ida_hexrays.ROLE_SATURATED_MUL
ROLE_BITTEST = _ida_hexrays.ROLE_BITTEST
ROLE_BITTESTANDSET = _ida_hexrays.ROLE_BITTESTANDSET
ROLE_BITTESTANDRESET = _ida_hexrays.ROLE_BITTESTANDRESET
ROLE_BITTESTANDCOMPLEMENT = _ida_hexrays.ROLE_BITTESTANDCOMPLEMENT
ROLE_VA_ARG = _ida_hexrays.ROLE_VA_ARG
ROLE_VA_COPY = _ida_hexrays.ROLE_VA_COPY
ROLE_VA_START = _ida_hexrays.ROLE_VA_START
ROLE_VA_END = _ida_hexrays.ROLE_VA_END
ROLE_ROL = _ida_hexrays.ROLE_ROL
ROLE_ROR = _ida_hexrays.ROLE_ROR
ROLE_CFSUB3 = _ida_hexrays.ROLE_CFSUB3
ROLE_OFSUB3 = _ida_hexrays.ROLE_OFSUB3
ROLE_ABS = _ida_hexrays.ROLE_ABS
# FUNC_NAME_*: canonical helper-function name strings matching the roles
# above.
FUNC_NAME_MEMCPY = _ida_hexrays.FUNC_NAME_MEMCPY
FUNC_NAME_MEMSET = _ida_hexrays.FUNC_NAME_MEMSET
FUNC_NAME_MEMSET32 = _ida_hexrays.FUNC_NAME_MEMSET32
FUNC_NAME_MEMSET64 = _ida_hexrays.FUNC_NAME_MEMSET64
FUNC_NAME_STRCPY = _ida_hexrays.FUNC_NAME_STRCPY
FUNC_NAME_STRLEN = _ida_hexrays.FUNC_NAME_STRLEN
FUNC_NAME_STRCAT = _ida_hexrays.FUNC_NAME_STRCAT
FUNC_NAME_TAIL = _ida_hexrays.FUNC_NAME_TAIL
FUNC_NAME_VA_ARG = _ida_hexrays.FUNC_NAME_VA_ARG
FUNC_NAME_EMPTY = _ida_hexrays.FUNC_NAME_EMPTY
FUNC_NAME_PRESENT = _ida_hexrays.FUNC_NAME_PRESENT
FUNC_NAME_CONTAINING_RECORD = _ida_hexrays.FUNC_NAME_CONTAINING_RECORD
# SWIG proxy for C++ mcallinfo_t: full description of a microcode call --
# callee, arguments, return value, calling convention and register effects.
class mcallinfo_t(object):
    """
    Proxy of C++ mcallinfo_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    callee = _swig_property(_ida_hexrays.mcallinfo_t_callee_get, _ida_hexrays.mcallinfo_t_callee_set)
    solid_args = _swig_property(_ida_hexrays.mcallinfo_t_solid_args_get, _ida_hexrays.mcallinfo_t_solid_args_set)
    call_spd = _swig_property(_ida_hexrays.mcallinfo_t_call_spd_get, _ida_hexrays.mcallinfo_t_call_spd_set)
    stkargs_top = _swig_property(_ida_hexrays.mcallinfo_t_stkargs_top_get, _ida_hexrays.mcallinfo_t_stkargs_top_set)
    cc = _swig_property(_ida_hexrays.mcallinfo_t_cc_get, _ida_hexrays.mcallinfo_t_cc_set)
    args = _swig_property(_ida_hexrays.mcallinfo_t_args_get, _ida_hexrays.mcallinfo_t_args_set)
    retregs = _swig_property(_ida_hexrays.mcallinfo_t_retregs_get, _ida_hexrays.mcallinfo_t_retregs_set)
    return_type = _swig_property(_ida_hexrays.mcallinfo_t_return_type_get, _ida_hexrays.mcallinfo_t_return_type_set)
    return_argloc = _swig_property(_ida_hexrays.mcallinfo_t_return_argloc_get, _ida_hexrays.mcallinfo_t_return_argloc_set)
    return_regs = _swig_property(_ida_hexrays.mcallinfo_t_return_regs_get, _ida_hexrays.mcallinfo_t_return_regs_set)
    spoiled = _swig_property(_ida_hexrays.mcallinfo_t_spoiled_get, _ida_hexrays.mcallinfo_t_spoiled_set)
    pass_regs = _swig_property(_ida_hexrays.mcallinfo_t_pass_regs_get, _ida_hexrays.mcallinfo_t_pass_regs_set)
    visible_memory = _swig_property(_ida_hexrays.mcallinfo_t_visible_memory_get, _ida_hexrays.mcallinfo_t_visible_memory_set)
    dead_regs = _swig_property(_ida_hexrays.mcallinfo_t_dead_regs_get, _ida_hexrays.mcallinfo_t_dead_regs_set)
    flags = _swig_property(_ida_hexrays.mcallinfo_t_flags_get, _ida_hexrays.mcallinfo_t_flags_set)  # FCI_* bits, defined below
    role = _swig_property(_ida_hexrays.mcallinfo_t_role_get, _ida_hexrays.mcallinfo_t_role_set)  # ROLE_* value
    def __init__(self, *args):
        """
        __init__(self, _callee=BADADDR, _sargs=0) -> mcallinfo_t
        """
        this = _ida_hexrays.new_mcallinfo_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def lexcompare(self, *args):
        """
        lexcompare(self, f) -> int
        """
        return _ida_hexrays.mcallinfo_t_lexcompare(self, *args)
    def set_type(self, *args):
        """
        set_type(self, type) -> bool
        """
        return _ida_hexrays.mcallinfo_t_set_type(self, *args)
    def get_type(self, *args):
        """
        get_type(self) -> tinfo_t
        """
        return _ida_hexrays.mcallinfo_t_get_type(self, *args)
    def is_vararg(self, *args):
        """
        is_vararg(self) -> bool
        """
        return _ida_hexrays.mcallinfo_t_is_vararg(self, *args)
    def _print(self, *args):
        """
        _print(self, size=-1, shins_flags=0x04|0x02)
        """
        return _ida_hexrays.mcallinfo_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.mcallinfo_t_dstr(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_mcallinfo_t
    __del__ = lambda self : None;
mcallinfo_t_swigregister = _ida_hexrays.mcallinfo_t_swigregister
mcallinfo_t_swigregister(mcallinfo_t)
FCI_PROP = _ida_hexrays.FCI_PROP
"""
call has been propagated
"""
FCI_DEAD = _ida_hexrays.FCI_DEAD
"""
some return registers were determined dead
"""
FCI_FINAL = _ida_hexrays.FCI_FINAL
"""
call type is final, should not be changed
"""
FCI_NORET = _ida_hexrays.FCI_NORET
"""
call does not return
"""
FCI_PURE = _ida_hexrays.FCI_PURE
"""
pure function
"""
FCI_NOSIDE = _ida_hexrays.FCI_NOSIDE
"""
call does not have side effects
"""
FCI_SPLOK = _ida_hexrays.FCI_SPLOK
"""
spoiled/visible_memory lists have been optimized. for some functions
we can reduce them as soon as information about the arguments becomes
available. in order not to try optimize them again we use this bit.
"""
FCI_HASCALL = _ida_hexrays.FCI_HASCALL
"""
A function is an synthetic helper combined from several instructions
and at least one of them was a call to a real functions
"""
FCI_HASFMT = _ida_hexrays.FCI_HASFMT
"""
printf- or scanf-style format string
A variadic function with recognized
"""
# SWIG proxy for C++ mcases_t: switch-case information (case values and
# their jump targets, kept as parallel containers).
class mcases_t(object):
    """
    Proxy of C++ mcases_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    values = _swig_property(_ida_hexrays.mcases_t_values_get, _ida_hexrays.mcases_t_values_set)
    targets = _swig_property(_ida_hexrays.mcases_t_targets_get, _ida_hexrays.mcases_t_targets_set)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.mcases_t_swap(self, *args)
    # Full set of rich comparisons plus a three-way compare(), all
    # delegating to the C side.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.mcases_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.mcases_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.mcases_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.mcases_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.mcases_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.mcases_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.mcases_t_compare(self, *args)
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.mcases_t_empty(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t
        """
        return _ida_hexrays.mcases_t_size(self, *args)
    def resize(self, *args):
        """
        resize(self, s)
        """
        return _ida_hexrays.mcases_t_resize(self, *args)
    def _print(self, *args):
        """
        _print(self)
        """
        return _ida_hexrays.mcases_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.mcases_t_dstr(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> mcases_t
        """
        this = _ida_hexrays.new_mcases_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_mcases_t
    __del__ = lambda self : None;
mcases_t_swigregister = _ida_hexrays.mcases_t_swigregister
mcases_t_swigregister(mcases_t)
# SWIG proxy for C++ voff_t: a "value offset" -- a location that is
# either a register or a stack offset, selected by `type`.
class voff_t(object):
    """
    Proxy of C++ voff_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    off = _swig_property(_ida_hexrays.voff_t_off_get, _ida_hexrays.voff_t_off_set)
    type = _swig_property(_ida_hexrays.voff_t_type_get, _ida_hexrays.voff_t_type_set)
    def __init__(self, *args):
        """
        __init__(self) -> voff_t
        __init__(self, _type, _off) -> voff_t
        __init__(self, op) -> voff_t
        """
        this = _ida_hexrays.new_voff_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def set(self, *args):
        """
        set(self, _type, _off)
        """
        return _ida_hexrays.voff_t_set(self, *args)
    def set_stkoff(self, *args):
        """
        set_stkoff(self, stkoff)
        """
        return _ida_hexrays.voff_t_set_stkoff(self, *args)
    def set_reg(self, *args):
        """
        set_reg(self, mreg)
        """
        return _ida_hexrays.voff_t_set_reg(self, *args)
    def undef(self, *args):
        """
        undef(self)
        """
        return _ida_hexrays.voff_t_undef(self, *args)
    def defined(self, *args):
        """
        defined(self) -> bool
        """
        return _ida_hexrays.voff_t_defined(self, *args)
    def is_reg(self, *args):
        """
        is_reg(self) -> bool
        """
        return _ida_hexrays.voff_t_is_reg(self, *args)
    def is_stkoff(self, *args):
        """
        is_stkoff(self) -> bool
        """
        return _ida_hexrays.voff_t_is_stkoff(self, *args)
    def get_reg(self, *args):
        """
        get_reg(self) -> mreg_t
        """
        return _ida_hexrays.voff_t_get_reg(self, *args)
    def get_stkoff(self, *args):
        """
        get_stkoff(self) -> sval_t
        """
        return _ida_hexrays.voff_t_get_stkoff(self, *args)
    # Arithmetic helpers: inc mutates in place, add returns a new voff_t,
    # diff returns the distance to another voff_t.
    def inc(self, *args):
        """
        inc(self, delta)
        """
        return _ida_hexrays.voff_t_inc(self, *args)
    def add(self, *args):
        """
        add(self, width) -> voff_t
        """
        return _ida_hexrays.voff_t_add(self, *args)
    def diff(self, *args):
        """
        diff(self, r) -> sval_t
        """
        return _ida_hexrays.voff_t_diff(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.voff_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.voff_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.voff_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.voff_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.voff_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.voff_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.voff_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_voff_t
    __del__ = lambda self : None;
voff_t_swigregister = _ida_hexrays.voff_t_swigregister
voff_t_swigregister(voff_t)
# SWIG proxy for C++ vivl_t: a value interval -- a voff_t location
# extended with a size, supporting overlap/containment queries.
class vivl_t(voff_t):
    """
    Proxy of C++ vivl_t class
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    size = _swig_property(_ida_hexrays.vivl_t_size_get, _ida_hexrays.vivl_t_size_set)
    def __init__(self, *args):
        """
        __init__(self, _type=mop_z, _off=-1, _size=0) -> vivl_t
        __init__(self, ch) -> vivl_t
        __init__(self, op) -> vivl_t
        """
        this = _ida_hexrays.new_vivl_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def set(self, *args):
        """
        set(self, _type, _off, _size=0)
        set(self, voff, _size)
        """
        return _ida_hexrays.vivl_t_set(self, *args)
    def set_stkoff(self, *args):
        """
        set_stkoff(self, stkoff, sz=0)
        """
        return _ida_hexrays.vivl_t_set_stkoff(self, *args)
    def set_reg(self, *args):
        """
        set_reg(self, mreg, sz=0)
        """
        return _ida_hexrays.vivl_t_set_reg(self, *args)
    def extend_to_cover(self, *args):
        """
        extend_to_cover(self, r) -> bool
        """
        return _ida_hexrays.vivl_t_extend_to_cover(self, *args)
    def intersect(self, *args):
        """
        intersect(self, r) -> uval_t
        """
        return _ida_hexrays.vivl_t_intersect(self, *args)
    def overlap(self, *args):
        """
        overlap(self, r) -> bool
        """
        return _ida_hexrays.vivl_t_overlap(self, *args)
    def includes(self, *args):
        """
        includes(self, r) -> bool
        """
        return _ida_hexrays.vivl_t_includes(self, *args)
    def contains(self, *args):
        """
        contains(self, voff2) -> bool
        """
        return _ida_hexrays.vivl_t_contains(self, *args)
    # Comparison operators are redefined here (not inherited) because the
    # C++ class overloads them for intervals; __eq__ additionally accepts
    # a mop operand.
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.vivl_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.vivl_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.vivl_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.vivl_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.vivl_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.vivl_t_compare(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        __eq__(self, mop) -> bool
        """
        return _ida_hexrays.vivl_t___eq__(self, *args)
    def _print(self, *args):
        """
        _print(self)
        """
        return _ida_hexrays.vivl_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.vivl_t_dstr(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_vivl_t
    __del__ = lambda self : None;
vivl_t_swigregister = _ida_hexrays.vivl_t_swigregister
vivl_t_swigregister(vivl_t)
class chain_t(ida_pro.intvec_t):
"""
Proxy of C++ chain_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
width = _swig_property(_ida_hexrays.chain_t_width_get, _ida_hexrays.chain_t_width_set)
    # --- Visible body of SWIG proxy class chain_t (the `class` statement is
    # --- above this chunk).  Every method is a thin forwarder to the native
    # --- _ida_hexrays extension module; do not hand-edit generated code.
    varnum = _swig_property(_ida_hexrays.chain_t_varnum_get, _ida_hexrays.chain_t_varnum_set)
    flags = _swig_property(_ida_hexrays.chain_t_flags_get, _ida_hexrays.chain_t_flags_set)
    def __init__(self, *args):
        """
        __init__(self) -> chain_t
        __init__(self, t, off, w=1, v=-1) -> chain_t
        __init__(self, _k, w=1) -> chain_t
        """
        this = _ida_hexrays.new_chain_t(*args)
        # SWIG ownership idiom: `self.this` may not exist yet on a fresh
        # instance, in which case the bare except assigns it directly.
        try: self.this.append(this)
        except: self.this = this
    def set_value(self, *args):
        """
        set_value(self, r)
        """
        return _ida_hexrays.chain_t_set_value(self, *args)
    def key(self, *args):
        """
        key(self) -> voff_t
        """
        return _ida_hexrays.chain_t_key(self, *args)
    # Flag testers — see the CHF_* bit constants defined right after this class.
    def is_inited(self, *args):
        """
        is_inited(self) -> bool
        """
        return _ida_hexrays.chain_t_is_inited(self, *args)
    def is_reg(self, *args):
        """
        is_reg(self) -> bool
        """
        return _ida_hexrays.chain_t_is_reg(self, *args)
    def is_stkoff(self, *args):
        """
        is_stkoff(self) -> bool
        """
        return _ida_hexrays.chain_t_is_stkoff(self, *args)
    def is_replaced(self, *args):
        """
        is_replaced(self) -> bool
        """
        return _ida_hexrays.chain_t_is_replaced(self, *args)
    def is_overlapped(self, *args):
        """
        is_overlapped(self) -> bool
        """
        return _ida_hexrays.chain_t_is_overlapped(self, *args)
    def is_fake(self, *args):
        """
        is_fake(self) -> bool
        """
        return _ida_hexrays.chain_t_is_fake(self, *args)
    def is_passreg(self, *args):
        """
        is_passreg(self) -> bool
        """
        return _ida_hexrays.chain_t_is_passreg(self, *args)
    def is_term(self, *args):
        """
        is_term(self) -> bool
        """
        return _ida_hexrays.chain_t_is_term(self, *args)
    # Flag setters.
    def set_inited(self, *args):
        """
        set_inited(self, b)
        """
        return _ida_hexrays.chain_t_set_inited(self, *args)
    def set_replaced(self, *args):
        """
        set_replaced(self, b)
        """
        return _ida_hexrays.chain_t_set_replaced(self, *args)
    def set_overlapped(self, *args):
        """
        set_overlapped(self, b)
        """
        return _ida_hexrays.chain_t_set_overlapped(self, *args)
    def set_term(self, *args):
        """
        set_term(self, b)
        """
        return _ida_hexrays.chain_t_set_term(self, *args)
    def get_reg(self, *args):
        """
        get_reg(self) -> mreg_t
        """
        return _ida_hexrays.chain_t_get_reg(self, *args)
    def get_stkoff(self, *args):
        """
        get_stkoff(self) -> sval_t
        """
        return _ida_hexrays.chain_t_get_stkoff(self, *args)
    def overlap(self, *args):
        """
        overlap(self, r) -> bool
        """
        return _ida_hexrays.chain_t_overlap(self, *args)
    def includes(self, *args):
        """
        includes(self, r) -> bool
        """
        return _ida_hexrays.chain_t_includes(self, *args)
    def endoff(self, *args):
        """
        endoff(self) -> voff_t
        """
        return _ida_hexrays.chain_t_endoff(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.chain_t___lt__(self, *args)
    def _print(self, *args):
        """
        _print(self)
        """
        return _ida_hexrays.chain_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.chain_t_dstr(self, *args)
    def append_list(self, *args):
        """
        append_list(self, list)
        """
        return _ida_hexrays.chain_t_append_list(self, *args)
    def clear_varnum(self, *args):
        """
        clear_varnum(self)
        """
        return _ida_hexrays.chain_t_clear_varnum(self, *args)
    # Destruction goes through the native deleter; __del__ is intentionally
    # a no-op (SWIG drives lifetime via __swig_destroy__ / thisown).
    __swig_destroy__ = _ida_hexrays.delete_chain_t
    __del__ = lambda self : None;
# Register the chain_t proxy class with the SWIG runtime.
chain_t_swigregister = _ida_hexrays.chain_t_swigregister
chain_t_swigregister(chain_t)
# CHF_*: bit values for chain_t.flags.  The bare strings below are the
# generated mirrors of the C++ SDK comments.
CHF_INITED = _ida_hexrays.CHF_INITED
"""
is chain initialized? (valid only after lvar allocation)
"""
CHF_REPLACED = _ida_hexrays.CHF_REPLACED
"""
chain operands have been replaced?
"""
CHF_OVER = _ida_hexrays.CHF_OVER
"""
overlapped chain
"""
CHF_FAKE = _ida_hexrays.CHF_FAKE
"""
fake chain created by widen_chains()
"""
CHF_PASSTHRU = _ida_hexrays.CHF_PASSTHRU
"""
pass-thru chain, must use the input variable to the block
"""
CHF_TERM = _ida_hexrays.CHF_TERM
"""
terminating chain; the variable does not survive across the block
"""
SIZEOF_BLOCK_CHAINS = _ida_hexrays.SIZEOF_BLOCK_CHAINS
class block_chains_t(object):
    """
    Proxy of C++ block_chains_t class

    SWIG-generated wrapper; all methods forward to _ida_hexrays.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def get_reg_chain(self, *args):
        """
        get_reg_chain(self, reg, width=1) -> chain_t
        get_reg_chain(self, reg, width=1) -> chain_t
        """
        return _ida_hexrays.block_chains_t_get_reg_chain(self, *args)
    def get_stk_chain(self, *args):
        """
        get_stk_chain(self, off, width=1) -> chain_t
        get_stk_chain(self, off, width=1) -> chain_t
        """
        return _ida_hexrays.block_chains_t_get_stk_chain(self, *args)
    def get_chain(self, *args):
        """
        get_chain(self, k, width=1) -> chain_t
        get_chain(self, k, width=1) -> chain_t
        get_chain(self, ch) -> chain_t
        get_chain(self, ch) -> chain_t
        """
        return _ida_hexrays.block_chains_t_get_chain(self, *args)
    def _print(self, *args):
        """
        _print(self)
        """
        return _ida_hexrays.block_chains_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.block_chains_t_dstr(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> block_chains_t
        """
        this = _ida_hexrays.new_block_chains_t(*args)
        # SWIG ownership idiom: `self.this` may not exist yet.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_block_chains_t
    __del__ = lambda self : None;
# Register the block_chains_t proxy class with the SWIG runtime.
block_chains_t_swigregister = _ida_hexrays.block_chains_t_swigregister
block_chains_t_swigregister(block_chains_t)
class chain_visitor_t(object):
    """
    Proxy of C++ chain_visitor_t class

    SWIG "director" class: subclass it in Python and override visit_chain()
    to have C++ call back into the Python override.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    parent = _swig_property(_ida_hexrays.chain_visitor_t_parent_get, _ida_hexrays.chain_visitor_t_parent_set)
    def __init__(self, *args):
        """
        __init__(self) -> chain_visitor_t
        """
        # Director pattern: pass the Python instance to C++ only when this is
        # a subclass (so C++ can dispatch virtual calls back to Python).
        if self.__class__ == chain_visitor_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_chain_visitor_t(_self, *args)
        # SWIG ownership idiom: `self.this` may not exist yet.
        try: self.this.append(this)
        except: self.this = this
    def visit_chain(self, *args):
        """
        visit_chain(self, nblock, ch) -> int
        """
        return _ida_hexrays.chain_visitor_t_visit_chain(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_chain_visitor_t
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the underlying C++ object to the C++ side;
        # return a weak proxy so Python no longer keeps it alive.
        self.this.disown()
        _ida_hexrays.disown_chain_visitor_t(self)
        return weakref_proxy(self)
# Register the chain_visitor_t proxy class with the SWIG runtime.
chain_visitor_t_swigregister = _ida_hexrays.chain_visitor_t_swigregister
chain_visitor_t_swigregister(chain_visitor_t)
class graph_chains_t(block_chains_vec_t):
    """
    Proxy of C++ graph_chains_t class

    A vector of block_chains_t (inherits block_chains_vec_t) with
    lock/acquire/release accounting.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self) -> graph_chains_t
        """
        this = _ida_hexrays.new_graph_chains_t(*args)
        # SWIG ownership idiom: `self.this` may not exist yet.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_graph_chains_t
    __del__ = lambda self : None;
    def for_all_chains(self, *args):
        """
        for_all_chains(self, cv, gca_flags) -> int
        """
        # `gca_flags` is a combination of the GCA_* constants defined below.
        return _ida_hexrays.graph_chains_t_for_all_chains(self, *args)
    def is_locked(self, *args):
        """
        is_locked(self) -> bool
        """
        return _ida_hexrays.graph_chains_t_is_locked(self, *args)
    def acquire(self, *args):
        """
        acquire(self)
        """
        return _ida_hexrays.graph_chains_t_acquire(self, *args)
    def release(self, *args):
        """
        release(self)
        """
        return _ida_hexrays.graph_chains_t_release(self, *args)
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.graph_chains_t_swap(self, *args)
# Register the graph_chains_t proxy class with the SWIG runtime.
graph_chains_t_swigregister = _ida_hexrays.graph_chains_t_swigregister
graph_chains_t_swigregister(graph_chains_t)
# GCA_*: flag bits for graph_chains_t.for_all_chains().
GCA_EMPTY = _ida_hexrays.GCA_EMPTY
"""
include empty chains
"""
GCA_SPEC = _ida_hexrays.GCA_SPEC
"""
include chains for special registers
"""
GCA_ALLOC = _ida_hexrays.GCA_ALLOC
"""
enumerate only allocated chains
"""
GCA_NALLOC = _ida_hexrays.GCA_NALLOC
"""
enumerate only non-allocated chains
"""
GCA_OFIRST = _ida_hexrays.GCA_OFIRST
"""
consider only chains of the first block
"""
GCA_OLAST = _ida_hexrays.GCA_OLAST
"""
consider only chains of the last block
"""
class minsn_t(object):
    """
    Proxy of C++ minsn_t class

    A Hex-Rays microcode instruction.  SWIG-generated forwarders plus a few
    Python-side ownership helpers at the bottom of the class.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    opcode = _swig_property(_ida_hexrays.minsn_t_opcode_get, _ida_hexrays.minsn_t_opcode_set)
    iprops = _swig_property(_ida_hexrays.minsn_t_iprops_get, _ida_hexrays.minsn_t_iprops_set)
    next = _swig_property(_ida_hexrays.minsn_t_next_get, _ida_hexrays.minsn_t_next_set)
    prev = _swig_property(_ida_hexrays.minsn_t_prev_get, _ida_hexrays.minsn_t_prev_set)
    ea = _swig_property(_ida_hexrays.minsn_t_ea_get, _ida_hexrays.minsn_t_ea_set)
    l = _swig_property(_ida_hexrays.minsn_t_l_get, _ida_hexrays.minsn_t_l_set)
    r = _swig_property(_ida_hexrays.minsn_t_r_get, _ida_hexrays.minsn_t_r_set)
    d = _swig_property(_ida_hexrays.minsn_t_d_get, _ida_hexrays.minsn_t_d_set)
    # --- iprops flag testers (see IPROP_* constants after the class) ---
    def is_optional(self, *args):
        """
        is_optional(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_optional(self, *args)
    def is_combined(self, *args):
        """
        is_combined(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_combined(self, *args)
    def is_farcall(self, *args):
        """
        is_farcall(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_farcall(self, *args)
    def is_cleaning_pop(self, *args):
        """
        is_cleaning_pop(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_cleaning_pop(self, *args)
    def is_extstx(self, *args):
        """
        is_extstx(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_extstx(self, *args)
    def is_tailcall(self, *args):
        """
        is_tailcall(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_tailcall(self, *args)
    def is_fpinsn(self, *args):
        """
        is_fpinsn(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_fpinsn(self, *args)
    def is_assert(self, *args):
        """
        is_assert(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_assert(self, *args)
    def is_persistent(self, *args):
        """
        is_persistent(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_persistent(self, *args)
    def is_wild_match(self, *args):
        """
        is_wild_match(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_wild_match(self, *args)
    def is_propagatable(self, *args):
        """
        is_propagatable(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_propagatable(self, *args)
    def is_ignlowsrc(self, *args):
        """
        is_ignlowsrc(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_ignlowsrc(self, *args)
    def is_inverted_jx(self, *args):
        """
        is_inverted_jx(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_inverted_jx(self, *args)
    def was_noret_icall(self, *args):
        """
        was_noret_icall(self) -> bool
        """
        return _ida_hexrays.minsn_t_was_noret_icall(self, *args)
    def is_multimov(self, *args):
        """
        is_multimov(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_multimov(self, *args)
    def is_combinable(self, *args):
        """
        is_combinable(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_combinable(self, *args)
    def was_split(self, *args):
        """
        was_split(self) -> bool
        """
        return _ida_hexrays.minsn_t_was_split(self, *args)
    # --- iprops flag setters / clearers ---
    def set_optional(self, *args):
        """
        set_optional(self)
        """
        return _ida_hexrays.minsn_t_set_optional(self, *args)
    def clr_combined(self, *args):
        """
        clr_combined(self)
        """
        return _ida_hexrays.minsn_t_clr_combined(self, *args)
    def set_farcall(self, *args):
        """
        set_farcall(self)
        """
        return _ida_hexrays.minsn_t_set_farcall(self, *args)
    def set_cleaning_pop(self, *args):
        """
        set_cleaning_pop(self)
        """
        return _ida_hexrays.minsn_t_set_cleaning_pop(self, *args)
    def set_extstx(self, *args):
        """
        set_extstx(self)
        """
        return _ida_hexrays.minsn_t_set_extstx(self, *args)
    def set_tailcall(self, *args):
        """
        set_tailcall(self)
        """
        return _ida_hexrays.minsn_t_set_tailcall(self, *args)
    def clr_tailcall(self, *args):
        """
        clr_tailcall(self)
        """
        return _ida_hexrays.minsn_t_clr_tailcall(self, *args)
    def set_fpinsn(self, *args):
        """
        set_fpinsn(self)
        """
        return _ida_hexrays.minsn_t_set_fpinsn(self, *args)
    def clr_fpinsn(self, *args):
        """
        clr_fpinsn(self)
        """
        return _ida_hexrays.minsn_t_clr_fpinsn(self, *args)
    def set_assert(self, *args):
        """
        set_assert(self)
        """
        return _ida_hexrays.minsn_t_set_assert(self, *args)
    def clr_assert(self, *args):
        """
        clr_assert(self)
        """
        return _ida_hexrays.minsn_t_clr_assert(self, *args)
    def set_persistent(self, *args):
        """
        set_persistent(self)
        """
        return _ida_hexrays.minsn_t_set_persistent(self, *args)
    def set_wild_match(self, *args):
        """
        set_wild_match(self)
        """
        return _ida_hexrays.minsn_t_set_wild_match(self, *args)
    def clr_propagatable(self, *args):
        """
        clr_propagatable(self)
        """
        return _ida_hexrays.minsn_t_clr_propagatable(self, *args)
    def set_ignlowsrc(self, *args):
        """
        set_ignlowsrc(self)
        """
        return _ida_hexrays.minsn_t_set_ignlowsrc(self, *args)
    def clr_ignlowsrc(self, *args):
        """
        clr_ignlowsrc(self)
        """
        return _ida_hexrays.minsn_t_clr_ignlowsrc(self, *args)
    def set_inverted_jx(self, *args):
        """
        set_inverted_jx(self)
        """
        return _ida_hexrays.minsn_t_set_inverted_jx(self, *args)
    def set_noret_icall(self, *args):
        """
        set_noret_icall(self)
        """
        return _ida_hexrays.minsn_t_set_noret_icall(self, *args)
    def clr_noret_icall(self, *args):
        """
        clr_noret_icall(self)
        """
        return _ida_hexrays.minsn_t_clr_noret_icall(self, *args)
    def set_multimov(self, *args):
        """
        set_multimov(self)
        """
        return _ida_hexrays.minsn_t_set_multimov(self, *args)
    def clr_multimov(self, *args):
        """
        clr_multimov(self)
        """
        return _ida_hexrays.minsn_t_clr_multimov(self, *args)
    def set_combinable(self, *args):
        """
        set_combinable(self)
        """
        return _ida_hexrays.minsn_t_set_combinable(self, *args)
    def clr_combinable(self, *args):
        """
        clr_combinable(self)
        """
        return _ida_hexrays.minsn_t_clr_combinable(self, *args)
    def set_split_size(self, *args):
        """
        set_split_size(self, s)
        """
        return _ida_hexrays.minsn_t_set_split_size(self, *args)
    def get_split_size(self, *args):
        """
        get_split_size(self) -> int
        """
        return _ida_hexrays.minsn_t_get_split_size(self, *args)
    def __init__(self, *args):
        """
        __init__(self, _ea) -> minsn_t
        __init__(self, m) -> minsn_t
        """
        this = _ida_hexrays.new_minsn_t(*args)
        # SWIG ownership idiom: `self.this` may not exist yet.
        try: self.this.append(this)
        except: self.this = this
    def swap(self, *args):
        """
        swap(self, m)
        """
        return _ida_hexrays.minsn_t_swap(self, *args)
    def _print(self, *args):
        """
        _print(self, shins_flags=0x04|0x02)
        """
        return _ida_hexrays.minsn_t__print(self, *args)
    def dstr(self, *args):
        """
        dstr(self) -> char const *
        """
        return _ida_hexrays.minsn_t_dstr(self, *args)
    def setaddr(self, *args):
        """
        setaddr(self, new_ea)
        """
        return _ida_hexrays.minsn_t_setaddr(self, *args)
    def optimize_solo(self, *args):
        """
        optimize_solo(self, optflags=0) -> int
        """
        # `optflags` is a combination of the OPTI_* constants defined below.
        return _ida_hexrays.minsn_t_optimize_solo(self, *args)
    def optimize_subtree(self, *args):
        """
        optimize_subtree(self, blk, top, parent, converted_call, optflags=0x0002) -> int
        """
        return _ida_hexrays.minsn_t_optimize_subtree(self, *args)
    def for_all_ops(self, *args):
        """
        for_all_ops(self, mv) -> int
        """
        return _ida_hexrays.minsn_t_for_all_ops(self, *args)
    def for_all_insns(self, *args):
        """
        for_all_insns(self, mv) -> int
        """
        return _ida_hexrays.minsn_t_for_all_insns(self, *args)
    def _make_nop(self, *args):
        """
        _make_nop(self)
        """
        return _ida_hexrays.minsn_t__make_nop(self, *args)
    def equal_insns(self, *args):
        """
        equal_insns(self, m, eqflags) -> bool
        """
        # `eqflags` is a combination of the EQ_* constants defined below.
        return _ida_hexrays.minsn_t_equal_insns(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, ri) -> bool
        """
        return _ida_hexrays.minsn_t___lt__(self, *args)
    def lexcompare(self, *args):
        """
        lexcompare(self, ri) -> int
        """
        return _ida_hexrays.minsn_t_lexcompare(self, *args)
    def is_noret_call(self, *args):
        """
        is_noret_call(self, ignore_noret_icall=False) -> bool
        """
        return _ida_hexrays.minsn_t_is_noret_call(self, *args)
    def is_unknown_call(self, *args):
        """
        is_unknown_call(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_unknown_call(self, *args)
    def is_helper(self, *args):
        """
        is_helper(self, name) -> bool
        """
        return _ida_hexrays.minsn_t_is_helper(self, *args)
    def find_call(self, *args):
        """
        find_call(self, with_helpers=False) -> minsn_t
        """
        return _ida_hexrays.minsn_t_find_call(self, *args)
    def contains_call(self, *args):
        """
        contains_call(self, with_helpers=False) -> bool
        """
        return _ida_hexrays.minsn_t_contains_call(self, *args)
    def has_side_effects(self, *args):
        """
        has_side_effects(self, include_ldx_and_divs=False) -> bool
        """
        return _ida_hexrays.minsn_t_has_side_effects(self, *args)
    def get_role(self, *args):
        """
        get_role(self) -> funcrole_t
        """
        return _ida_hexrays.minsn_t_get_role(self, *args)
    def is_memcpy(self, *args):
        """
        is_memcpy(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_memcpy(self, *args)
    def is_memset(self, *args):
        """
        is_memset(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_memset(self, *args)
    def is_alloca(self, *args):
        """
        is_alloca(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_alloca(self, *args)
    def is_bswap(self, *args):
        """
        is_bswap(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_bswap(self, *args)
    def is_readflags(self, *args):
        """
        is_readflags(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_readflags(self, *args)
    def contains_opcode(self, *args):
        """
        contains_opcode(self, mcode) -> bool
        """
        return _ida_hexrays.minsn_t_contains_opcode(self, *args)
    def find_opcode(self, *args):
        """
        find_opcode(self, mcode) -> minsn_t
        find_opcode(self, mcode) -> minsn_t
        """
        return _ida_hexrays.minsn_t_find_opcode(self, *args)
    def find_ins_op(self, *args):
        """
        find_ins_op(self, other, op=m_nop) -> minsn_t
        """
        return _ida_hexrays.minsn_t_find_ins_op(self, *args)
    def find_num_op(self, *args):
        """
        find_num_op(self, other) -> mop_t
        """
        return _ida_hexrays.minsn_t_find_num_op(self, *args)
    def is_mov(self, *args):
        """
        is_mov(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_mov(self, *args)
    def is_like_move(self, *args):
        """
        is_like_move(self) -> bool
        """
        return _ida_hexrays.minsn_t_is_like_move(self, *args)
    def modifes_d(self, *args):
        """
        modifes_d(self) -> bool
        """
        # NOTE(review): "modifes" (sic) matches the exported native symbol;
        # the typo cannot be fixed here without breaking the binding.
        return _ida_hexrays.minsn_t_modifes_d(self, *args)
    def modifies_pair_mop(self, *args):
        """
        modifies_pair_mop(self) -> bool
        """
        return _ida_hexrays.minsn_t_modifies_pair_mop(self, *args)
    def is_between(self, *args):
        """
        is_between(self, m1, m2) -> bool
        """
        return _ida_hexrays.minsn_t_is_between(self, *args)
    def is_after(self, *args):
        """
        is_after(self, m) -> bool
        """
        return _ida_hexrays.minsn_t_is_after(self, *args)
    def may_use_aliased_memory(self, *args):
        """
        may_use_aliased_memory(self) -> bool
        """
        return _ida_hexrays.minsn_t_may_use_aliased_memory(self, *args)
    def _register(self, *args):
        """
        _register(self)
        """
        return _ida_hexrays.minsn_t__register(self, *args)
    def _deregister(self, *args):
        """
        _deregister(self)
        """
        return _ida_hexrays.minsn_t__deregister(self, *args)
    def __dbg_get_meminfo(self, *args):
        """
        __dbg_get_meminfo(self) -> qstring
        """
        return _ida_hexrays.minsn_t___dbg_get_meminfo(self, *args)
    def __dbg_get_registered_kind(self, *args):
        """
        __dbg_get_registered_kind(self) -> int
        """
        return _ida_hexrays.minsn_t___dbg_get_registered_kind(self, *args)
    def _obj_id(self, *args):
        """
        _obj_id(self) -> PyObject *
        """
        return _ida_hexrays.minsn_t__obj_id(self, *args)
    obj_id = property(_obj_id)
    # --- Python-side ownership-management helpers (IDAPython glue; this
    # --- boilerplate appears to be shared with the ctree wrapper classes,
    # --- cf. the cexpr_t/cinsn_t check in replace_by — confirm in SDK) ---
    def _ensure_cond(self, ok, cond_str):
        # Raise unless `ok` is true; used as an inline precondition check.
        if not ok:
            raise Exception("Condition \"%s\" not verified" % cond_str)
        return True
    def _ensure_no_obj(self, o, attr, attr_is_acquired):
        # Refuse to overwrite an attribute whose ownership was already taken.
        if attr_is_acquired and o is not None:
            raise Exception("%s already owns attribute \"%s\" (%s); cannot be modified" % (self, attr, o))
        return True
    def _acquire_ownership(self, v, acquire):
        # Take C++-side ownership of `v`: clear its thisown flag and
        # deregister it from the Python-side registry if it supports that.
        # (int, long) check: this file targets Python 2.
        if acquire and (v is not None) and not isinstance(v, (int, long)):
            if not v.thisown:
                raise Exception("%s is already owned, and cannot be reused" % v)
            v.thisown = False
            dereg = getattr(v, "_deregister", None)
            if dereg:
                dereg()
        return True
    def _maybe_disown_and_deregister(self):
        # Hand ownership to C++ if Python currently owns this object.
        if self.thisown:
            self.thisown = False
            self._deregister()
    def _own_and_register(self):
        # Reclaim ownership on the Python side and register the object.
        assert(not self.thisown)
        self.thisown = True
        self._register()
    def replace_by(self, o):
        # Replace this object by `o`, transferring ownership of `o` to C++.
        assert(isinstance(o, (cexpr_t, cinsn_t)))
        o._maybe_disown_and_deregister()
        self._replace_by(o)
    def _meminfo(self):
        # Debug helper: native memory info plus the Python-side registered
        # kind, decoded via the table below (indexes match
        # __dbg_get_registered_kind()).
        cpp = self.__dbg_get_meminfo()
        rkind = self.__dbg_get_registered_kind()
        rkind_str = [
            "(not owned)",
            "cfuncptr_t",
            "cinsn_t",
            "cexpr_t",
            "cblock_t",
            "mbl_array_t",
            "mop_t",
            "minsn_t",
            "optinsn_t",
            "optblock_t",
            "valrng_t"][rkind]
        return "%s [thisown=%s, owned by IDAPython as=%s]" % (
            cpp,
            self.thisown,
            rkind_str)
    meminfo = property(_meminfo)
    __swig_destroy__ = _ida_hexrays.delete_minsn_t
    __del__ = lambda self : None;
# Register the minsn_t proxy class with the SWIG runtime.
minsn_t_swigregister = _ida_hexrays.minsn_t_swigregister
minsn_t_swigregister(minsn_t)
# IPROP_*: bit values for minsn_t.iprops.
IPROP_OPTIONAL = _ida_hexrays.IPROP_OPTIONAL
"""
optional instruction
"""
IPROP_PERSIST = _ida_hexrays.IPROP_PERSIST
"""
persistent insn; they are not destroyed
"""
IPROP_WILDMATCH = _ida_hexrays.IPROP_WILDMATCH
"""
match multiple insns
"""
IPROP_CLNPOP = _ida_hexrays.IPROP_CLNPOP
"""
(e.g. "pop ecx" is often used for that)
the purpose of the instruction is to clean stack
"""
IPROP_FPINSN = _ida_hexrays.IPROP_FPINSN
"""
floating point insn
"""
IPROP_FARCALL = _ida_hexrays.IPROP_FARCALL
"""
call of a far function using push cs/call sequence
"""
IPROP_TAILCALL = _ida_hexrays.IPROP_TAILCALL
"""
tail call
"""
IPROP_ASSERT = _ida_hexrays.IPROP_ASSERT
"""
assertion: usually mov #val, op. assertions are used to help the
optimizer. assertions are ignored when generating ctree
"""
IPROP_SPLIT = _ida_hexrays.IPROP_SPLIT
"""
the instruction has been split:
"""
IPROP_SPLIT1 = _ida_hexrays.IPROP_SPLIT1
"""
into 1 byte
"""
IPROP_SPLIT2 = _ida_hexrays.IPROP_SPLIT2
"""
into 2 bytes
"""
IPROP_SPLIT4 = _ida_hexrays.IPROP_SPLIT4
"""
into 4 bytes
"""
IPROP_SPLIT8 = _ida_hexrays.IPROP_SPLIT8
"""
into 8 bytes
"""
IPROP_COMBINED = _ida_hexrays.IPROP_COMBINED
"""
insn has been modified because of a partial reference
"""
IPROP_EXTSTX = _ida_hexrays.IPROP_EXTSTX
"""
this is m_ext propagated into m_stx
"""
IPROP_IGNLOWSRC = _ida_hexrays.IPROP_IGNLOWSRC
"""
low part of the instruction source operand has been created
artificially (this bit is used only for 'and x, 80...')
"""
IPROP_INV_JX = _ida_hexrays.IPROP_INV_JX
"""
inverted conditional jump
"""
IPROP_WAS_NORET = _ida_hexrays.IPROP_WAS_NORET
"""
was noret icall
"""
IPROP_MULTI_MOV = _ida_hexrays.IPROP_MULTI_MOV
"""
(example: STM on ARM may transfer multiple registers)
the minsn was generated as part of insn that moves multiple
registersbits that can be set by plugins:
"""
IPROP_DONT_PROP = _ida_hexrays.IPROP_DONT_PROP
"""
may not propagate
"""
IPROP_DONT_COMB = _ida_hexrays.IPROP_DONT_COMB
"""
may not combine this instruction with others
"""
# OPTI_*: optimization flags (e.g. minsn_t.optimize_solo optflags).
OPTI_ADDREXPRS = _ida_hexrays.OPTI_ADDREXPRS
"""
optimize all address expressions (&x+N; &x-&y)
"""
OPTI_MINSTKREF = _ida_hexrays.OPTI_MINSTKREF
"""
may update minstkref
"""
OPTI_COMBINSNS = _ida_hexrays.OPTI_COMBINSNS
"""
may combine insns (only for optimize_insn)
"""
OPTI_NO_LDXOPT = _ida_hexrays.OPTI_NO_LDXOPT
"""
do not optimize low/high(ldx)
"""
# EQ_*: comparison flags for minsn_t.equal_insns().
EQ_IGNSIZE = _ida_hexrays.EQ_IGNSIZE
"""
ignore operand sizes
"""
EQ_IGNCODE = _ida_hexrays.EQ_IGNCODE
"""
ignore instruction opcodes
"""
EQ_CMPDEST = _ida_hexrays.EQ_CMPDEST
"""
compare instruction destinations
"""
EQ_OPTINSN = _ida_hexrays.EQ_OPTINSN
"""
optimize mop_d operands
"""
def getf_reginsn(*args):
    """
    getf_reginsn(ins) -> minsn_t
    Skip assertions forward.
    @param ins (C++: const minsn_t *)
    """
    # Thin forwarder to the native implementation.
    native_impl = _ida_hexrays.getf_reginsn
    return native_impl(*args)
def getb_reginsn(*args):
    """
    getb_reginsn(ins) -> minsn_t
    Skip assertions backward.
    @param ins (C++: const minsn_t *)
    """
    # Thin forwarder to the native implementation.
    native_impl = _ida_hexrays.getb_reginsn
    return native_impl(*args)
# BLT_*: block types — presumably the values taken by mblock_t.type; confirm
# against the Hex-Rays SDK headers.
BLT_NONE = _ida_hexrays.BLT_NONE
BLT_STOP = _ida_hexrays.BLT_STOP
BLT_0WAY = _ida_hexrays.BLT_0WAY
BLT_1WAY = _ida_hexrays.BLT_1WAY
BLT_2WAY = _ida_hexrays.BLT_2WAY
BLT_NWAY = _ida_hexrays.BLT_NWAY
BLT_XTRN = _ida_hexrays.BLT_XTRN
class mblock_t(object):
    """
    Proxy of C++ mblock_t class

    A Hex-Rays microcode basic block.  Not constructible from Python
    (constructor raises); instances come from the native side.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    nextb = _swig_property(_ida_hexrays.mblock_t_nextb_get, _ida_hexrays.mblock_t_nextb_set)
    prevb = _swig_property(_ida_hexrays.mblock_t_prevb_get, _ida_hexrays.mblock_t_prevb_set)
    flags = _swig_property(_ida_hexrays.mblock_t_flags_get, _ida_hexrays.mblock_t_flags_set)
    start = _swig_property(_ida_hexrays.mblock_t_start_get, _ida_hexrays.mblock_t_start_set)
    end = _swig_property(_ida_hexrays.mblock_t_end_get, _ida_hexrays.mblock_t_end_set)
    head = _swig_property(_ida_hexrays.mblock_t_head_get, _ida_hexrays.mblock_t_head_set)
    tail = _swig_property(_ida_hexrays.mblock_t_tail_get, _ida_hexrays.mblock_t_tail_set)
    mba = _swig_property(_ida_hexrays.mblock_t_mba_get, _ida_hexrays.mblock_t_mba_set)
    serial = _swig_property(_ida_hexrays.mblock_t_serial_get, _ida_hexrays.mblock_t_serial_set)
    type = _swig_property(_ida_hexrays.mblock_t_type_get, _ida_hexrays.mblock_t_type_set)
    dead_at_start = _swig_property(_ida_hexrays.mblock_t_dead_at_start_get, _ida_hexrays.mblock_t_dead_at_start_set)
    mustbuse = _swig_property(_ida_hexrays.mblock_t_mustbuse_get, _ida_hexrays.mblock_t_mustbuse_set)
    maybuse = _swig_property(_ida_hexrays.mblock_t_maybuse_get, _ida_hexrays.mblock_t_maybuse_set)
    mustbdef = _swig_property(_ida_hexrays.mblock_t_mustbdef_get, _ida_hexrays.mblock_t_mustbdef_set)
    maybdef = _swig_property(_ida_hexrays.mblock_t_maybdef_get, _ida_hexrays.mblock_t_maybdef_set)
    dnu = _swig_property(_ida_hexrays.mblock_t_dnu_get, _ida_hexrays.mblock_t_dnu_set)
    maxbsp = _swig_property(_ida_hexrays.mblock_t_maxbsp_get, _ida_hexrays.mblock_t_maxbsp_set)
    minbstkref = _swig_property(_ida_hexrays.mblock_t_minbstkref_get, _ida_hexrays.mblock_t_minbstkref_set)
    minbargref = _swig_property(_ida_hexrays.mblock_t_minbargref_get, _ida_hexrays.mblock_t_minbargref_set)
    predset = _swig_property(_ida_hexrays.mblock_t_predset_get, _ida_hexrays.mblock_t_predset_set)
    succset = _swig_property(_ida_hexrays.mblock_t_succset_get, _ida_hexrays.mblock_t_succset_set)
    def mark_lists_dirty(self, *args):
        """
        mark_lists_dirty(self)
        """
        return _ida_hexrays.mblock_t_mark_lists_dirty(self, *args)
    def request_propagation(self, *args):
        """
        request_propagation(self)
        """
        return _ida_hexrays.mblock_t_request_propagation(self, *args)
    def needs_propagation(self, *args):
        """
        needs_propagation(self) -> bool
        """
        return _ida_hexrays.mblock_t_needs_propagation(self, *args)
    def request_demote64(self, *args):
        """
        request_demote64(self)
        """
        return _ida_hexrays.mblock_t_request_demote64(self, *args)
    def lists_dirty(self, *args):
        """
        lists_dirty(self) -> bool
        """
        return _ida_hexrays.mblock_t_lists_dirty(self, *args)
    def lists_ready(self, *args):
        """
        lists_ready(self) -> bool
        """
        return _ida_hexrays.mblock_t_lists_ready(self, *args)
    def make_lists_ready(self, *args):
        """
        make_lists_ready(self) -> int
        """
        return _ida_hexrays.mblock_t_make_lists_ready(self, *args)
    def npred(self, *args):
        """
        npred(self) -> int
        """
        return _ida_hexrays.mblock_t_npred(self, *args)
    def nsucc(self, *args):
        """
        nsucc(self) -> int
        """
        return _ida_hexrays.mblock_t_nsucc(self, *args)
    def pred(self, *args):
        """
        pred(self, n) -> int
        """
        return _ida_hexrays.mblock_t_pred(self, *args)
    def succ(self, *args):
        """
        succ(self, n) -> int
        """
        return _ida_hexrays.mblock_t_succ(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_mblock_t
    __del__ = lambda self : None;
    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.mblock_t_empty(self, *args)
    def _print(self, *args):
        """
        _print(self, vp)
        """
        return _ida_hexrays.mblock_t__print(self, *args)
    def dump(self, *args):
        """
        dump(self)
        """
        return _ida_hexrays.mblock_t_dump(self, *args)
    def dump_block(self, *args):
        """
        dump_block(self, title)
        """
        return _ida_hexrays.mblock_t_dump_block(self, *args)
    def insert_into_block(self, *args):
        """
        insert_into_block(self, nm, om) -> minsn_t
        """
        val = _ida_hexrays.mblock_t_insert_into_block(self, *args)
        # The inserted instruction is now owned by the block: hand ownership
        # of the Python wrapper over to the C++ side.
        mn = args[0]
        mn._maybe_disown_and_deregister()
        return val
    def remove_from_block(self, *args):
        """
        remove_from_block(self, m) -> minsn_t
        """
        mn = args[0]
        val = _ida_hexrays.mblock_t_remove_from_block(self, *args)
        # The removed instruction is no longer owned by the block: Python
        # takes ownership back so it gets destroyed properly.
        if mn:
            mn._own_and_register()
        return val
    def for_all_insns(self, *args):
        """
        for_all_insns(self, mv) -> int
        """
        return _ida_hexrays.mblock_t_for_all_insns(self, *args)
    def for_all_ops(self, *args):
        """
        for_all_ops(self, mv) -> int
        """
        return _ida_hexrays.mblock_t_for_all_ops(self, *args)
    def for_all_uses(self, *args):
        """
        for_all_uses(self, list, i1, i2, mmv) -> int
        """
        return _ida_hexrays.mblock_t_for_all_uses(self, *args)
    def optimize_insn(self, *args):
        """
        optimize_insn(self, m, optflags=0x0002|0x0004) -> int
        """
        return _ida_hexrays.mblock_t_optimize_insn(self, *args)
    def optimize_block(self, *args):
        """
        optimize_block(self) -> int
        """
        return _ida_hexrays.mblock_t_optimize_block(self, *args)
    def build_lists(self, *args):
        """
        build_lists(self, kill_deads) -> int
        """
        return _ida_hexrays.mblock_t_build_lists(self, *args)
    def optimize_useless_jump(self, *args):
        """
        optimize_useless_jump(self) -> int
        """
        return _ida_hexrays.mblock_t_optimize_useless_jump(self, *args)
    def append_use_list(self, *args):
        """
        append_use_list(self, list, op, maymust, mask=bitrange_t(0, USHRT_MAX))
        """
        return _ida_hexrays.mblock_t_append_use_list(self, *args)
    def append_def_list(self, *args):
        """
        append_def_list(self, list, op, maymust)
        """
        return _ida_hexrays.mblock_t_append_def_list(self, *args)
    def build_use_list(self, *args):
        """
        build_use_list(self, ins, maymust) -> mlist_t
        """
        return _ida_hexrays.mblock_t_build_use_list(self, *args)
    def build_def_list(self, *args):
        """
        build_def_list(self, ins, maymust) -> mlist_t
        """
        return _ida_hexrays.mblock_t_build_def_list(self, *args)
    def is_used(self, *args):
        """
        is_used(self, list, i1, i2, maymust=MAY_ACCESS) -> bool
        """
        return _ida_hexrays.mblock_t_is_used(self, *args)
    def find_first_use(self, *args):
        """
        find_first_use(self, list, i1, i2, maymust=MAY_ACCESS) -> minsn_t
        """
        return _ida_hexrays.mblock_t_find_first_use(self, *args)
    def is_redefined(self, *args):
        """
        is_redefined(self, list, i1, i2, maymust=MAY_ACCESS) -> bool
        """
        return _ida_hexrays.mblock_t_is_redefined(self, *args)
    def find_redefinition(self, *args):
        """
        find_redefinition(self, list, i1, i2, maymust=MAY_ACCESS) -> minsn_t
        """
        return _ida_hexrays.mblock_t_find_redefinition(self, *args)
    def is_rhs_redefined(self, *args):
        """
        is_rhs_redefined(self, ins, i1, i2) -> bool
        """
        return _ida_hexrays.mblock_t_is_rhs_redefined(self, *args)
    def find_access(self, *args):
        """
        find_access(self, op, parent, mend, fdflags) -> minsn_t
        """
        # `fdflags` is a combination of the FD_* constants defined below.
        return _ida_hexrays.mblock_t_find_access(self, *args)
    def find_def(self, *args):
        """
        find_def(self, op, p_i1, i2, fdflags) -> minsn_t
        """
        return _ida_hexrays.mblock_t_find_def(self, *args)
    def find_use(self, *args):
        """
        find_use(self, op, p_i1, i2, fdflags) -> minsn_t
        """
        return _ida_hexrays.mblock_t_find_use(self, *args)
    def get_valranges(self, *args):
        """
        get_valranges(self, res, vivl, vrflags) -> bool
        get_valranges(self, res, vivl, m, vrflags) -> bool
        """
        # `vrflags` is a combination of the VR_* constants defined below.
        return _ida_hexrays.mblock_t_get_valranges(self, *args)
    def make_nop(self, *args):
        """
        make_nop(self, m)
        """
        return _ida_hexrays.mblock_t_make_nop(self, *args)
    def get_reginsn_qty(self, *args):
        """
        get_reginsn_qty(self) -> size_t
        """
        return _ida_hexrays.mblock_t_get_reginsn_qty(self, *args)
    def is_call_block(self, *args):
        """
        is_call_block(self) -> bool
        """
        return _ida_hexrays.mblock_t_is_call_block(self, *args)
    def is_unknown_call(self, *args):
        """
        is_unknown_call(self) -> bool
        """
        return _ida_hexrays.mblock_t_is_unknown_call(self, *args)
    def is_nway(self, *args):
        """
        is_nway(self) -> bool
        """
        return _ida_hexrays.mblock_t_is_nway(self, *args)
    def is_branch(self, *args):
        """
        is_branch(self) -> bool
        """
        return _ida_hexrays.mblock_t_is_branch(self, *args)
    def is_simple_goto_block(self, *args):
        """
        is_simple_goto_block(self) -> bool
        """
        return _ida_hexrays.mblock_t_is_simple_goto_block(self, *args)
    def is_simple_jcnd_block(self, *args):
        """
        is_simple_jcnd_block(self) -> bool
        """
        return _ida_hexrays.mblock_t_is_simple_jcnd_block(self, *args)
    def preds(self):
        """
        Iterates the list of predecessor blocks
        """
        # predset holds block serial numbers; resolve each via the owning mba.
        for ser in self.predset:
            yield self.mba.get_mblock(ser)
    def succs(self):
        """
        Iterates the list of successor blocks
        """
        for ser in self.succset:
            yield self.mba.get_mblock(ser)
# Register the mblock_t proxy class with the SWIG runtime.
mblock_t_swigregister = _ida_hexrays.mblock_t_swigregister
mblock_t_swigregister(mblock_t)
# MBL_*: bit values for mblock_t.flags.
MBL_PRIV = _ida_hexrays.MBL_PRIV
"""
the specified are accepted (used in patterns)
private block - no instructions except
"""
MBL_NONFAKE = _ida_hexrays.MBL_NONFAKE
"""
regular block
"""
MBL_FAKE = _ida_hexrays.MBL_FAKE
"""
fake block (after a tail call)
"""
MBL_GOTO = _ida_hexrays.MBL_GOTO
"""
this block is a goto target
"""
MBL_TCAL = _ida_hexrays.MBL_TCAL
"""
aritifical call block for tail calls
"""
MBL_PUSH = _ida_hexrays.MBL_PUSH
"""
needs "convert push/pop instructions"
"""
MBL_DMT64 = _ida_hexrays.MBL_DMT64
"""
needs "demote 64bits"
"""
MBL_COMB = _ida_hexrays.MBL_COMB
"""
needs "combine" pass
"""
MBL_PROP = _ida_hexrays.MBL_PROP
"""
needs 'propagation' pass
"""
MBL_DEAD = _ida_hexrays.MBL_DEAD
"""
needs "eliminate deads" pass
"""
MBL_LIST = _ida_hexrays.MBL_LIST
"""
use/def lists are ready (not dirty)
"""
MBL_INCONST = _ida_hexrays.MBL_INCONST
"""
inconsistent lists: we are building them
"""
MBL_CALL = _ida_hexrays.MBL_CALL
"""
call information has been built
"""
MBL_BACKPROP = _ida_hexrays.MBL_BACKPROP
"""
performed backprop_cc
"""
MBL_NORET = _ida_hexrays.MBL_NORET
"""
dead end block: doesn't return execution control
"""
MBL_DSLOT = _ida_hexrays.MBL_DSLOT
"""
block for delay slot
"""
MBL_VALRANGES = _ida_hexrays.MBL_VALRANGES
"""
should optimize using value ranges
"""
# FD_*: flags for mblock_t.find_access/find_def/find_use.
FD_BACKWARD = _ida_hexrays.FD_BACKWARD
"""
search direction
"""
FD_FORWARD = _ida_hexrays.FD_FORWARD
"""
search direction
"""
FD_USE = _ida_hexrays.FD_USE
"""
look for use
"""
FD_DEF = _ida_hexrays.FD_DEF
"""
look for definition
"""
FD_DIRTY = _ida_hexrays.FD_DIRTY
"""
by function calls and indirect memory access
ignore possible implicit definitions
"""
# VR_*: flags for mblock_t.get_valranges().
VR_AT_START = _ida_hexrays.VR_AT_START
"""
at the block start (if M is NULL)
get value ranges before the instruction or
"""
VR_AT_END = _ida_hexrays.VR_AT_END
"""
get value ranges after the instruction or at the block end, just after
the last instruction (if M is NULL)
"""
VR_EXACT = _ida_hexrays.VR_EXACT
"""
valrng size will be >= vivl.size
find exact match. if not set, the returned
"""
# WARN_*: decompiler warning codes — presumably the values stored in
# hexwarn_t.id (declared below); confirm against the Hex-Rays SDK.
WARN_VARARG_REGS = _ida_hexrays.WARN_VARARG_REGS
WARN_ILL_PURGED = _ida_hexrays.WARN_ILL_PURGED
WARN_ILL_FUNCTYPE = _ida_hexrays.WARN_ILL_FUNCTYPE
WARN_VARARG_TCAL = _ida_hexrays.WARN_VARARG_TCAL
WARN_VARARG_NOSTK = _ida_hexrays.WARN_VARARG_NOSTK
WARN_VARARG_MANY = _ida_hexrays.WARN_VARARG_MANY
WARN_ADDR_OUTARGS = _ida_hexrays.WARN_ADDR_OUTARGS
WARN_DEP_UNK_CALLS = _ida_hexrays.WARN_DEP_UNK_CALLS
WARN_ILL_ELLIPSIS = _ida_hexrays.WARN_ILL_ELLIPSIS
WARN_GUESSED_TYPE = _ida_hexrays.WARN_GUESSED_TYPE
WARN_EXP_LINVAR = _ida_hexrays.WARN_EXP_LINVAR
WARN_WIDEN_CHAINS = _ida_hexrays.WARN_WIDEN_CHAINS
WARN_BAD_PURGED = _ida_hexrays.WARN_BAD_PURGED
WARN_CBUILD_LOOPS = _ida_hexrays.WARN_CBUILD_LOOPS
WARN_NO_SAVE_REST = _ida_hexrays.WARN_NO_SAVE_REST
WARN_ODD_INPUT_REG = _ida_hexrays.WARN_ODD_INPUT_REG
WARN_ODD_ADDR_USE = _ida_hexrays.WARN_ODD_ADDR_USE
WARN_MUST_RET_FP = _ida_hexrays.WARN_MUST_RET_FP
WARN_ILL_FPU_STACK = _ida_hexrays.WARN_ILL_FPU_STACK
WARN_SELFREF_PROP = _ida_hexrays.WARN_SELFREF_PROP
WARN_WOULD_OVERLAP = _ida_hexrays.WARN_WOULD_OVERLAP
WARN_ARRAY_INARG = _ida_hexrays.WARN_ARRAY_INARG
WARN_MAX_ARGS = _ida_hexrays.WARN_MAX_ARGS
WARN_BAD_FIELD_TYPE = _ida_hexrays.WARN_BAD_FIELD_TYPE
WARN_WRITE_CONST = _ida_hexrays.WARN_WRITE_CONST
WARN_BAD_RETVAR = _ida_hexrays.WARN_BAD_RETVAR
WARN_FRAG_LVAR = _ida_hexrays.WARN_FRAG_LVAR
WARN_HUGE_STKOFF = _ida_hexrays.WARN_HUGE_STKOFF
WARN_UNINITED_REG = _ida_hexrays.WARN_UNINITED_REG
WARN_FIXED_MACRO = _ida_hexrays.WARN_FIXED_MACRO
WARN_WRONG_VA_OFF = _ida_hexrays.WARN_WRONG_VA_OFF
WARN_CR_NOFIELD = _ida_hexrays.WARN_CR_NOFIELD
WARN_CR_BADOFF = _ida_hexrays.WARN_CR_BADOFF
WARN_BAD_STROFF = _ida_hexrays.WARN_BAD_STROFF
WARN_BAD_VARSIZE = _ida_hexrays.WARN_BAD_VARSIZE
WARN_UNSUPP_REG = _ida_hexrays.WARN_UNSUPP_REG
WARN_UNALIGNED_ARG = _ida_hexrays.WARN_UNALIGNED_ARG
WARN_BAD_STD_TYPE = _ida_hexrays.WARN_BAD_STD_TYPE
WARN_BAD_CALL_SP = _ida_hexrays.WARN_BAD_CALL_SP
WARN_MISSED_SWITCH = _ida_hexrays.WARN_MISSED_SWITCH
WARN_BAD_SP = _ida_hexrays.WARN_BAD_SP
WARN_BAD_STKPNT = _ida_hexrays.WARN_BAD_STKPNT
WARN_UNDEF_LVAR = _ida_hexrays.WARN_UNDEF_LVAR
WARN_JUMPOUT = _ida_hexrays.WARN_JUMPOUT
WARN_BAD_VALRNG = _ida_hexrays.WARN_BAD_VALRNG
WARN_BAD_SHADOW = _ida_hexrays.WARN_BAD_SHADOW
WARN_MAX = _ida_hexrays.WARN_MAX
class hexwarn_t(object):
    """
    Proxy of C++ hexwarn_t class.

    One decompiler warning: an address (``ea``), a warning id (``id``,
    one of the WARN_* constants above) and the warning message (``text``).
    Instances are totally ordered; comparison is delegated to the C++
    implementation via ``compare``.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Data members exposed from the underlying C++ object.
    ea = _swig_property(_ida_hexrays.hexwarn_t_ea_get, _ida_hexrays.hexwarn_t_ea_set)
    id = _swig_property(_ida_hexrays.hexwarn_t_id_get, _ida_hexrays.hexwarn_t_id_set)
    text = _swig_property(_ida_hexrays.hexwarn_t_text_get, _ida_hexrays.hexwarn_t_text_set)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.hexwarn_t___eq__(self, *args)

    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.hexwarn_t___ne__(self, *args)

    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.hexwarn_t___lt__(self, *args)

    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.hexwarn_t___gt__(self, *args)

    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.hexwarn_t___le__(self, *args)

    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.hexwarn_t___ge__(self, *args)

    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.hexwarn_t_compare(self, *args)

    def __init__(self, *args):
        """
        __init__(self) -> hexwarn_t
        """
        this = _ida_hexrays.new_hexwarn_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_hexwarn_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime (required boilerplate).
hexwarn_t_swigregister = _ida_hexrays.hexwarn_t_swigregister
hexwarn_t_swigregister(hexwarn_t)
# Microcode maturity levels (MMAT_*): the successive transformation stages
# the microcode array goes through, from freshly generated to fully
# allocated local variables.
MMAT_ZERO = _ida_hexrays.MMAT_ZERO
MMAT_GENERATED = _ida_hexrays.MMAT_GENERATED
MMAT_PREOPTIMIZED = _ida_hexrays.MMAT_PREOPTIMIZED
MMAT_LOCOPT = _ida_hexrays.MMAT_LOCOPT
MMAT_CALLS = _ida_hexrays.MMAT_CALLS
MMAT_GLBOPT1 = _ida_hexrays.MMAT_GLBOPT1
MMAT_GLBOPT2 = _ida_hexrays.MMAT_GLBOPT2
MMAT_GLBOPT3 = _ida_hexrays.MMAT_GLBOPT3
MMAT_LVARS = _ida_hexrays.MMAT_LVARS
# Indexes of the standard memory regions (MMIDX_*); see
# mbl_array_t.get_std_region().
MMIDX_GLBLOW = _ida_hexrays.MMIDX_GLBLOW
MMIDX_LVARS = _ida_hexrays.MMIDX_LVARS
MMIDX_RETADDR = _ida_hexrays.MMIDX_RETADDR
MMIDX_SHADOW = _ida_hexrays.MMIDX_SHADOW
MMIDX_ARGS = _ida_hexrays.MMIDX_ARGS
MMIDX_GLBHIGH = _ida_hexrays.MMIDX_GLBHIGH
class mba_ranges_t(object):
    """
    Proxy of C++ mba_ranges_t class.

    The address ranges to decompile: either a whole function (``pfn``)
    or an explicit list of ranges (``ranges``, a snippet).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    pfn = _swig_property(_ida_hexrays.mba_ranges_t_pfn_get, _ida_hexrays.mba_ranges_t_pfn_set)
    ranges = _swig_property(_ida_hexrays.mba_ranges_t_ranges_get, _ida_hexrays.mba_ranges_t_ranges_set)
    def __init__(self, *args):
        """
        __init__(self, _pfn=None) -> mba_ranges_t
        __init__(self, r) -> mba_ranges_t
        """
        this = _ida_hexrays.new_mba_ranges_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def start(self, *args):
        """
        start(self) -> ea_t
        """
        return _ida_hexrays.mba_ranges_t_start(self, *args)

    def empty(self, *args):
        """
        empty(self) -> bool
        """
        return _ida_hexrays.mba_ranges_t_empty(self, *args)

    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.mba_ranges_t_clear(self, *args)

    def is_snippet(self, *args):
        """
        is_snippet(self) -> bool
        """
        return _ida_hexrays.mba_ranges_t_is_snippet(self, *args)

    def is_fragmented(self, *args):
        """
        is_fragmented(self) -> bool
        """
        return _ida_hexrays.mba_ranges_t_is_fragmented(self, *args)

    __swig_destroy__ = _ida_hexrays.delete_mba_ranges_t
    __del__ = lambda self : None;
# SWIG runtime registration boilerplate.
mba_ranges_t_swigregister = _ida_hexrays.mba_ranges_t_swigregister
mba_ranges_t_swigregister(mba_ranges_t)
class mba_range_iterator_t(object):
    """
    Proxy of C++ mba_range_iterator_t class.

    Iterator over the chunks of an mba_ranges_t. Typical use: ``set()``
    it on an mba_ranges_t, then read ``chunk()`` and call ``next()``
    until it returns False.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # rii/fii: the underlying range-list and function-tail iterators.
    rii = _swig_property(_ida_hexrays.mba_range_iterator_t_rii_get, _ida_hexrays.mba_range_iterator_t_rii_set)
    fii = _swig_property(_ida_hexrays.mba_range_iterator_t_fii_get, _ida_hexrays.mba_range_iterator_t_fii_set)
    def is_snippet(self, *args):
        """
        is_snippet(self) -> bool
        """
        return _ida_hexrays.mba_range_iterator_t_is_snippet(self, *args)

    def set(self, *args):
        """
        set(self, mbr) -> bool
        """
        return _ida_hexrays.mba_range_iterator_t_set(self, *args)

    def next(self, *args):
        """
        next(self) -> bool
        """
        return _ida_hexrays.mba_range_iterator_t_next(self, *args)

    def chunk(self, *args):
        """
        chunk(self) -> range_t
        """
        return _ida_hexrays.mba_range_iterator_t_chunk(self, *args)

    def __init__(self, *args):
        """
        __init__(self) -> mba_range_iterator_t
        """
        this = _ida_hexrays.new_mba_range_iterator_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_mba_range_iterator_t
    __del__ = lambda self : None;
# SWIG runtime registration boilerplate.
mba_range_iterator_t_swigregister = _ida_hexrays.mba_range_iterator_t_swigregister
mba_range_iterator_t_swigregister(mba_range_iterator_t)
class mbl_array_t(object):
    """
    Proxy of C++ mbl_array_t class.

    The microcode array: the decompiler's intermediate representation of
    one function (or snippet) as an array of basic blocks (``blocks``,
    ``qty``). Instances are created by the decompiler itself, not from
    Python (the constructor is disabled below).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    # --- boolean status queries (thin wrappers over the C++ object) ---
    def precise_defeas(self, *args):
        """
        precise_defeas(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_precise_defeas(self, *args)

    def optimized(self, *args):
        """
        optimized(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_optimized(self, *args)

    def short_display(self, *args):
        """
        short_display(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_short_display(self, *args)

    def show_reduction(self, *args):
        """
        show_reduction(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_show_reduction(self, *args)

    def graph_insns(self, *args):
        """
        graph_insns(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_graph_insns(self, *args)

    def loaded_gdl(self, *args):
        """
        loaded_gdl(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_loaded_gdl(self, *args)

    def should_beautify(self, *args):
        """
        should_beautify(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_should_beautify(self, *args)

    def rtype_refined(self, *args):
        """
        rtype_refined(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_rtype_refined(self, *args)

    def may_refine_rettype(self, *args):
        """
        may_refine_rettype(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_may_refine_rettype(self, *args)

    def use_wingraph32(self, *args):
        """
        use_wingraph32(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_use_wingraph32(self, *args)

    def display_numaddrs(self, *args):
        """
        display_numaddrs(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_display_numaddrs(self, *args)

    def display_valnums(self, *args):
        """
        display_valnums(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_display_valnums(self, *args)

    def is_pattern(self, *args):
        """
        is_pattern(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_is_pattern(self, *args)

    def is_thunk(self, *args):
        """
        is_thunk(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_is_thunk(self, *args)

    def saverest_done(self, *args):
        """
        saverest_done(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_saverest_done(self, *args)

    def callinfo_built(self, *args):
        """
        callinfo_built(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_callinfo_built(self, *args)

    def has_overvars(self, *args):
        """
        has_overvars(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_has_overvars(self, *args)

    def really_alloc(self, *args):
        """
        really_alloc(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_really_alloc(self, *args)

    def lvars_allocated(self, *args):
        """
        lvars_allocated(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_lvars_allocated(self, *args)

    def chain_varnums_ok(self, *args):
        """
        chain_varnums_ok(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_chain_varnums_ok(self, *args)

    def returns_fpval(self, *args):
        """
        returns_fpval(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_returns_fpval(self, *args)

    def has_passregs(self, *args):
        """
        has_passregs(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_has_passregs(self, *args)

    def generated_asserts(self, *args):
        """
        generated_asserts(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_generated_asserts(self, *args)

    def propagated_asserts(self, *args):
        """
        propagated_asserts(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_propagated_asserts(self, *args)

    def deleted_pairs(self, *args):
        """
        deleted_pairs(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_deleted_pairs(self, *args)

    def common_stkvars_stkargs(self, *args):
        """
        common_stkvars_stkargs(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_common_stkvars_stkargs(self, *args)

    def lvar_names_ok(self, *args):
        """
        lvar_names_ok(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_lvar_names_ok(self, *args)

    def lvars_renamed(self, *args):
        """
        lvars_renamed(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_lvars_renamed(self, *args)

    def has_over_chains(self, *args):
        """
        has_over_chains(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_has_over_chains(self, *args)

    def valranges_done(self, *args):
        """
        valranges_done(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_valranges_done(self, *args)

    def argidx_ok(self, *args):
        """
        argidx_ok(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_argidx_ok(self, *args)

    def is_ctr(self, *args):
        """
        is_ctr(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_is_ctr(self, *args)

    def is_dtr(self, *args):
        """
        is_dtr(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_is_dtr(self, *args)

    def is_cdtr(self, *args):
        """
        is_cdtr(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_is_cdtr(self, *args)

    # --- mba flag access (MBA_* / MBA2_* bits, defined after this class) ---
    def get_mba_flags(self, *args):
        """
        get_mba_flags(self) -> int
        """
        return _ida_hexrays.mbl_array_t_get_mba_flags(self, *args)

    def get_mba_flags2(self, *args):
        """
        get_mba_flags2(self) -> int
        """
        return _ida_hexrays.mbl_array_t_get_mba_flags2(self, *args)

    def set_mba_flags(self, *args):
        """
        set_mba_flags(self, f)
        """
        return _ida_hexrays.mbl_array_t_set_mba_flags(self, *args)

    def clr_mba_flags(self, *args):
        """
        clr_mba_flags(self, f)
        """
        return _ida_hexrays.mbl_array_t_clr_mba_flags(self, *args)

    def set_mba_flags2(self, *args):
        """
        set_mba_flags2(self, f)
        """
        return _ida_hexrays.mbl_array_t_set_mba_flags2(self, *args)

    def clr_mba_flags2(self, *args):
        """
        clr_mba_flags2(self, f)
        """
        return _ida_hexrays.mbl_array_t_clr_mba_flags2(self, *args)

    def clr_cdtr(self, *args):
        """
        clr_cdtr(self)
        """
        return _ida_hexrays.mbl_array_t_clr_cdtr(self, *args)

    def calc_shins_flags(self, *args):
        """
        calc_shins_flags(self) -> int
        """
        return _ida_hexrays.mbl_array_t_calc_shins_flags(self, *args)

    # --- stack offset / argument location conversions ---
    def stkoff_vd2ida(self, *args):
        """
        stkoff_vd2ida(self, off) -> sval_t
        """
        return _ida_hexrays.mbl_array_t_stkoff_vd2ida(self, *args)

    def stkoff_ida2vd(self, *args):
        """
        stkoff_ida2vd(self, off) -> sval_t
        """
        return _ida_hexrays.mbl_array_t_stkoff_ida2vd(self, *args)

    def argbase(self, *args):
        """
        argbase(self) -> sval_t
        """
        return _ida_hexrays.mbl_array_t_argbase(self, *args)

    def idaloc2vd(self, *args):
        """
        idaloc2vd(self, loc, width) -> vdloc_t
        """
        return _ida_hexrays.mbl_array_t_idaloc2vd(self, *args)

    def vd2idaloc(self, *args):
        """
        vd2idaloc(self, loc, width, spd) -> argloc_t
        vd2idaloc(self, loc, width) -> argloc_t
        """
        return _ida_hexrays.mbl_array_t_vd2idaloc(self, *args)

    def is_stkarg(self, *args):
        """
        is_stkarg(self, v) -> bool
        """
        return _ida_hexrays.mbl_array_t_is_stkarg(self, *args)

    def get_ida_argloc(self, *args):
        """
        get_ida_argloc(self, v) -> argloc_t
        """
        return _ida_hexrays.mbl_array_t_get_ida_argloc(self, *args)

    # --- data members exposed from the underlying C++ object ---
    mbr = _swig_property(_ida_hexrays.mbl_array_t_mbr_get, _ida_hexrays.mbl_array_t_mbr_set)
    entry_ea = _swig_property(_ida_hexrays.mbl_array_t_entry_ea_get, _ida_hexrays.mbl_array_t_entry_ea_set)
    last_prolog_ea = _swig_property(_ida_hexrays.mbl_array_t_last_prolog_ea_get, _ida_hexrays.mbl_array_t_last_prolog_ea_set)
    first_epilog_ea = _swig_property(_ida_hexrays.mbl_array_t_first_epilog_ea_get, _ida_hexrays.mbl_array_t_first_epilog_ea_set)
    qty = _swig_property(_ida_hexrays.mbl_array_t_qty_get, _ida_hexrays.mbl_array_t_qty_set)
    npurged = _swig_property(_ida_hexrays.mbl_array_t_npurged_get, _ida_hexrays.mbl_array_t_npurged_set)
    cc = _swig_property(_ida_hexrays.mbl_array_t_cc_get, _ida_hexrays.mbl_array_t_cc_set)
    tmpstk_size = _swig_property(_ida_hexrays.mbl_array_t_tmpstk_size_get, _ida_hexrays.mbl_array_t_tmpstk_size_set)
    frsize = _swig_property(_ida_hexrays.mbl_array_t_frsize_get, _ida_hexrays.mbl_array_t_frsize_set)
    frregs = _swig_property(_ida_hexrays.mbl_array_t_frregs_get, _ida_hexrays.mbl_array_t_frregs_set)
    fpd = _swig_property(_ida_hexrays.mbl_array_t_fpd_get, _ida_hexrays.mbl_array_t_fpd_set)
    pfn_flags = _swig_property(_ida_hexrays.mbl_array_t_pfn_flags_get, _ida_hexrays.mbl_array_t_pfn_flags_set)
    retsize = _swig_property(_ida_hexrays.mbl_array_t_retsize_get, _ida_hexrays.mbl_array_t_retsize_set)
    shadow_args = _swig_property(_ida_hexrays.mbl_array_t_shadow_args_get, _ida_hexrays.mbl_array_t_shadow_args_set)
    fullsize = _swig_property(_ida_hexrays.mbl_array_t_fullsize_get, _ida_hexrays.mbl_array_t_fullsize_set)
    stacksize = _swig_property(_ida_hexrays.mbl_array_t_stacksize_get, _ida_hexrays.mbl_array_t_stacksize_set)
    inargoff = _swig_property(_ida_hexrays.mbl_array_t_inargoff_get, _ida_hexrays.mbl_array_t_inargoff_set)
    minstkref = _swig_property(_ida_hexrays.mbl_array_t_minstkref_get, _ida_hexrays.mbl_array_t_minstkref_set)
    minstkref_ea = _swig_property(_ida_hexrays.mbl_array_t_minstkref_ea_get, _ida_hexrays.mbl_array_t_minstkref_ea_set)
    minargref = _swig_property(_ida_hexrays.mbl_array_t_minargref_get, _ida_hexrays.mbl_array_t_minargref_set)
    spd_adjust = _swig_property(_ida_hexrays.mbl_array_t_spd_adjust_get, _ida_hexrays.mbl_array_t_spd_adjust_set)
    aliased_vars = _swig_property(_ida_hexrays.mbl_array_t_aliased_vars_get, _ida_hexrays.mbl_array_t_aliased_vars_set)
    aliased_args = _swig_property(_ida_hexrays.mbl_array_t_aliased_args_get, _ida_hexrays.mbl_array_t_aliased_args_set)
    gotoff_stkvars = _swig_property(_ida_hexrays.mbl_array_t_gotoff_stkvars_get, _ida_hexrays.mbl_array_t_gotoff_stkvars_set)
    restricted_memory = _swig_property(_ida_hexrays.mbl_array_t_restricted_memory_get, _ida_hexrays.mbl_array_t_restricted_memory_set)
    aliased_memory = _swig_property(_ida_hexrays.mbl_array_t_aliased_memory_get, _ida_hexrays.mbl_array_t_aliased_memory_set)
    nodel_memory = _swig_property(_ida_hexrays.mbl_array_t_nodel_memory_get, _ida_hexrays.mbl_array_t_nodel_memory_set)
    consumed_argregs = _swig_property(_ida_hexrays.mbl_array_t_consumed_argregs_get, _ida_hexrays.mbl_array_t_consumed_argregs_set)
    maturity = _swig_property(_ida_hexrays.mbl_array_t_maturity_get, _ida_hexrays.mbl_array_t_maturity_set)
    reqmat = _swig_property(_ida_hexrays.mbl_array_t_reqmat_get, _ida_hexrays.mbl_array_t_reqmat_set)
    final_type = _swig_property(_ida_hexrays.mbl_array_t_final_type_get, _ida_hexrays.mbl_array_t_final_type_set)
    idb_type = _swig_property(_ida_hexrays.mbl_array_t_idb_type_get, _ida_hexrays.mbl_array_t_idb_type_set)
    idb_spoiled = _swig_property(_ida_hexrays.mbl_array_t_idb_spoiled_get, _ida_hexrays.mbl_array_t_idb_spoiled_set)
    spoiled_list = _swig_property(_ida_hexrays.mbl_array_t_spoiled_list_get, _ida_hexrays.mbl_array_t_spoiled_list_set)
    fti_flags = _swig_property(_ida_hexrays.mbl_array_t_fti_flags_get, _ida_hexrays.mbl_array_t_fti_flags_set)
    idb_node = _swig_property(_ida_hexrays.mbl_array_t_idb_node_get, _ida_hexrays.mbl_array_t_idb_node_set)
    label = _swig_property(_ida_hexrays.mbl_array_t_label_get, _ida_hexrays.mbl_array_t_label_set)
    vars = _swig_property(_ida_hexrays.mbl_array_t_vars_get, _ida_hexrays.mbl_array_t_vars_set)
    argidx = _swig_property(_ida_hexrays.mbl_array_t_argidx_get, _ida_hexrays.mbl_array_t_argidx_set)
    retvaridx = _swig_property(_ida_hexrays.mbl_array_t_retvaridx_get, _ida_hexrays.mbl_array_t_retvaridx_set)
    error_ea = _swig_property(_ida_hexrays.mbl_array_t_error_ea_get, _ida_hexrays.mbl_array_t_error_ea_set)
    error_strarg = _swig_property(_ida_hexrays.mbl_array_t_error_strarg_get, _ida_hexrays.mbl_array_t_error_strarg_set)
    blocks = _swig_property(_ida_hexrays.mbl_array_t_blocks_get, _ida_hexrays.mbl_array_t_blocks_set)
    natural = _swig_property(_ida_hexrays.mbl_array_t_natural_get, _ida_hexrays.mbl_array_t_natural_set)
    std_ivls = _swig_property(_ida_hexrays.mbl_array_t_std_ivls_get, _ida_hexrays.mbl_array_t_std_ivls_set)
    notes = _swig_property(_ida_hexrays.mbl_array_t_notes_get, _ida_hexrays.mbl_array_t_notes_set)
    occurred_warns = _swig_property(_ida_hexrays.mbl_array_t_occurred_warns_get, _ida_hexrays.mbl_array_t_occurred_warns_set)
    def write_to_const_detected(self, *args):
        """
        write_to_const_detected(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_write_to_const_detected(self, *args)

    def bad_call_sp_detected(self, *args):
        """
        bad_call_sp_detected(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_bad_call_sp_detected(self, *args)

    def regargs_is_not_aligned(self, *args):
        """
        regargs_is_not_aligned(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_regargs_is_not_aligned(self, *args)

    def has_bad_sp(self, *args):
        """
        has_bad_sp(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_has_bad_sp(self, *args)

    __swig_destroy__ = _ida_hexrays.delete_mbl_array_t
    __del__ = lambda self : None;
    # --- lifecycle, analysis and block manipulation operations ---
    def term(self, *args):
        """
        term(self)
        """
        return _ida_hexrays.mbl_array_t_term(self, *args)

    def get_curfunc(self, *args):
        """
        get_curfunc(self) -> func_t *
        """
        return _ida_hexrays.mbl_array_t_get_curfunc(self, *args)

    def use_frame(self, *args):
        """
        use_frame(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_use_frame(self, *args)

    def is_snippet(self, *args):
        """
        is_snippet(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_is_snippet(self, *args)

    def optimize_local(self, *args):
        """
        optimize_local(self, locopt_bits) -> int
        """
        return _ida_hexrays.mbl_array_t_optimize_local(self, *args)

    def build_graph(self, *args):
        """
        build_graph(self) -> merror_t
        """
        return _ida_hexrays.mbl_array_t_build_graph(self, *args)

    def get_graph(self, *args):
        """
        get_graph(self) -> mbl_graph_t
        """
        return _ida_hexrays.mbl_array_t_get_graph(self, *args)

    def analyze_calls(self, *args):
        """
        analyze_calls(self, acflags) -> int
        """
        return _ida_hexrays.mbl_array_t_analyze_calls(self, *args)

    def optimize_global(self, *args):
        """
        optimize_global(self) -> merror_t
        """
        return _ida_hexrays.mbl_array_t_optimize_global(self, *args)

    def alloc_lvars(self, *args):
        """
        alloc_lvars(self)
        """
        return _ida_hexrays.mbl_array_t_alloc_lvars(self, *args)

    def dump(self, *args):
        """
        dump(self)
        """
        return _ida_hexrays.mbl_array_t_dump(self, *args)

    def dump_mba(self, *args):
        """
        dump_mba(self, _verify, title)
        """
        return _ida_hexrays.mbl_array_t_dump_mba(self, *args)

    def _print(self, *args):
        """
        _print(self, vp)
        """
        return _ida_hexrays.mbl_array_t__print(self, *args)

    def verify(self, *args):
        """
        verify(self, always)
        """
        return _ida_hexrays.mbl_array_t_verify(self, *args)

    def mark_chains_dirty(self, *args):
        """
        mark_chains_dirty(self)
        """
        return _ida_hexrays.mbl_array_t_mark_chains_dirty(self, *args)

    def get_mblock(self, *args):
        """
        get_mblock(self, n) -> mblock_t
        get_mblock(self, n) -> mblock_t
        """
        return _ida_hexrays.mbl_array_t_get_mblock(self, *args)

    def insert_block(self, *args):
        """
        insert_block(self, bblk) -> mblock_t
        """
        return _ida_hexrays.mbl_array_t_insert_block(self, *args)

    def remove_block(self, *args):
        """
        remove_block(self, blk) -> bool
        """
        return _ida_hexrays.mbl_array_t_remove_block(self, *args)

    def copy_block(self, *args):
        """
        copy_block(self, blk, new_serial, cpblk_flags=3) -> mblock_t
        """
        return _ida_hexrays.mbl_array_t_copy_block(self, *args)

    def remove_empty_blocks(self, *args):
        """
        remove_empty_blocks(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_remove_empty_blocks(self, *args)

    def combine_blocks(self, *args):
        """
        combine_blocks(self) -> bool
        """
        return _ida_hexrays.mbl_array_t_combine_blocks(self, *args)

    def for_all_ops(self, *args):
        """
        for_all_ops(self, mv) -> int
        """
        return _ida_hexrays.mbl_array_t_for_all_ops(self, *args)

    def for_all_insns(self, *args):
        """
        for_all_insns(self, mv) -> int
        """
        return _ida_hexrays.mbl_array_t_for_all_insns(self, *args)

    def for_all_topinsns(self, *args):
        """
        for_all_topinsns(self, mv) -> int
        """
        return _ida_hexrays.mbl_array_t_for_all_topinsns(self, *args)

    def find_mop(self, *args):
        """
        find_mop(self, ctx, ea, is_dest, list) -> mop_t
        """
        return _ida_hexrays.mbl_array_t_find_mop(self, *args)

    def arg(self, *args):
        """
        arg(self, n) -> lvar_t
        arg(self, n) -> lvar_t
        """
        return _ida_hexrays.mbl_array_t_arg(self, *args)

    def get_std_region(self, *args):
        """
        get_std_region(self, idx) -> ivl_t
        """
        return _ida_hexrays.mbl_array_t_get_std_region(self, *args)

    def get_lvars_region(self, *args):
        """
        get_lvars_region(self) -> ivl_t
        """
        return _ida_hexrays.mbl_array_t_get_lvars_region(self, *args)

    def get_shadow_region(self, *args):
        """
        get_shadow_region(self) -> ivl_t
        """
        return _ida_hexrays.mbl_array_t_get_shadow_region(self, *args)

    def get_args_region(self, *args):
        """
        get_args_region(self) -> ivl_t
        """
        return _ida_hexrays.mbl_array_t_get_args_region(self, *args)

    def get_stack_region(self, *args):
        """
        get_stack_region(self) -> ivl_t
        """
        return _ida_hexrays.mbl_array_t_get_stack_region(self, *args)

    def serialize(self, *args):
        """
        serialize(self)
        """
        return _ida_hexrays.mbl_array_t_serialize(self, *args)

    def deserialize(*args):
        """
        deserialize(bytes, nbytes) -> mbl_array_t
        """
        return _ida_hexrays.mbl_array_t_deserialize(*args)

    # Expose deserialize as a static method (SWIG pattern: the module-level
    # mbl_array_t_deserialize() function below is the equivalent entry point).
    deserialize = staticmethod(deserialize)
    def _register(self, *args):
        """
        _register(self)
        """
        return _ida_hexrays.mbl_array_t__register(self, *args)

    def _deregister(self, *args):
        """
        _deregister(self)
        """
        return _ida_hexrays.mbl_array_t__deregister(self, *args)

# SWIG runtime registration boilerplate.
mbl_array_t_swigregister = _ida_hexrays.mbl_array_t_swigregister
mbl_array_t_swigregister(mbl_array_t)
MBA_PRCDEFS = _ida_hexrays.MBA_PRCDEFS
"""
use precise defeas for chain-allocated lvars
"""
MBA_NOFUNC = _ida_hexrays.MBA_NOFUNC
"""
function is not present, addresses might be wrong
"""
MBA_PATTERN = _ida_hexrays.MBA_PATTERN
"""
microcode pattern, callinfo is present
"""
MBA_LOADED = _ida_hexrays.MBA_LOADED
"""
loaded gdl, no instructions (debugging)
"""
MBA_RETFP = _ida_hexrays.MBA_RETFP
"""
function returns floating point value
"""
MBA_SPLINFO = _ida_hexrays.MBA_SPLINFO
"""
(final_type ? idb_spoiled : spoiled_regs) is valid
"""
MBA_PASSREGS = _ida_hexrays.MBA_PASSREGS
"""
has 'mcallinfo_t::pass_regs'
"""
MBA_THUNK = _ida_hexrays.MBA_THUNK
"""
thunk function
"""
MBA_CMNSTK = _ida_hexrays.MBA_CMNSTK
"""
stkvars+stkargs should be considered as one area
"""
MBA_PREOPT = _ida_hexrays.MBA_PREOPT
"""
preoptimization stage complete
"""
MBA_CMBBLK = _ida_hexrays.MBA_CMBBLK
"""
request to combine blocks
"""
MBA_ASRTOK = _ida_hexrays.MBA_ASRTOK
"""
assertions have been generated
"""
MBA_CALLS = _ida_hexrays.MBA_CALLS
"""
callinfo has been built
"""
MBA_ASRPROP = _ida_hexrays.MBA_ASRPROP
"""
assertion have been propagated
"""
MBA_SAVRST = _ida_hexrays.MBA_SAVRST
"""
save-restore analysis has been performed
"""
MBA_RETREF = _ida_hexrays.MBA_RETREF
"""
return type has been refined
"""
MBA_GLBOPT = _ida_hexrays.MBA_GLBOPT
"""
microcode has been optimized globally
"""
MBA_OVERVAR = _ida_hexrays.MBA_OVERVAR
"""
an overlapped variable has been detected
"""
MBA_LVARS0 = _ida_hexrays.MBA_LVARS0
"""
lvar pre-allocation has been performed
"""
MBA_LVARS1 = _ida_hexrays.MBA_LVARS1
"""
lvar real allocation has been performed
"""
MBA_DELPAIRS = _ida_hexrays.MBA_DELPAIRS
"""
pairs have been deleted once
"""
MBA_CHVARS = _ida_hexrays.MBA_CHVARS
"""
can verify chain varnums
"""
MBA_SHORT = _ida_hexrays.MBA_SHORT
"""
use short display
"""
MBA_COLGDL = _ida_hexrays.MBA_COLGDL
"""
display graph after each reduction
"""
MBA_INSGDL = _ida_hexrays.MBA_INSGDL
"""
display instruction in graphs
"""
MBA_NICE = _ida_hexrays.MBA_NICE
"""
apply transformations to c code
"""
MBA_REFINE = _ida_hexrays.MBA_REFINE
"""
may refine return value size
"""
MBA_RESERVED = _ida_hexrays.MBA_RESERVED
MBA_WINGR32 = _ida_hexrays.MBA_WINGR32
"""
use wingraph32
"""
MBA_NUMADDR = _ida_hexrays.MBA_NUMADDR
"""
display definition addresses for numbers
"""
MBA_VALNUM = _ida_hexrays.MBA_VALNUM
"""
display value numbers
"""
MBA_INITIAL_FLAGS = _ida_hexrays.MBA_INITIAL_FLAGS
MBA2_LVARNAMES_OK = _ida_hexrays.MBA2_LVARNAMES_OK
MBA2_LVARS_RENAMED = _ida_hexrays.MBA2_LVARS_RENAMED
MBA2_OVER_CHAINS = _ida_hexrays.MBA2_OVER_CHAINS
MBA2_VALRNG_DONE = _ida_hexrays.MBA2_VALRNG_DONE
MBA2_IS_CTR = _ida_hexrays.MBA2_IS_CTR
MBA2_IS_DTR = _ida_hexrays.MBA2_IS_DTR
MBA2_ARGIDX_OK = _ida_hexrays.MBA2_ARGIDX_OK
MBA2_NO_DUP_CALLS = _ida_hexrays.MBA2_NO_DUP_CALLS
MBA2_NO_DUP_LVARS = _ida_hexrays.MBA2_NO_DUP_LVARS
MBA2_INITIAL_FLAGS = _ida_hexrays.MBA2_INITIAL_FLAGS
MBA2_ALL_FLAGS = _ida_hexrays.MBA2_ALL_FLAGS
NALT_VD = _ida_hexrays.NALT_VD
"""
this index is not used by ida
"""
LOCOPT_ALL = _ida_hexrays.LOCOPT_ALL
"""
is not set, only dirty blocks will be optimized
redo optimization for all blocks. if this bit
"""
LOCOPT_REFINE = _ida_hexrays.LOCOPT_REFINE
"""
refine return type, ok to fail
"""
LOCOPT_REFINE2 = _ida_hexrays.LOCOPT_REFINE2
"""
refine return type, try harder
"""
ACFL_LOCOPT = _ida_hexrays.ACFL_LOCOPT
"""
perform local propagation (requires ACFL_BLKOPT)
"""
ACFL_BLKOPT = _ida_hexrays.ACFL_BLKOPT
"""
perform interblock transformations
"""
ACFL_GLBPROP = _ida_hexrays.ACFL_GLBPROP
"""
perform global propagation
"""
ACFL_GLBDEL = _ida_hexrays.ACFL_GLBDEL
"""
perform dead code eliminition
"""
ACFL_GUESS = _ida_hexrays.ACFL_GUESS
"""
may guess calling conventions
"""
CPBLK_FAST = _ida_hexrays.CPBLK_FAST
"""
do not update minbstkref and minbargref
"""
CPBLK_MINREF = _ida_hexrays.CPBLK_MINREF
"""
update minbstkref and minbargref
"""
CPBLK_OPTJMP = _ida_hexrays.CPBLK_OPTJMP
"""
if it becomes useless
del the jump insn at the end of the block
"""
def mbl_array_t_deserialize(*args):
    """
    mbl_array_t_deserialize(bytes, nbytes) -> mbl_array_t

    Module-level alias for the static method mbl_array_t.deserialize()
    (standard SWIG pattern for static members).
    """
    return _ida_hexrays.mbl_array_t_deserialize(*args)
class chain_keeper_t(object):
    """
    Proxy of C++ chain_keeper_t class.

    Helper that holds use-def/def-use chains while they are being
    inspected; constructed from a graph-chains object (``_gc``).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, _gc) -> chain_keeper_t
        """
        this = _ida_hexrays.new_chain_keeper_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_chain_keeper_t
    __del__ = lambda self : None;
    def front(self, *args):
        """
        front(self) -> block_chains_t
        """
        return _ida_hexrays.chain_keeper_t_front(self, *args)

    def back(self, *args):
        """
        back(self) -> block_chains_t
        """
        return _ida_hexrays.chain_keeper_t_back(self, *args)

    def for_all_chains(self, *args):
        """
        for_all_chains(self, cv, gca) -> int
        """
        return _ida_hexrays.chain_keeper_t_for_all_chains(self, *args)

# SWIG runtime registration boilerplate.
chain_keeper_t_swigregister = _ida_hexrays.chain_keeper_t_swigregister
chain_keeper_t_swigregister(chain_keeper_t)
# Chain types (GC_*) used by mbl_graph_t.get_ud()/get_du() and related
# chain-dirtiness queries.
GC_REGS_AND_STKVARS = _ida_hexrays.GC_REGS_AND_STKVARS
GC_ASR = _ida_hexrays.GC_ASR
GC_XDSU = _ida_hexrays.GC_XDSU
GC_END = _ida_hexrays.GC_END
GC_DIRTY_ALL = _ida_hexrays.GC_DIRTY_ALL
class mbl_graph_t(simple_graph_t):
    """
    Proxy of C++ mbl_graph_t class.

    Control-flow graph of a microcode array, with access to use-def (ud)
    and def-use (du) chains. Instances are obtained from
    mbl_array_t.get_graph(); the constructor is disabled.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    def is_ud_chain_dirty(self, *args):
        """
        is_ud_chain_dirty(self, gctype) -> bool
        """
        return _ida_hexrays.mbl_graph_t_is_ud_chain_dirty(self, *args)

    def is_du_chain_dirty(self, *args):
        """
        is_du_chain_dirty(self, gctype) -> bool
        """
        return _ida_hexrays.mbl_graph_t_is_du_chain_dirty(self, *args)

    def get_chain_stamp(self, *args):
        """
        get_chain_stamp(self) -> int
        """
        return _ida_hexrays.mbl_graph_t_get_chain_stamp(self, *args)

    def get_ud(self, *args):
        """
        get_ud(self, gctype) -> graph_chains_t
        """
        return _ida_hexrays.mbl_graph_t_get_ud(self, *args)

    def get_du(self, *args):
        """
        get_du(self, gctype) -> graph_chains_t
        """
        return _ida_hexrays.mbl_graph_t_get_du(self, *args)

    def is_redefined_globally(self, *args):
        """
        is_redefined_globally(self, list, b1, b2, m1, m2, maymust=MAY_ACCESS) -> bool
        """
        return _ida_hexrays.mbl_graph_t_is_redefined_globally(self, *args)

    def is_used_globally(self, *args):
        """
        is_used_globally(self, list, b1, b2, m1, m2, maymust=MAY_ACCESS) -> bool
        """
        return _ida_hexrays.mbl_graph_t_is_used_globally(self, *args)

    def get_mblock(self, *args):
        """
        get_mblock(self, n) -> mblock_t
        """
        return _ida_hexrays.mbl_graph_t_get_mblock(self, *args)

# SWIG runtime registration boilerplate.
mbl_graph_t_swigregister = _ida_hexrays.mbl_graph_t_swigregister
mbl_graph_t_swigregister(mbl_graph_t)
class codegen_t(object):
    """
    Proxy of C++ codegen_t class.

    Microcode generator context: translates processor instructions into
    microcode (``gen_micro``, ``emit``, ``emit_micro_mvm``). This class
    is a SWIG director: Python subclasses may override its virtual
    methods, hence the _self bookkeeping in __init__ and __disown__.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    mba = _swig_property(_ida_hexrays.codegen_t_mba_get, _ida_hexrays.codegen_t_mba_set)
    mb = _swig_property(_ida_hexrays.codegen_t_mb_get, _ida_hexrays.codegen_t_mb_set)
    insn = _swig_property(_ida_hexrays.codegen_t_insn_get, _ida_hexrays.codegen_t_insn_set)
    ignore_micro = _swig_property(_ida_hexrays.codegen_t_ignore_micro_get, _ida_hexrays.codegen_t_ignore_micro_set)
    def __init__(self, *args):
        """
        __init__(self, m) -> codegen_t
        """
        # Director pattern: pass self to C++ only when instantiating a
        # Python subclass, so virtual calls can be routed back to Python.
        if self.__class__ == codegen_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_codegen_t(_self, *args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_codegen_t
    __del__ = lambda self : None;
    def analyze_prolog(self, *args):
        """
        analyze_prolog(self, fc, reachable) -> merror_t
        """
        return _ida_hexrays.codegen_t_analyze_prolog(self, *args)

    def gen_micro(self, *args):
        """
        gen_micro(self) -> merror_t
        """
        return _ida_hexrays.codegen_t_gen_micro(self, *args)

    def load_operand(self, *args):
        """
        load_operand(self, opnum) -> mreg_t
        """
        return _ida_hexrays.codegen_t_load_operand(self, *args)

    def emit_micro_mvm(self, *args):
        """
        emit_micro_mvm(self, code, dtype, l, r, d, offsize) -> minsn_t
        """
        return _ida_hexrays.codegen_t_emit_micro_mvm(self, *args)

    def emit(self, *args):
        """
        emit(self, code, width, l, r, d, offsize) -> minsn_t
        emit(self, code, l, r, d) -> minsn_t
        """
        return _ida_hexrays.codegen_t_emit(self, *args)

    def __disown__(self):
        # Transfer ownership of the C++ object to the C++ side.
        self.this.disown()
        _ida_hexrays.disown_codegen_t(self)
        return weakref_proxy(self)
# SWIG runtime registration boilerplate.
codegen_t_swigregister = _ida_hexrays.codegen_t_swigregister
codegen_t_swigregister(codegen_t)
def is_kreg(*args):
    """
    is_kreg(r) -> bool
    Is a kernel register?
    @param r (C++: mreg_t)
    """
    return _ida_hexrays.is_kreg(*args)
def get_temp_regs(*args):
    """
    get_temp_regs() -> mlist_t
    Get list of temporary registers. Tempregs are temporary registers that
    are used during code generation. They do not map to regular processor
    registers. They are used only to store temporary values during
    execution of one instruction. Tempregs may not be used to pass a value
    from one block to another. In other words, at the end of a block all
    tempregs must be dead.
    """
    return _ida_hexrays.get_temp_regs(*args)
def get_hexrays_version(*args):
    """
    get_hexrays_version() -> char const *
    Get decompiler version. The returned string is of the form
    <major>.<minor>.<revision>.<build-date>
    @return: pointer to version string. For example: "2.0.0.140605"
    """
    return _ida_hexrays.get_hexrays_version(*args)
def checkout_hexrays_license(*args):
    """
    checkout_hexrays_license(silent) -> bool
    Check out a floating decompiler license. This function will display a
    dialog box if the license is not available. For non-floating licenses
    this function is effectively no-op. It is not necessary to call this
    function before decompiling. If the license was not checked out, the
    decompiler will automatically do it. This function can be used to
    check out a license in advance and ensure that a license is available.
    @param silent: silently fail if the license can not be checked out.
    (C++: bool)
    @return: false if failed
    """
    return _ida_hexrays.checkout_hexrays_license(*args)
def open_pseudocode(*args):
    """
    open_pseudocode(ea, new_window) -> vdui_t
    Open pseudocode window. The specified function is decompiled and the
    pseudocode window is opened.
    @param ea: function to decompile (C++: ea_t)
    @param new_window: 0:reuse existing window; 1:open new window; -1:
                       reuse existing window if the current view is
                       pseudocode (C++: int)
    @return: false if failed

    NOTE(review): per the signature this returns a vdui_t (presumably
    None on failure); the generated "@return: false" line above looks
    stale -- confirm against the Hex-Rays SDK.
    """
    return _ida_hexrays.open_pseudocode(*args)
def close_pseudocode(*args):
    """
    close_pseudocode(f) -> bool

    Close pseudocode window.
    @param f: pointer to window (C++: TWidget *)
    @return: false if failed
    """
    return _ida_hexrays.close_pseudocode(*args)
# --- Batch decompilation flags (VDRUN_...) ---------------------------------
# Passed in the 'flags' argument of decompile_many() below.
VDRUN_NEWFILE = _ida_hexrays.VDRUN_NEWFILE
"""
Create a new file or overwrite existing file.
"""
VDRUN_APPEND = _ida_hexrays.VDRUN_APPEND
"""
Create a new file or append to existing file.
"""
VDRUN_ONLYNEW = _ida_hexrays.VDRUN_ONLYNEW
"""
Fail if output file already exists.
"""
VDRUN_SILENT = _ida_hexrays.VDRUN_SILENT
"""
Silent decompilation.
"""
VDRUN_SENDIDB = _ida_hexrays.VDRUN_SENDIDB
"""
Send problematic databases to hex-rays.com.
"""
VDRUN_MAYSTOP = _ida_hexrays.VDRUN_MAYSTOP
"""
the user can cancel decompilation
"""
VDRUN_CMDLINE = _ida_hexrays.VDRUN_CMDLINE
"""
called from ida's command line
"""
VDRUN_STATS = _ida_hexrays.VDRUN_STATS
"""
print statistics into vd_stats.txt
"""
VDRUN_LUMINA = _ida_hexrays.VDRUN_LUMINA
"""
use lumina server
"""
def decompile_many(*args):
    """
    decompile_many(outfile, funcaddrs, flags) -> bool

    Batch decompilation. Decompile all or the specified functions
    @param outfile: name of the output file (C++: const char *)
    @param funcaddrs: list of functions to decompile. If NULL or empty,
                      then decompile all nonlib functions (C++: eavec_t
                      *)
    @param flags: Batch decompilation bits (VDRUN_...) (C++: int)
    @return: true if no internal error occurred and the user has not
             cancelled decompilation
    """
    return _ida_hexrays.decompile_many(*args)
class hexrays_failure_t(object):
    """
    Proxy of C++ hexrays_failure_t class.

    Describes a decompilation failure: an error code ('code'), an address
    related to the error ('errea', per the field name) and an optional
    message string ('str').  desc() builds a printable description.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # error code (C++: merror_t)
    code = _swig_property(_ida_hexrays.hexrays_failure_t_code_get, _ida_hexrays.hexrays_failure_t_code_set)
    # address associated with the failure
    errea = _swig_property(_ida_hexrays.hexrays_failure_t_errea_get, _ida_hexrays.hexrays_failure_t_errea_set)
    # optional failure message text
    str = _swig_property(_ida_hexrays.hexrays_failure_t_str_get, _ida_hexrays.hexrays_failure_t_str_set)
    def __init__(self, *args):
        """
        __init__(self) -> hexrays_failure_t
        __init__(self, c, ea, buf=None) -> hexrays_failure_t
        __init__(self, c, ea, buf) -> hexrays_failure_t
        """
        this = _ida_hexrays.new_hexrays_failure_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def desc(self, *args):
        """
        desc(self) -> qstring

        Return a printable description of the failure.
        """
        return _ida_hexrays.hexrays_failure_t_desc(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_hexrays_failure_t
    __del__ = lambda self : None;
hexrays_failure_t_swigregister = _ida_hexrays.hexrays_failure_t_swigregister
hexrays_failure_t_swigregister(hexrays_failure_t)
class vd_failure_t(object):
    """
    Proxy of C++ vd_failure_t class.

    Exception-style wrapper around a 'hexrays_failure_t' (stored in the
    'hf' member).  Base class of vd_interr_t.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # the wrapped hexrays_failure_t describing the error
    hf = _swig_property(_ida_hexrays.vd_failure_t_hf_get, _ida_hexrays.vd_failure_t_hf_set)
    def __init__(self, *args):
        """
        __init__(self) -> vd_failure_t
        __init__(self, code, ea, buf=None) -> vd_failure_t
        __init__(self, code, ea, buf) -> vd_failure_t
        __init__(self, _hf) -> vd_failure_t
        """
        this = _ida_hexrays.new_vd_failure_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def desc(self, *args):
        """
        desc(self) -> qstring

        Return a printable description of the failure.
        """
        return _ida_hexrays.vd_failure_t_desc(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_vd_failure_t
    __del__ = lambda self : None;
vd_failure_t_swigregister = _ida_hexrays.vd_failure_t_swigregister
vd_failure_t_swigregister(vd_failure_t)
class vd_interr_t(vd_failure_t):
    """
    Proxy of C++ vd_interr_t class.

    Specialization of vd_failure_t — per the name, presumably represents
    an internal decompiler error (confirm against hexrays.hpp).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, ea, buf) -> vd_interr_t
        """
        this = _ida_hexrays.new_vd_interr_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_vd_interr_t
    __del__ = lambda self : None;
vd_interr_t_swigregister = _ida_hexrays.vd_interr_t_swigregister
vd_interr_t_swigregister(vd_interr_t)
def send_database(*args):
    """
    send_database(err, silent)

    Send the database to Hex-Rays. This function sends the current
    database to the Hex-Rays server. The database is sent in the
    compressed form over an encrypted (SSL) connection.
    @param err: failure description object. Empty hexrays_failure_t
                object can be used if error information is not available.
                (C++: const hexrays_failure_t &)
    @param silent: if false, a dialog box will be displayed before sending
                   the database. (C++: bool)
    """
    return _ida_hexrays.send_database(*args)
class gco_info_t(object):
    """
    Proxy of C++ gco_info_t class.

    Describes the instruction operand under the cursor, as filled in by
    get_current_operand(): operand 'name', 'size' and 'flags' (GCO_...
    bits, queried via is_reg()/is_use()/is_def()).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # operand name
    name = _swig_property(_ida_hexrays.gco_info_t_name_get, _ida_hexrays.gco_info_t_name_set)
    # operand size
    size = _swig_property(_ida_hexrays.gco_info_t_size_get, _ida_hexrays.gco_info_t_size_set)
    # combination of GCO_... bits
    flags = _swig_property(_ida_hexrays.gco_info_t_flags_get, _ida_hexrays.gco_info_t_flags_set)
    def is_reg(self, *args):
        """
        is_reg(self) -> bool

        Is a register operand? (GCO_REG; otherwise a stack variable)
        """
        return _ida_hexrays.gco_info_t_is_reg(self, *args)
    def is_use(self, *args):
        """
        is_use(self) -> bool

        Is a source (used) operand? (GCO_USE)
        """
        return _ida_hexrays.gco_info_t_is_use(self, *args)
    def is_def(self, *args):
        """
        is_def(self) -> bool

        Is a destination (defined) operand? (GCO_DEF)
        """
        return _ida_hexrays.gco_info_t_is_def(self, *args)
    def append_to_list(self, *args):
        """
        append_to_list(self, list, mba) -> bool
        """
        return _ida_hexrays.gco_info_t_append_to_list(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> gco_info_t
        """
        this = _ida_hexrays.new_gco_info_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_gco_info_t
    __del__ = lambda self : None;
gco_info_t_swigregister = _ida_hexrays.gco_info_t_swigregister
gco_info_t_swigregister(gco_info_t)
# --- gco_info_t.flags bits (GCO_...) ---------------------------------------
GCO_STK = _ida_hexrays.GCO_STK
"""
a stack variable
"""
GCO_REG = _ida_hexrays.GCO_REG
"""
is register? otherwise a stack variable
"""
GCO_USE = _ida_hexrays.GCO_USE
"""
is source operand?
"""
GCO_DEF = _ida_hexrays.GCO_DEF
"""
is destination operand?
"""
def get_current_operand(*args):
    """
    get_current_operand(out) -> bool

    Get the instruction operand under the cursor. This function determines
    the operand that is under the cursor in the active disassembly
    listing. If the operand refers to a register or stack variable, it
    returns true.
    @param out: receives the operand description (C++: gco_info_t *)
    """
    return _ida_hexrays.get_current_operand(*args)
def remitem(*args):
    """
    remitem(e)

    NOTE(review): undocumented in this wrapper; per the name, presumably
    removes/cleans up a ctree item 'e' — confirm against hexrays.hpp.
    """
    return _ida_hexrays.remitem(*args)
# --- ctype_t opcodes -------------------------------------------------------
# Expression opcodes (cot_...): used as the 'op' of expression items.
cot_empty = _ida_hexrays.cot_empty
cot_comma = _ida_hexrays.cot_comma
cot_asg = _ida_hexrays.cot_asg
cot_asgbor = _ida_hexrays.cot_asgbor
cot_asgxor = _ida_hexrays.cot_asgxor
cot_asgband = _ida_hexrays.cot_asgband
cot_asgadd = _ida_hexrays.cot_asgadd
cot_asgsub = _ida_hexrays.cot_asgsub
cot_asgmul = _ida_hexrays.cot_asgmul
cot_asgsshr = _ida_hexrays.cot_asgsshr
cot_asgushr = _ida_hexrays.cot_asgushr
cot_asgshl = _ida_hexrays.cot_asgshl
cot_asgsdiv = _ida_hexrays.cot_asgsdiv
cot_asgudiv = _ida_hexrays.cot_asgudiv
cot_asgsmod = _ida_hexrays.cot_asgsmod
cot_asgumod = _ida_hexrays.cot_asgumod
cot_tern = _ida_hexrays.cot_tern
cot_lor = _ida_hexrays.cot_lor
cot_land = _ida_hexrays.cot_land
cot_bor = _ida_hexrays.cot_bor
cot_xor = _ida_hexrays.cot_xor
cot_band = _ida_hexrays.cot_band
cot_eq = _ida_hexrays.cot_eq
cot_ne = _ida_hexrays.cot_ne
cot_sge = _ida_hexrays.cot_sge
cot_uge = _ida_hexrays.cot_uge
cot_sle = _ida_hexrays.cot_sle
cot_ule = _ida_hexrays.cot_ule
cot_sgt = _ida_hexrays.cot_sgt
cot_ugt = _ida_hexrays.cot_ugt
cot_slt = _ida_hexrays.cot_slt
cot_ult = _ida_hexrays.cot_ult
cot_sshr = _ida_hexrays.cot_sshr
cot_ushr = _ida_hexrays.cot_ushr
cot_shl = _ida_hexrays.cot_shl
cot_add = _ida_hexrays.cot_add
cot_sub = _ida_hexrays.cot_sub
cot_mul = _ida_hexrays.cot_mul
cot_sdiv = _ida_hexrays.cot_sdiv
cot_udiv = _ida_hexrays.cot_udiv
cot_smod = _ida_hexrays.cot_smod
cot_umod = _ida_hexrays.cot_umod
cot_fadd = _ida_hexrays.cot_fadd
cot_fsub = _ida_hexrays.cot_fsub
cot_fmul = _ida_hexrays.cot_fmul
cot_fdiv = _ida_hexrays.cot_fdiv
cot_fneg = _ida_hexrays.cot_fneg
cot_neg = _ida_hexrays.cot_neg
cot_cast = _ida_hexrays.cot_cast
cot_lnot = _ida_hexrays.cot_lnot
cot_bnot = _ida_hexrays.cot_bnot
cot_ptr = _ida_hexrays.cot_ptr
cot_ref = _ida_hexrays.cot_ref
cot_postinc = _ida_hexrays.cot_postinc
cot_postdec = _ida_hexrays.cot_postdec
cot_preinc = _ida_hexrays.cot_preinc
cot_predec = _ida_hexrays.cot_predec
cot_call = _ida_hexrays.cot_call
cot_idx = _ida_hexrays.cot_idx
cot_memref = _ida_hexrays.cot_memref
cot_memptr = _ida_hexrays.cot_memptr
cot_num = _ida_hexrays.cot_num
cot_fnum = _ida_hexrays.cot_fnum
cot_str = _ida_hexrays.cot_str
cot_obj = _ida_hexrays.cot_obj
cot_var = _ida_hexrays.cot_var
cot_insn = _ida_hexrays.cot_insn
cot_sizeof = _ida_hexrays.cot_sizeof
cot_helper = _ida_hexrays.cot_helper
cot_type = _ida_hexrays.cot_type
cot_last = _ida_hexrays.cot_last
# Statement opcodes (cit_...): used as the 'op' of statement items
# (blocks, if/for/while/do, switch, break/continue/return/goto, asm).
cit_empty = _ida_hexrays.cit_empty
cit_block = _ida_hexrays.cit_block
cit_expr = _ida_hexrays.cit_expr
cit_if = _ida_hexrays.cit_if
cit_for = _ida_hexrays.cit_for
cit_while = _ida_hexrays.cit_while
cit_do = _ida_hexrays.cit_do
cit_switch = _ida_hexrays.cit_switch
cit_break = _ida_hexrays.cit_break
cit_continue = _ida_hexrays.cit_continue
cit_return = _ida_hexrays.cit_return
cit_goto = _ida_hexrays.cit_goto
cit_asm = _ida_hexrays.cit_asm
cit_end = _ida_hexrays.cit_end
class operator_info_t(object):
    """
    Proxy of C++ operator_info_t class.

    Printing/parsing attributes of a ctype operator: its textual form
    ('text'), 'precedence', 'valency' (operand count), 'fixtype'
    (FX_... fixity) and 'flags' (COI_... bits).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # textual representation of the operator
    text = _swig_property(_ida_hexrays.operator_info_t_text_get, _ida_hexrays.operator_info_t_text_set)
    # operator precedence
    precedence = _swig_property(_ida_hexrays.operator_info_t_precedence_get, _ida_hexrays.operator_info_t_precedence_set)
    # number of operands
    valency = _swig_property(_ida_hexrays.operator_info_t_valency_get, _ida_hexrays.operator_info_t_valency_set)
    # fixity: one of the FX_... constants below
    fixtype = _swig_property(_ida_hexrays.operator_info_t_fixtype_get, _ida_hexrays.operator_info_t_fixtype_set)
    # combination of COI_... bits
    flags = _swig_property(_ida_hexrays.operator_info_t_flags_get, _ida_hexrays.operator_info_t_flags_set)
    def __init__(self, *args):
        """
        __init__(self) -> operator_info_t
        """
        this = _ida_hexrays.new_operator_info_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_operator_info_t
    __del__ = lambda self : None;
operator_info_t_swigregister = _ida_hexrays.operator_info_t_swigregister
operator_info_t_swigregister(operator_info_t)
# operator_info_t.fixtype values (operator fixity):
FX_NONE = cvar.FX_NONE
FX_INFIX = cvar.FX_INFIX
FX_PREFIX = cvar.FX_PREFIX
FX_POSTFIX = cvar.FX_POSTFIX
FX_TERNARY = cvar.FX_TERNARY
# operator_info_t.flags bits (exact semantics: see hexrays.hpp):
COI_RL = cvar.COI_RL
COI_LR = cvar.COI_LR
COI_INT = cvar.COI_INT
COI_FP = cvar.COI_FP
COI_SH = cvar.COI_SH
COI_SGN = cvar.COI_SGN
COI_SBN = cvar.COI_SBN
# --- ctype_t operator conversion helpers -----------------------------------
def negated_relation(*args):
    """
    negated_relation(op) -> ctype_t

    Negate a comparison operator. For example, cot_sge becomes cot_slt.
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.negated_relation(*args)
def swapped_relation(*args):
    """
    swapped_relation(op) -> ctype_t

    Swap a comparison operator. For example, cot_sge becomes cot_sle.
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.swapped_relation(*args)
def get_op_signness(*args):
    """
    get_op_signness(op) -> type_sign_t

    Get operator sign. Meaningful for sign-dependent operators, like
    cot_sdiv.
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.get_op_signness(*args)
def asgop(*args):
    """
    asgop(cop) -> ctype_t

    Convert plain operator into assignment operator. For example, cot_add
    returns cot_asgadd.
    @param cop (C++: ctype_t)
    """
    return _ida_hexrays.asgop(*args)
def asgop_revert(*args):
    """
    asgop_revert(cop) -> ctype_t

    Convert assignment operator into plain operator. For example,
    cot_asgadd returns cot_add
    @param cop (C++: ctype_t)
    @return: cot_empty if the input operator is not an assignment
             operator.
    """
    return _ida_hexrays.asgop_revert(*args)
# --- ctype_t classification predicates -------------------------------------
# Each predicate takes a ctype_t opcode (cot_... / cit_...) and returns bool.
def op_uses_x(*args):
    """
    op_uses_x(op) -> bool

    Does operator use the 'x' field of 'cexpr_t' ?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.op_uses_x(*args)
def op_uses_y(*args):
    """
    op_uses_y(op) -> bool

    Does operator use the 'y' field of 'cexpr_t' ?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.op_uses_y(*args)
def op_uses_z(*args):
    """
    op_uses_z(op) -> bool

    Does operator use the 'z' field of 'cexpr_t' ?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.op_uses_z(*args)
def is_binary(*args):
    """
    is_binary(op) -> bool

    Is binary operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_binary(*args)
def is_unary(*args):
    """
    is_unary(op) -> bool

    Is unary operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_unary(*args)
def is_relational(*args):
    """
    is_relational(op) -> bool

    Is comparison operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_relational(*args)
def is_assignment(*args):
    """
    is_assignment(op) -> bool

    Is assignment operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_assignment(*args)
def accepts_udts(*args):
    """
    accepts_udts(op) -> bool

    Is the operator allowed on structure/union operands?
    (cf. accepts_small_udts below)
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.accepts_udts(*args)
def is_prepost(*args):
    """
    is_prepost(op) -> bool

    Is pre/post increment/decrement operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_prepost(*args)
def is_commutative(*args):
    """
    is_commutative(op) -> bool

    Is commutative operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_commutative(*args)
def is_additive(*args):
    """
    is_additive(op) -> bool

    Is additive operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_additive(*args)
def is_multiplicative(*args):
    """
    is_multiplicative(op) -> bool

    Is multiplicative operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_multiplicative(*args)
def is_bitop(*args):
    """
    is_bitop(op) -> bool

    Is bit related operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_bitop(*args)
def is_logical(*args):
    """
    is_logical(op) -> bool

    Is logical operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_logical(*args)
def is_loop(*args):
    """
    is_loop(op) -> bool

    Is loop statement code?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_loop(*args)
def is_break_consumer(*args):
    """
    is_break_consumer(op) -> bool

    Does a break statement influence the specified statement code?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_break_consumer(*args)
def is_lvalue(*args):
    """
    is_lvalue(op) -> bool

    Is Lvalue operator?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.is_lvalue(*args)
def accepts_small_udts(*args):
    """
    accepts_small_udts(op) -> bool

    Is the operator allowed on small structure or union?
    @param op (C++: ctype_t)
    """
    return _ida_hexrays.accepts_small_udts(*args)
class cnumber_t(object):
    """
    Proxy of C++ cnumber_t class.

    A constant number in the ctree: raw value ('_value') plus number
    format information ('nf').  value(type) returns the value interpreted
    with the given type; full comparison operators are provided.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # raw numeric value
    _value = _swig_property(_ida_hexrays.cnumber_t__value_get, _ida_hexrays.cnumber_t__value_set)
    # number format descriptor
    nf = _swig_property(_ida_hexrays.cnumber_t_nf_get, _ida_hexrays.cnumber_t_nf_set)
    def __init__(self, *args):
        """
        __init__(self, _opnum=0) -> cnumber_t
        """
        this = _ida_hexrays.new_cnumber_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def _print(self, *args):
        """
        _print(self, type, parent=None, nice_stroff=None)
        """
        return _ida_hexrays.cnumber_t__print(self, *args)
    def value(self, *args):
        """
        value(self, type) -> uint64

        Return the value interpreted according to 'type'.
        """
        return _ida_hexrays.cnumber_t_value(self, *args)
    def assign(self, *args):
        """
        assign(self, v, nbytes, sign)

        Assign a new value 'v' of 'nbytes' bytes with signedness 'sign'.
        """
        return _ida_hexrays.cnumber_t_assign(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cnumber_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cnumber_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.cnumber_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.cnumber_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.cnumber_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.cnumber_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.cnumber_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_cnumber_t
    __del__ = lambda self : None;
cnumber_t_swigregister = _ida_hexrays.cnumber_t_swigregister
cnumber_t_swigregister(cnumber_t)
class var_ref_t(object):
    """
    Proxy of C++ var_ref_t class.

    Reference to a variable, identified by the owning microcode array
    ('mba') and the variable index ('idx').  Fully comparable.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # owning mbl_array_t (microcode)
    mba = _swig_property(_ida_hexrays.var_ref_t_mba_get, _ida_hexrays.var_ref_t_mba_set)
    # index of the variable inside 'mba'
    idx = _swig_property(_ida_hexrays.var_ref_t_idx_get, _ida_hexrays.var_ref_t_idx_set)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.var_ref_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.var_ref_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.var_ref_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.var_ref_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.var_ref_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.var_ref_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.var_ref_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> var_ref_t
        """
        this = _ida_hexrays.new_var_ref_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_var_ref_t
    __del__ = lambda self : None;
var_ref_t_swigregister = _ida_hexrays.var_ref_t_swigregister
var_ref_t_swigregister(var_ref_t)
class ctree_visitor_t(object):
    """
    Proxy of C++ ctree_visitor_t class.

    Base class for traversing a ctree.  Subclass it, override
    visit_insn()/visit_expr() (and leave_insn()/leave_expr() when CV_POST
    is set), then start the walk with apply_to() or apply_to_exprs().
    Behavior is controlled by the CV_... bits in 'cv_flags' (see the
    constants defined right after this class).  NOTE(review): a nonzero
    return value from a visit callback presumably aborts the traversal —
    confirm against hexrays.hpp.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # combination of CV_... bits controlling the traversal
    cv_flags = _swig_property(_ida_hexrays.ctree_visitor_t_cv_flags_get, _ida_hexrays.ctree_visitor_t_cv_flags_set)
    def maintain_parents(self, *args):
        """
        maintain_parents(self) -> bool

        Is parent information maintained? (CV_PARENTS set)
        """
        return _ida_hexrays.ctree_visitor_t_maintain_parents(self, *args)
    def must_prune(self, *args):
        """
        must_prune(self) -> bool

        Should the walk be pruned at the current item? (CV_PRUNE set)
        """
        return _ida_hexrays.ctree_visitor_t_must_prune(self, *args)
    def must_restart(self, *args):
        """
        must_restart(self) -> bool

        Should the enumeration restart? (CV_RESTART set)
        """
        return _ida_hexrays.ctree_visitor_t_must_restart(self, *args)
    def is_postorder(self, *args):
        """
        is_postorder(self) -> bool

        Are the leave...() callbacks enabled? (CV_POST set)
        """
        return _ida_hexrays.ctree_visitor_t_is_postorder(self, *args)
    def only_insns(self, *args):
        """
        only_insns(self) -> bool

        Visit only statements? (CV_INSNS set)
        """
        return _ida_hexrays.ctree_visitor_t_only_insns(self, *args)
    def prune_now(self, *args):
        """
        prune_now(self)

        Request pruning of the walk (set CV_PRUNE).
        """
        return _ida_hexrays.ctree_visitor_t_prune_now(self, *args)
    def clr_prune(self, *args):
        """
        clr_prune(self)

        Clear the prune request (clear CV_PRUNE).
        """
        return _ida_hexrays.ctree_visitor_t_clr_prune(self, *args)
    def set_restart(self, *args):
        """
        set_restart(self)

        Request restart of the enumeration (set CV_RESTART).
        """
        return _ida_hexrays.ctree_visitor_t_set_restart(self, *args)
    def clr_restart(self, *args):
        """
        clr_restart(self)

        Clear the restart request (clear CV_RESTART).
        """
        return _ida_hexrays.ctree_visitor_t_clr_restart(self, *args)
    # stack of parent items, maintained when CV_PARENTS is set
    parents = _swig_property(_ida_hexrays.ctree_visitor_t_parents_get, _ida_hexrays.ctree_visitor_t_parents_set)
    def __init__(self, *args):
        """
        __init__(self, _flags) -> ctree_visitor_t
        """
        # pass None as the director 'self' when instantiated directly
        # (not subclassed), as required by the SWIG director machinery
        if self.__class__ == ctree_visitor_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_ctree_visitor_t(_self, *args)
        try: self.this.append(this)
        except: self.this = this
    def apply_to(self, *args):
        """
        apply_to(self, item, parent) -> int

        Traverse both statements and expressions starting at 'item'.
        """
        return _ida_hexrays.ctree_visitor_t_apply_to(self, *args)
    def apply_to_exprs(self, *args):
        """
        apply_to_exprs(self, item, parent) -> int

        Traverse only the expressions starting at 'item'.
        """
        return _ida_hexrays.ctree_visitor_t_apply_to_exprs(self, *args)
    def parent_expr(self, *args):
        """
        parent_expr(self) -> cexpr_t
        """
        return _ida_hexrays.ctree_visitor_t_parent_expr(self, *args)
    def parent_insn(self, *args):
        """
        parent_insn(self) -> cinsn_t
        """
        return _ida_hexrays.ctree_visitor_t_parent_insn(self, *args)
    def visit_insn(self, *args):
        """
        visit_insn(self, arg0) -> int

        Callback: a statement is visited.  Override in subclasses.
        """
        return _ida_hexrays.ctree_visitor_t_visit_insn(self, *args)
    def visit_expr(self, *args):
        """
        visit_expr(self, arg0) -> int

        Callback: an expression is visited.  Override in subclasses.
        """
        return _ida_hexrays.ctree_visitor_t_visit_expr(self, *args)
    def leave_insn(self, *args):
        """
        leave_insn(self, arg0) -> int

        Callback: a statement is left (only when CV_POST is set).
        """
        return _ida_hexrays.ctree_visitor_t_leave_insn(self, *args)
    def leave_expr(self, *args):
        """
        leave_expr(self, arg0) -> int

        Callback: an expression is left (only when CV_POST is set).
        """
        return _ida_hexrays.ctree_visitor_t_leave_expr(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_ctree_visitor_t
    __del__ = lambda self : None;
    def __disown__(self):
        self.this.disown()
        _ida_hexrays.disown_ctree_visitor_t(self)
        return weakref_proxy(self)
ctree_visitor_t_swigregister = _ida_hexrays.ctree_visitor_t_swigregister
ctree_visitor_t_swigregister(ctree_visitor_t)
# --- ctree_visitor_t.cv_flags bits (CV_...) --------------------------------
CV_FAST = _ida_hexrays.CV_FAST
"""
do not maintain parent information
"""
CV_PRUNE = _ida_hexrays.CV_PRUNE
"""
this bit is set by visit...() to prune the walk
"""
CV_PARENTS = _ida_hexrays.CV_PARENTS
"""
maintain parent information
"""
CV_POST = _ida_hexrays.CV_POST
"""
call the leave...() functions
"""
CV_RESTART = _ida_hexrays.CV_RESTART
"""
restart enumeration at the top expr (apply_to_exprs)
"""
CV_INSNS = _ida_hexrays.CV_INSNS
"""
visit only statements, prune all expressions do not use before the
final ctree maturity because expressions may contain statements at
intermediate stages (see cot_insn). Otherwise you risk missing
statements embedded into expressions.
"""
class ctree_parentee_t(ctree_visitor_t):
    """
    Proxy of C++ ctree_parentee_t class.

    A ctree_visitor_t that always maintains parent information
    (the 'parents' stack) during the walk.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, post=False) -> ctree_parentee_t
        """
        # pass None as the director 'self' when instantiated directly
        # (not subclassed), as required by the SWIG director machinery
        if self.__class__ == ctree_parentee_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_ctree_parentee_t(_self, *args)
        try: self.this.append(this)
        except: self.this = this
    def recalc_parent_types(self, *args):
        """
        recalc_parent_types(self) -> bool

        Recalculate the types of the parent items after a modification.
        """
        return _ida_hexrays.ctree_parentee_t_recalc_parent_types(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_ctree_parentee_t
    __del__ = lambda self : None;
    def __disown__(self):
        self.this.disown()
        _ida_hexrays.disown_ctree_parentee_t(self)
        return weakref_proxy(self)
ctree_parentee_t_swigregister = _ida_hexrays.ctree_parentee_t_swigregister
ctree_parentee_t_swigregister(ctree_parentee_t)
class cfunc_parentee_t(ctree_parentee_t):
    """
    Proxy of C++ cfunc_parentee_t class.

    A ctree_parentee_t bound to a specific decompiled function ('func').
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # the decompiled function being visited
    func = _swig_property(_ida_hexrays.cfunc_parentee_t_func_get, _ida_hexrays.cfunc_parentee_t_func_set)
    def __init__(self, *args):
        """
        __init__(self, f, post=False) -> cfunc_parentee_t
        """
        # pass None as the director 'self' when instantiated directly
        # (not subclassed), as required by the SWIG director machinery
        if self.__class__ == cfunc_parentee_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_cfunc_parentee_t(_self, *args)
        try: self.this.append(this)
        except: self.this = this
    def calc_rvalue_type(self, *args):
        """
        calc_rvalue_type(self, target, e) -> bool

        Calculate the type that expression 'e' should have when used as
        an rvalue, storing it into 'target'.
        """
        return _ida_hexrays.cfunc_parentee_t_calc_rvalue_type(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_cfunc_parentee_t
    __del__ = lambda self : None;
    def __disown__(self):
        self.this.disown()
        _ida_hexrays.disown_cfunc_parentee_t(self)
        return weakref_proxy(self)
cfunc_parentee_t_swigregister = _ida_hexrays.cfunc_parentee_t_swigregister
cfunc_parentee_t_swigregister(cfunc_parentee_t)
# --- ctree maturity levels (CMAT_...) --------------------------------------
# Stages of ctree construction, from freshly built to final
# (cf. the "final ctree maturity" note in the CV_INSNS docstring).
CMAT_ZERO = _ida_hexrays.CMAT_ZERO
CMAT_BUILT = _ida_hexrays.CMAT_BUILT
CMAT_TRANS1 = _ida_hexrays.CMAT_TRANS1
CMAT_NICE = _ida_hexrays.CMAT_NICE
CMAT_TRANS2 = _ida_hexrays.CMAT_TRANS2
CMAT_CPA = _ida_hexrays.CMAT_CPA
CMAT_TRANS3 = _ida_hexrays.CMAT_TRANS3
CMAT_CASTED = _ida_hexrays.CMAT_CASTED
CMAT_FINAL = _ida_hexrays.CMAT_FINAL
# --- Item preciser values (ITP_...), used in treeloc_t.itp -----------------
# Positions of an item within a C statement (presumably used to anchor
# comments; confirm against hexrays.hpp).
ITP_EMPTY = _ida_hexrays.ITP_EMPTY
ITP_ARG1 = _ida_hexrays.ITP_ARG1
ITP_ARG64 = _ida_hexrays.ITP_ARG64
ITP_BRACE1 = _ida_hexrays.ITP_BRACE1
ITP_INNER_LAST = _ida_hexrays.ITP_INNER_LAST
ITP_ASM = _ida_hexrays.ITP_ASM
ITP_ELSE = _ida_hexrays.ITP_ELSE
ITP_DO = _ida_hexrays.ITP_DO
ITP_SEMI = _ida_hexrays.ITP_SEMI
ITP_CURLY1 = _ida_hexrays.ITP_CURLY1
ITP_CURLY2 = _ida_hexrays.ITP_CURLY2
ITP_BRACE2 = _ida_hexrays.ITP_BRACE2
ITP_COLON = _ida_hexrays.ITP_COLON
ITP_BLOCK1 = _ida_hexrays.ITP_BLOCK1
ITP_BLOCK2 = _ida_hexrays.ITP_BLOCK2
ITP_CASE = _ida_hexrays.ITP_CASE
ITP_SIGN = _ida_hexrays.ITP_SIGN
class treeloc_t(object):
    """
    Proxy of C++ treeloc_t class.

    Location inside a ctree: an address ('ea') plus an item preciser
    ('itp', one of the ITP_... constants).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # address of the location
    ea = _swig_property(_ida_hexrays.treeloc_t_ea_get, _ida_hexrays.treeloc_t_ea_set)
    # item preciser: one of the ITP_... constants
    itp = _swig_property(_ida_hexrays.treeloc_t_itp_get, _ida_hexrays.treeloc_t_itp_set)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.treeloc_t___lt__(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.treeloc_t___eq__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> treeloc_t
        """
        this = _ida_hexrays.new_treeloc_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_treeloc_t
    __del__ = lambda self : None;
treeloc_t_swigregister = _ida_hexrays.treeloc_t_swigregister
treeloc_t_swigregister(treeloc_t)
# Comment retrieval modes (RETRIEVE_...) — presumably control whether a
# comment may be fetched repeatedly; confirm against hexrays.hpp.
RETRIEVE_ONCE = _ida_hexrays.RETRIEVE_ONCE
RETRIEVE_ALWAYS = _ida_hexrays.RETRIEVE_ALWAYS
class citem_cmt_t(object):
    """
    Proxy of C++ citem_cmt_t class.

    A comment string attached to a ctree item.  The 'used' flag tracks
    whether the comment has been consumed — presumably during pseudocode
    generation; confirm against hexrays.hpp.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # True once the comment has been used (see class docstring)
    used = _swig_property(_ida_hexrays.citem_cmt_t_used_get, _ida_hexrays.citem_cmt_t_used_set)
    def __init__(self, *args):
        """
        __init__(self) -> citem_cmt_t
        __init__(self, s) -> citem_cmt_t
        """
        this = _ida_hexrays.new_citem_cmt_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def c_str(self, *args):
        """
        c_str(self) -> char const *

        Return the comment text.
        """
        return _ida_hexrays.citem_cmt_t_c_str(self, *args)
    def __str__(self, *args):
        """
        __str__(self) -> char const *
        """
        return _ida_hexrays.citem_cmt_t___str__(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_citem_cmt_t
    __del__ = lambda self : None;
citem_cmt_t_swigregister = _ida_hexrays.citem_cmt_t_swigregister
citem_cmt_t_swigregister(citem_cmt_t)
class citem_locator_t(object):
    """
    Proxy of C++ citem_locator_t class.

    Identifies a ctree item by its address ('ea') and item opcode ('op',
    a ctype_t).  Fully comparable.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # item address
    ea = _swig_property(_ida_hexrays.citem_locator_t_ea_get, _ida_hexrays.citem_locator_t_ea_set)
    # item opcode (ctype_t: cot_... / cit_...)
    op = _swig_property(_ida_hexrays.citem_locator_t_op_get, _ida_hexrays.citem_locator_t_op_set)
    def __init__(self, *args):
        """
        __init__(self, _ea, _op) -> citem_locator_t
        __init__(self, i) -> citem_locator_t
        """
        this = _ida_hexrays.new_citem_locator_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.citem_locator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.citem_locator_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.citem_locator_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.citem_locator_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.citem_locator_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.citem_locator_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.citem_locator_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_citem_locator_t
    __del__ = lambda self : None;
citem_locator_t_swigregister = _ida_hexrays.citem_locator_t_swigregister
citem_locator_t_swigregister(citem_locator_t)
class bit_bound_t(object):
    """
    Proxy of C++ bit_bound_t class.

    Bit precision descriptor with two counters, 'nbits' and 'sbits'
    (per the field names, presumably the number of significant bits and
    sign bits — confirm against hexrays.hpp).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    nbits = _swig_property(_ida_hexrays.bit_bound_t_nbits_get, _ida_hexrays.bit_bound_t_nbits_set)
    sbits = _swig_property(_ida_hexrays.bit_bound_t_sbits_get, _ida_hexrays.bit_bound_t_sbits_set)
    def __init__(self, *args):
        """
        __init__(self, n=0, s=0) -> bit_bound_t
        """
        this = _ida_hexrays.new_bit_bound_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_bit_bound_t
    __del__ = lambda self : None;
bit_bound_t_swigregister = _ida_hexrays.bit_bound_t_swigregister
bit_bound_t_swigregister(bit_bound_t)
class citem_t(object):
"""
Proxy of C++ citem_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
ea = _swig_property(_ida_hexrays.citem_t_ea_get, _ida_hexrays.citem_t_ea_set)
label_num = _swig_property(_ida_hexrays.citem_t_label_num_get, _ida_hexrays.citem_t_label_num_set)
index = _swig_property(_ida_hexrays.citem_t_index_get, _ida_hexrays.citem_t_index_set)
def __init__(self, *args):
"""
__init__(self) -> citem_t
__init__(self, o) -> citem_t
"""
this = _ida_hexrays.new_citem_t(*args)
try: self.this.append(this)
except: self.this = this
def swap(self, *args):
"""
swap(self, r)
"""
return _ida_hexrays.citem_t_swap(self, *args)
def is_expr(self, *args):
"""
is_expr(self) -> bool
"""
return _ida_hexrays.citem_t_is_expr(self, *args)
def contains_expr(self, *args):
"""
contains_expr(self, e) -> bool
"""
return _ida_hexrays.citem_t_contains_expr(self, *args)
def contains_label(self, *args):
"""
contains_label(self) -> bool
"""
return _ida_hexrays.citem_t_contains_label(self, *args)
def find_parent_of(self, *args):
"""
find_parent_of(self, sitem) -> citem_t
find_parent_of(self, item) -> citem_t
"""
return _ida_hexrays.citem_t_find_parent_of(self, *args)
def find_closest_addr(self, *args):
"""
find_closest_addr(self, _ea) -> citem_t
"""
return _ida_hexrays.citem_t_find_closest_addr(self, *args)
def print1(self, *args):
"""
print1(self, func)
"""
return _ida_hexrays.citem_t_print1(self, *args)
__swig_destroy__ = _ida_hexrays.delete_citem_t
__del__ = lambda self : None;
cinsn = _swig_property(_ida_hexrays.citem_t_cinsn_get)
cexpr = _swig_property(_ida_hexrays.citem_t_cexpr_get)
def _get_op(self, *args):
"""
_get_op(self) -> ctype_t
"""
return _ida_hexrays.citem_t__get_op(self, *args)
def _set_op(self, *args):
"""
_set_op(self, v)
"""
return _ida_hexrays.citem_t__set_op(self, *args)
def _ensure_no_op(self):
if self.op not in [cot_empty, cit_empty]:
raise Exception("%s has op %s; cannot be modified" % (self, self.op))
return True
op = property(
_get_op,
lambda self, v: self._ensure_no_op() and self._set_op(v))
def __dbg_get_meminfo(self, *args):
"""
__dbg_get_meminfo(self) -> qstring
"""
return _ida_hexrays.citem_t___dbg_get_meminfo(self, *args)
def __dbg_get_registered_kind(self, *args):
"""
__dbg_get_registered_kind(self) -> int
"""
return _ida_hexrays.citem_t___dbg_get_registered_kind(self, *args)
def _obj_id(self, *args):
"""
_obj_id(self) -> PyObject *
"""
return _ida_hexrays.citem_t__obj_id(self, *args)
obj_id = property(_obj_id)
def _ensure_cond(self, ok, cond_str):
if not ok:
raise Exception("Condition \"%s\" not verified" % cond_str)
return True
def _ensure_no_obj(self, o, attr, attr_is_acquired):
if attr_is_acquired and o is not None:
raise Exception("%s already owns attribute \"%s\" (%s); cannot be modified" % (self, attr, o))
return True
def _acquire_ownership(self, v, acquire):
if acquire and (v is not None) and not isinstance(v, (int, long)):
if not v.thisown:
raise Exception("%s is already owned, and cannot be reused" % v)
v.thisown = False
dereg = getattr(v, "_deregister", None)
if dereg:
dereg()
return True
def _maybe_disown_and_deregister(self):
if self.thisown:
self.thisown = False
self._deregister()
def _own_and_register(self):
assert(not self.thisown)
self.thisown = True
self._register()
def replace_by(self, o):
assert(isinstance(o, (cexpr_t, cinsn_t)))
o._maybe_disown_and_deregister()
self._replace_by(o)
def _meminfo(self):
cpp = self.__dbg_get_meminfo()
rkind = self.__dbg_get_registered_kind()
rkind_str = [
"(not owned)",
"cfuncptr_t",
"cinsn_t",
"cexpr_t",
"cblock_t",
"mbl_array_t",
"mop_t",
"minsn_t",
"optinsn_t",
"optblock_t",
"valrng_t"][rkind]
return "%s [thisown=%s, owned by IDAPython as=%s]" % (
cpp,
self.thisown,
rkind_str)
meminfo = property(_meminfo)
# Register the citem_t proxy class with the SWIG runtime.
citem_t_swigregister = _ida_hexrays.citem_t_swigregister
citem_t_swigregister(citem_t)
class cexpr_t(citem_t):
    """
    Proxy of C++ cexpr_t class: a C expression node of the Hex-Rays ctree.

    Operand accessors (n, fpc, v, obj_ea, refwidth, x, y, z, a, m, ptrsize,
    insn, helper, string) are exposed below as properties that are only
    valid for the matching 'op' value; their setters validate that
    precondition and transfer ownership where applicable.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    type = _swig_property(_ida_hexrays.cexpr_t_type_get, _ida_hexrays.cexpr_t_type_set)
    exflags = _swig_property(_ida_hexrays.cexpr_t_exflags_get, _ida_hexrays.cexpr_t_exflags_set)
    # --- exflags queries / mutators (see the EXFL_* constants below the class) ---
    def cpadone(self, *args):
        """
        cpadone(self) -> bool
        """
        return _ida_hexrays.cexpr_t_cpadone(self, *args)
    def is_odd_lvalue(self, *args):
        """
        is_odd_lvalue(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_odd_lvalue(self, *args)
    def is_fpop(self, *args):
        """
        is_fpop(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_fpop(self, *args)
    def is_cstr(self, *args):
        """
        is_cstr(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_cstr(self, *args)
    def is_undef_val(self, *args):
        """
        is_undef_val(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_undef_val(self, *args)
    def is_jumpout(self, *args):
        """
        is_jumpout(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_jumpout(self, *args)
    def is_vftable(self, *args):
        """
        is_vftable(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_vftable(self, *args)
    def set_cpadone(self, *args):
        """
        set_cpadone(self)
        """
        return _ida_hexrays.cexpr_t_set_cpadone(self, *args)
    def set_vftable(self, *args):
        """
        set_vftable(self)
        """
        return _ida_hexrays.cexpr_t_set_vftable(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> cexpr_t
        __init__(self, cop, _x) -> cexpr_t
        __init__(self, cop, _x, _y) -> cexpr_t
        __init__(self, cop, _x, _y, _z) -> cexpr_t
        __init__(self, r) -> cexpr_t
        """
        this = _ida_hexrays.new_cexpr_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def swap(self, *args):
        """
        swap(self, r)
        """
        return _ida_hexrays.cexpr_t_swap(self, *args)
    def assign(self, *args):
        """
        assign(self, r) -> cexpr_t
        """
        return _ida_hexrays.cexpr_t_assign(self, *args)
    # --- rich comparisons delegate to the native implementation ---
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cexpr_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cexpr_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.cexpr_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.cexpr_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.cexpr_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.cexpr_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.cexpr_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_cexpr_t
    __del__ = lambda self : None;
    def _replace_by(self, *args):
        """
        _replace_by(self, r)
        """
        return _ida_hexrays.cexpr_t__replace_by(self, *args)
    def cleanup(self, *args):
        """
        cleanup(self)
        """
        return _ida_hexrays.cexpr_t_cleanup(self, *args)
    def put_number(self, *args):
        """
        put_number(self, func, value, nbytes, sign=no_sign)
        """
        return _ida_hexrays.cexpr_t_put_number(self, *args)
    def print1(self, *args):
        """
        print1(self, func)
        """
        return _ida_hexrays.cexpr_t_print1(self, *args)
    def calc_type(self, *args):
        """
        calc_type(self, recursive)
        """
        return _ida_hexrays.cexpr_t_calc_type(self, *args)
    def equal_effect(self, *args):
        """
        equal_effect(self, r) -> bool
        """
        return _ida_hexrays.cexpr_t_equal_effect(self, *args)
    def is_child_of(self, *args):
        """
        is_child_of(self, parent) -> bool
        """
        return _ida_hexrays.cexpr_t_is_child_of(self, *args)
    def contains_operator(self, *args):
        """
        contains_operator(self, needed_op, times=1) -> bool
        """
        return _ida_hexrays.cexpr_t_contains_operator(self, *args)
    def contains_comma(self, *args):
        """
        contains_comma(self, times=1) -> bool
        """
        return _ida_hexrays.cexpr_t_contains_comma(self, *args)
    def contains_insn(self, *args):
        """
        contains_insn(self, times=1) -> bool
        """
        return _ida_hexrays.cexpr_t_contains_insn(self, *args)
    def contains_insn_or_label(self, *args):
        """
        contains_insn_or_label(self) -> bool
        """
        return _ida_hexrays.cexpr_t_contains_insn_or_label(self, *args)
    def contains_comma_or_insn_or_label(self, *args):
        """
        contains_comma_or_insn_or_label(self, maxcommas=1) -> bool
        """
        return _ida_hexrays.cexpr_t_contains_comma_or_insn_or_label(self, *args)
    def is_nice_expr(self, *args):
        """
        is_nice_expr(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_nice_expr(self, *args)
    def is_nice_cond(self, *args):
        """
        is_nice_cond(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_nice_cond(self, *args)
    def is_call_object_of(self, *args):
        """
        is_call_object_of(self, parent) -> bool
        """
        return _ida_hexrays.cexpr_t_is_call_object_of(self, *args)
    def is_call_arg_of(self, *args):
        """
        is_call_arg_of(self, parent) -> bool
        """
        return _ida_hexrays.cexpr_t_is_call_arg_of(self, *args)
    def get_type_sign(self, *args):
        """
        get_type_sign(self) -> type_sign_t
        """
        return _ida_hexrays.cexpr_t_get_type_sign(self, *args)
    def is_type_unsigned(self, *args):
        """
        is_type_unsigned(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_type_unsigned(self, *args)
    def is_type_signed(self, *args):
        """
        is_type_signed(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_type_signed(self, *args)
    def get_high_nbit_bound(self, *args):
        """
        get_high_nbit_bound(self) -> bit_bound_t
        """
        return _ida_hexrays.cexpr_t_get_high_nbit_bound(self, *args)
    def get_low_nbit_bound(self, *args):
        """
        get_low_nbit_bound(self) -> int
        """
        return _ida_hexrays.cexpr_t_get_low_nbit_bound(self, *args)
    def requires_lvalue(self, *args):
        """
        requires_lvalue(self, child) -> bool
        """
        return _ida_hexrays.cexpr_t_requires_lvalue(self, *args)
    def has_side_effects(self, *args):
        """
        has_side_effects(self) -> bool
        """
        return _ida_hexrays.cexpr_t_has_side_effects(self, *args)
    def numval(self, *args):
        """
        numval(self) -> uint64
        """
        return _ida_hexrays.cexpr_t_numval(self, *args)
    def is_const_value(self, *args):
        """
        is_const_value(self, _v) -> bool
        """
        return _ida_hexrays.cexpr_t_is_const_value(self, *args)
    def is_negative_const(self, *args):
        """
        is_negative_const(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_negative_const(self, *args)
    def is_non_negative_const(self, *args):
        """
        is_non_negative_const(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_non_negative_const(self, *args)
    def is_non_zero_const(self, *args):
        """
        is_non_zero_const(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_non_zero_const(self, *args)
    def is_zero_const(self, *args):
        """
        is_zero_const(self) -> bool
        """
        return _ida_hexrays.cexpr_t_is_zero_const(self, *args)
    def get_const_value(self, *args):
        """
        get_const_value(self) -> bool
        """
        return _ida_hexrays.cexpr_t_get_const_value(self, *args)
    def maybe_ptr(self, *args):
        """
        maybe_ptr(self) -> bool
        """
        return _ida_hexrays.cexpr_t_maybe_ptr(self, *args)
    def get_ptr_or_array(self, *args):
        """
        get_ptr_or_array(self) -> cexpr_t
        """
        return _ida_hexrays.cexpr_t_get_ptr_or_array(self, *args)
    def find_op(self, *args):
        """
        find_op(self, _op) -> cexpr_t
        find_op(self, _op) -> cexpr_t
        """
        return _ida_hexrays.cexpr_t_find_op(self, *args)
    def find_num_op(self, *args):
        """
        find_num_op(self) -> cexpr_t
        find_num_op(self) -> cexpr_t
        """
        return _ida_hexrays.cexpr_t_find_num_op(self, *args)
    def theother(self, *args):
        """
        theother(self, what) -> cexpr_t
        theother(self, what) -> cexpr_t
        """
        return _ida_hexrays.cexpr_t_theother(self, *args)
    def get_1num_op(self, *args):
        """
        get_1num_op(self, o1, o2) -> bool
        """
        return _ida_hexrays.cexpr_t_get_1num_op(self, *args)
    # --- internal ownership bookkeeping used by citem_t helpers ---
    def _register(self, *args):
        """
        _register(self)
        """
        return _ida_hexrays.cexpr_t__register(self, *args)
    def _deregister(self, *args):
        """
        _deregister(self)
        """
        return _ida_hexrays.cexpr_t__deregister(self, *args)
    # --- variant operands: each property below is valid only for the op
    # value named in its condition; setters validate op, check current
    # ownership, acquire the new value, then store it ---
    def _get_n(self, *args):
        """
        _get_n(self) -> cnumber_t
        """
        return _ida_hexrays.cexpr_t__get_n(self, *args)
    def _set_n(self, *args):
        """
        _set_n(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_n(self, *args)
    n = property( lambda self: self._get_n() if self.op == cot_num else None, lambda self, v: self._ensure_cond(self.op == cot_num,"self.op == cot_num") and self._ensure_no_obj(self._get_n(),"n", True) and self._acquire_ownership(v, True) and self._set_n(v))
    def _get_fpc(self, *args):
        """
        _get_fpc(self) -> fnumber_t
        """
        return _ida_hexrays.cexpr_t__get_fpc(self, *args)
    def _set_fpc(self, *args):
        """
        _set_fpc(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_fpc(self, *args)
    fpc = property( lambda self: self._get_fpc() if self.op == cot_fnum else None, lambda self, v: self._ensure_cond(self.op == cot_fnum,"self.op == cot_fnum") and self._ensure_no_obj(self._get_fpc(),"fpc", True) and self._acquire_ownership(v, True) and self._set_fpc(v))
    def get_v(self, *args):
        """
        get_v(self) -> var_ref_t
        """
        return _ida_hexrays.cexpr_t_get_v(self, *args)
    def set_v(self, *args):
        """
        set_v(self, v)
        """
        return _ida_hexrays.cexpr_t_set_v(self, *args)
    v = property(lambda self: self.get_v(), lambda self, v: self.set_v(v))
    def _get_obj_ea(self, *args):
        """
        _get_obj_ea(self) -> ea_t
        """
        return _ida_hexrays.cexpr_t__get_obj_ea(self, *args)
    def _set_obj_ea(self, *args):
        """
        _set_obj_ea(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_obj_ea(self, *args)
    obj_ea = property( lambda self: self._get_obj_ea() if self.op == cot_obj else ida_idaapi.BADADDR, lambda self, v: self._ensure_cond(self.op == cot_obj,"self.op == cot_obj") and self._ensure_no_obj(self._get_obj_ea(),"obj_ea", False) and self._acquire_ownership(v, False) and self._set_obj_ea(v))
    def _get_refwidth(self, *args):
        """
        _get_refwidth(self) -> int
        """
        return _ida_hexrays.cexpr_t__get_refwidth(self, *args)
    def _set_refwidth(self, *args):
        """
        _set_refwidth(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_refwidth(self, *args)
    refwidth = property( lambda self: self._get_refwidth() if True else 0, lambda self, v: self._ensure_cond(True,"True") and self._ensure_no_obj(self._get_refwidth(),"refwidth", False) and self._acquire_ownership(v, False) and self._set_refwidth(v))
    def _get_x(self, *args):
        """
        _get_x(self) -> cexpr_t
        """
        return _ida_hexrays.cexpr_t__get_x(self, *args)
    def _set_x(self, *args):
        """
        _set_x(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_x(self, *args)
    x = property( lambda self: self._get_x() if op_uses_x(self.op) else None, lambda self, v: self._ensure_cond(op_uses_x(self.op),"op_uses_x(self.op)") and self._ensure_no_obj(self._get_x(),"x", True) and self._acquire_ownership(v, True) and self._set_x(v))
    def _get_y(self, *args):
        """
        _get_y(self) -> cexpr_t
        """
        return _ida_hexrays.cexpr_t__get_y(self, *args)
    def _set_y(self, *args):
        """
        _set_y(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_y(self, *args)
    y = property( lambda self: self._get_y() if op_uses_y(self.op) else None, lambda self, v: self._ensure_cond(op_uses_y(self.op),"op_uses_y(self.op)") and self._ensure_no_obj(self._get_y(),"y", True) and self._acquire_ownership(v, True) and self._set_y(v))
    def _get_a(self, *args):
        """
        _get_a(self) -> carglist_t
        """
        return _ida_hexrays.cexpr_t__get_a(self, *args)
    def _set_a(self, *args):
        """
        _set_a(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_a(self, *args)
    a = property( lambda self: self._get_a() if self.op == cot_call else None, lambda self, v: self._ensure_cond(self.op == cot_call,"self.op == cot_call") and self._ensure_no_obj(self._get_a(),"a", True) and self._acquire_ownership(v, True) and self._set_a(v))
    def _get_m(self, *args):
        """
        _get_m(self) -> int
        """
        return _ida_hexrays.cexpr_t__get_m(self, *args)
    def _set_m(self, *args):
        """
        _set_m(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_m(self, *args)
    m = property( lambda self: self._get_m() if (self.op == cot_memptr or self.op == cot_memref) else 0, lambda self, v: self._ensure_cond((self.op == cot_memptr or self.op == cot_memref),"(self.op == cot_memptr or self.op == cot_memref)") and self._ensure_no_obj(self._get_m(),"m", False) and self._acquire_ownership(v, False) and self._set_m(v))
    def _get_z(self, *args):
        """
        _get_z(self) -> cexpr_t
        """
        return _ida_hexrays.cexpr_t__get_z(self, *args)
    def _set_z(self, *args):
        """
        _set_z(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_z(self, *args)
    z = property( lambda self: self._get_z() if op_uses_z(self.op) else None, lambda self, v: self._ensure_cond(op_uses_z(self.op),"op_uses_z(self.op)") and self._ensure_no_obj(self._get_z(),"z", True) and self._acquire_ownership(v, True) and self._set_z(v))
    def _get_ptrsize(self, *args):
        """
        _get_ptrsize(self) -> int
        """
        return _ida_hexrays.cexpr_t__get_ptrsize(self, *args)
    def _set_ptrsize(self, *args):
        """
        _set_ptrsize(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_ptrsize(self, *args)
    ptrsize = property( lambda self: self._get_ptrsize() if (self.op == cot_ptr or self.op == cot_memptr) else 0, lambda self, v: self._ensure_cond((self.op == cot_ptr or self.op == cot_memptr),"(self.op == cot_ptr or self.op == cot_memptr)") and self._ensure_no_obj(self._get_ptrsize(),"ptrsize", False) and self._acquire_ownership(v, False) and self._set_ptrsize(v))
    def _get_insn(self, *args):
        """
        _get_insn(self) -> cinsn_t
        """
        return _ida_hexrays.cexpr_t__get_insn(self, *args)
    def _set_insn(self, *args):
        """
        _set_insn(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_insn(self, *args)
    insn = property( lambda self: self._get_insn() if self.op == cot_insn else None, lambda self, v: self._ensure_cond(self.op == cot_insn,"self.op == cot_insn") and self._ensure_no_obj(self._get_insn(),"insn", True) and self._acquire_ownership(v, True) and self._set_insn(v))
    def _get_helper(self, *args):
        """
        _get_helper(self) -> char *
        """
        return _ida_hexrays.cexpr_t__get_helper(self, *args)
    def _set_helper(self, *args):
        """
        _set_helper(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_helper(self, *args)
    helper = property( lambda self: self._get_helper() if self.op == cot_helper else None, lambda self, v: self._ensure_cond(self.op == cot_helper,"self.op == cot_helper") and self._ensure_no_obj(self._get_helper(),"helper", False) and self._acquire_ownership(v, False) and self._set_helper(v))
    def _get_string(self, *args):
        """
        _get_string(self) -> char *
        """
        return _ida_hexrays.cexpr_t__get_string(self, *args)
    def _set_string(self, *args):
        """
        _set_string(self, _v)
        """
        return _ida_hexrays.cexpr_t__set_string(self, *args)
    string = property( lambda self: self._get_string() if self.op == cot_str else None, lambda self, v: self._ensure_cond(self.op == cot_str,"self.op == cot_str") and self._ensure_no_obj(self._get_string(),"string", False) and self._acquire_ownership(v, False) and self._set_string(v))
# Register the cexpr_t proxy class with the SWIG runtime.
cexpr_t_swigregister = _ida_hexrays.cexpr_t_swigregister
cexpr_t_swigregister(cexpr_t)
# Bit values for cexpr_t.exflags (queried/modified via the is_*/set_*
# helpers on cexpr_t).
EXFL_CPADONE = _ida_hexrays.EXFL_CPADONE
"""
pointer arithmetic correction done
"""
EXFL_LVALUE = _ida_hexrays.EXFL_LVALUE
"""
expression is lvalue even if it doesn't look like it
"""
EXFL_FPOP = _ida_hexrays.EXFL_FPOP
"""
floating point operation
"""
EXFL_ALONE = _ida_hexrays.EXFL_ALONE
"""
standalone helper
"""
EXFL_CSTR = _ida_hexrays.EXFL_CSTR
"""
string literal
"""
EXFL_PARTIAL = _ida_hexrays.EXFL_PARTIAL
"""
type of the expression is considered partial
"""
EXFL_UNDEF = _ida_hexrays.EXFL_UNDEF
"""
expression uses undefined value
"""
EXFL_JUMPOUT = _ida_hexrays.EXFL_JUMPOUT
"""
jump out-of-function
"""
EXFL_VFTABLE = _ida_hexrays.EXFL_VFTABLE
"""
is ptr to vftable (used for cot_memptr, cot_memref)
"""
EXFL_ALL = _ida_hexrays.EXFL_ALL
"""
all currently defined bits
"""
class ceinsn_t(object):
    """
    Proxy of C++ ceinsn_t class: base class for statements that carry
    an expression (accessible through the 'expr' attribute).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    expr = _swig_property(_ida_hexrays.ceinsn_t_expr_get, _ida_hexrays.ceinsn_t_expr_set)
    def __init__(self, *args):
        """
        __init__(self) -> ceinsn_t
        """
        this = _ida_hexrays.new_ceinsn_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_ceinsn_t
    __del__ = lambda self : None;
# Register the ceinsn_t proxy class with the SWIG runtime.
ceinsn_t_swigregister = _ida_hexrays.ceinsn_t_swigregister
ceinsn_t_swigregister(ceinsn_t)
# Accepted values for the 'use_curly' argument of cinsn_t._print.
CALC_CURLY_BRACES = _ida_hexrays.CALC_CURLY_BRACES
NO_CURLY_BRACES = _ida_hexrays.NO_CURLY_BRACES
USE_CURLY_BRACES = _ida_hexrays.USE_CURLY_BRACES
class cif_t(ceinsn_t):
    """
    Proxy of C++ cif_t class: the C 'if' statement. The condition lives in
    'expr' (inherited from ceinsn_t); branch bodies are 'ithen'/'ielse'.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    ithen = _swig_property(_ida_hexrays.cif_t_ithen_get, _ida_hexrays.cif_t_ithen_set)
    ielse = _swig_property(_ida_hexrays.cif_t_ielse_get, _ida_hexrays.cif_t_ielse_set)
    def __init__(self, *args):
        """
        __init__(self) -> cif_t
        __init__(self, r) -> cif_t
        """
        this = _ida_hexrays.new_cif_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def assign(self, *args):
        """
        assign(self, r) -> cif_t
        """
        return _ida_hexrays.cif_t_assign(self, *args)
    # Rich comparisons delegate to the native implementation.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cif_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cif_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.cif_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.cif_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.cif_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.cif_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.cif_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_cif_t
    __del__ = lambda self : None;
    def cleanup(self, *args):
        """
        cleanup(self)
        """
        return _ida_hexrays.cif_t_cleanup(self, *args)
# Register the cif_t proxy class with the SWIG runtime.
cif_t_swigregister = _ida_hexrays.cif_t_swigregister
cif_t_swigregister(cif_t)
class cloop_t(ceinsn_t):
    """
    Proxy of C++ cloop_t class: base class for loop statements. The loop
    body is 'body'; the condition is 'expr' (inherited from ceinsn_t).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    body = _swig_property(_ida_hexrays.cloop_t_body_get, _ida_hexrays.cloop_t_body_set)
    def __init__(self, *args):
        """
        __init__(self) -> cloop_t
        __init__(self, b) -> cloop_t
        __init__(self, r) -> cloop_t
        """
        this = _ida_hexrays.new_cloop_t(*args)
        try: self.this.append(this)
        except: self.this = this
    def assign(self, *args):
        """
        assign(self, r) -> cloop_t
        """
        return _ida_hexrays.cloop_t_assign(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_cloop_t
    __del__ = lambda self : None;
    def cleanup(self, *args):
        """
        cleanup(self)
        """
        return _ida_hexrays.cloop_t_cleanup(self, *args)
# Register the cloop_t proxy class with the SWIG runtime.
cloop_t_swigregister = _ida_hexrays.cloop_t_swigregister
cloop_t_swigregister(cloop_t)
class cfor_t(cloop_t):
    """
    Proxy of C++ cfor_t class: the C 'for' loop. 'init' and 'step'
    expressions here; body/condition inherited from cloop_t/ceinsn_t.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    init = _swig_property(_ida_hexrays.cfor_t_init_get, _ida_hexrays.cfor_t_init_set)
    step = _swig_property(_ida_hexrays.cfor_t_step_get, _ida_hexrays.cfor_t_step_set)
    # Rich comparisons delegate to the native implementation.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cfor_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cfor_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.cfor_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.cfor_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.cfor_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.cfor_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.cfor_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> cfor_t
        """
        this = _ida_hexrays.new_cfor_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_cfor_t
    __del__ = lambda self : None;
# Register the cfor_t proxy class with the SWIG runtime.
cfor_t_swigregister = _ida_hexrays.cfor_t_swigregister
cfor_t_swigregister(cfor_t)
class cwhile_t(cloop_t):
    """
    Proxy of C++ cwhile_t class: the C 'while' loop (body/condition
    inherited from cloop_t/ceinsn_t).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Rich comparisons delegate to the native implementation.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cwhile_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cwhile_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.cwhile_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.cwhile_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.cwhile_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.cwhile_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.cwhile_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> cwhile_t
        """
        this = _ida_hexrays.new_cwhile_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_cwhile_t
    __del__ = lambda self : None;
# Register the cwhile_t proxy class with the SWIG runtime.
cwhile_t_swigregister = _ida_hexrays.cwhile_t_swigregister
cwhile_t_swigregister(cwhile_t)
class cdo_t(cloop_t):
    """
    Proxy of C++ cdo_t class: the C 'do ... while' loop (body/condition
    inherited from cloop_t/ceinsn_t).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Rich comparisons delegate to the native implementation.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cdo_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cdo_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.cdo_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.cdo_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.cdo_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.cdo_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.cdo_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> cdo_t
        """
        this = _ida_hexrays.new_cdo_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_cdo_t
    __del__ = lambda self : None;
# Register the cdo_t proxy class with the SWIG runtime.
cdo_t_swigregister = _ida_hexrays.cdo_t_swigregister
cdo_t_swigregister(cdo_t)
class creturn_t(ceinsn_t):
    """
    Proxy of C++ creturn_t class: the C 'return' statement; the returned
    expression is 'expr' (inherited from ceinsn_t).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Rich comparisons delegate to the native implementation.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.creturn_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.creturn_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.creturn_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.creturn_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.creturn_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.creturn_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.creturn_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> creturn_t
        """
        this = _ida_hexrays.new_creturn_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_creturn_t
    __del__ = lambda self : None;
# Register the creturn_t proxy class with the SWIG runtime.
creturn_t_swigregister = _ida_hexrays.creturn_t_swigregister
creturn_t_swigregister(creturn_t)
class cgoto_t(object):
    """
    Proxy of C++ cgoto_t class: the C 'goto' statement; the target label
    number is 'label_num'.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    label_num = _swig_property(_ida_hexrays.cgoto_t_label_num_get, _ida_hexrays.cgoto_t_label_num_set)
    # Rich comparisons delegate to the native implementation.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cgoto_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cgoto_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.cgoto_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.cgoto_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.cgoto_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.cgoto_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.cgoto_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> cgoto_t
        """
        this = _ida_hexrays.new_cgoto_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_cgoto_t
    __del__ = lambda self : None;
# Register the cgoto_t proxy class with the SWIG runtime.
cgoto_t_swigregister = _ida_hexrays.cgoto_t_swigregister
cgoto_t_swigregister(cgoto_t)
class casm_t(ida_pro.eavec_t):
    """
    Proxy of C++ casm_t class: an inline-assembly statement, held as a
    vector of instruction addresses (extends ida_pro.eavec_t).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(self, ea) -> casm_t
        __init__(self, r) -> casm_t
        """
        this = _ida_hexrays.new_casm_t(*args)
        try: self.this.append(this)
        except: self.this = this
    # Rich comparisons delegate to the native implementation.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.casm_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.casm_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.casm_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.casm_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.casm_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.casm_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.casm_t_compare(self, *args)
    def one_insn(self, *args):
        """
        one_insn(self) -> bool

        True if this asm statement consists of a single instruction.
        """
        return _ida_hexrays.casm_t_one_insn(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_casm_t
    __del__ = lambda self : None;
# Register the casm_t proxy class with the SWIG runtime.
casm_t_swigregister = _ida_hexrays.casm_t_swigregister
casm_t_swigregister(casm_t)
class cinsn_t(citem_t):
"""
Proxy of C++ cinsn_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(self) -> cinsn_t
__init__(self, r) -> cinsn_t
"""
this = _ida_hexrays.new_cinsn_t(*args)
try: self.this.append(this)
except: self.this = this
def swap(self, *args):
"""
swap(self, r)
"""
return _ida_hexrays.cinsn_t_swap(self, *args)
def assign(self, *args):
"""
assign(self, r) -> cinsn_t
"""
return _ida_hexrays.cinsn_t_assign(self, *args)
def __eq__(self, *args):
"""
__eq__(self, r) -> bool
"""
return _ida_hexrays.cinsn_t___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, r) -> bool
"""
return _ida_hexrays.cinsn_t___ne__(self, *args)
def __lt__(self, *args):
"""
__lt__(self, r) -> bool
"""
return _ida_hexrays.cinsn_t___lt__(self, *args)
def __gt__(self, *args):
"""
__gt__(self, r) -> bool
"""
return _ida_hexrays.cinsn_t___gt__(self, *args)
def __le__(self, *args):
"""
__le__(self, r) -> bool
"""
return _ida_hexrays.cinsn_t___le__(self, *args)
def __ge__(self, *args):
"""
__ge__(self, r) -> bool
"""
return _ida_hexrays.cinsn_t___ge__(self, *args)
def compare(self, *args):
"""
compare(self, r) -> int
"""
return _ida_hexrays.cinsn_t_compare(self, *args)
__swig_destroy__ = _ida_hexrays.delete_cinsn_t
__del__ = lambda self : None;
def _replace_by(self, *args):
"""
_replace_by(self, r)
"""
return _ida_hexrays.cinsn_t__replace_by(self, *args)
def cleanup(self, *args):
"""
cleanup(self)
"""
return _ida_hexrays.cinsn_t_cleanup(self, *args)
def zero(self, *args):
"""
zero(self)
"""
return _ida_hexrays.cinsn_t_zero(self, *args)
def new_insn(self, *args):
"""
new_insn(self, insn_ea) -> cinsn_t
"""
return _ida_hexrays.cinsn_t_new_insn(self, *args)
def create_if(self, *args):
"""
create_if(self, cnd) -> cif_t
"""
return _ida_hexrays.cinsn_t_create_if(self, *args)
def _print(self, *args):
"""
_print(self, indent, vp, use_curly=CALC_CURLY_BRACES)
"""
return _ida_hexrays.cinsn_t__print(self, *args)
def print1(self, *args):
"""
print1(self, func)
"""
return _ida_hexrays.cinsn_t_print1(self, *args)
def is_ordinary_flow(self, *args):
"""
is_ordinary_flow(self) -> bool
"""
return _ida_hexrays.cinsn_t_is_ordinary_flow(self, *args)
def contains_insn(self, *args):
"""
contains_insn(self, type, times=1) -> bool
"""
return _ida_hexrays.cinsn_t_contains_insn(self, *args)
def collect_free_breaks(self, *args):
"""
collect_free_breaks(self, breaks) -> bool
"""
return _ida_hexrays.cinsn_t_collect_free_breaks(self, *args)
def collect_free_continues(self, *args):
"""
collect_free_continues(self, continues) -> bool
"""
return _ida_hexrays.cinsn_t_collect_free_continues(self, *args)
def contains_free_break(self, *args):
"""
contains_free_break(self) -> bool
"""
return _ida_hexrays.cinsn_t_contains_free_break(self, *args)
def contains_free_continue(self, *args):
"""
contains_free_continue(self) -> bool
"""
return _ida_hexrays.cinsn_t_contains_free_continue(self, *args)
def _register(self, *args):
"""
_register(self)
"""
return _ida_hexrays.cinsn_t__register(self, *args)
def _deregister(self, *args):
"""
_deregister(self)
"""
return _ida_hexrays.cinsn_t__deregister(self, *args)
    # ------------------------------------------------------------------
    # Union-member accessors. A cinsn_t holds exactly one statement payload
    # selected by self.op (cit_block, cit_expr, cit_if, ...). Each property
    # below:
    #   * getter: returns the payload only when self.op matches, else None;
    #   * setter: asserts the op matches (_ensure_cond), refuses to overwrite
    #     an existing payload object (_ensure_no_obj), takes ownership of the
    #     new value (_acquire_ownership), then stores it via _set_X.
    # The _get_X/_set_X pairs are raw native calls; use the properties.
    # ------------------------------------------------------------------
    def _get_cblock(self, *args):
        """
        _get_cblock(self) -> cblock_t
        """
        return _ida_hexrays.cinsn_t__get_cblock(self, *args)
    def _set_cblock(self, *args):
        """
        _set_cblock(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_cblock(self, *args)
    # Payload when op == cit_block (compound statement).
    cblock = property( lambda self: self._get_cblock() if self.op == cit_block else None, lambda self, v: self._ensure_cond(self.op == cit_block,"self.op == cit_block") and self._ensure_no_obj(self._get_cblock(),"cblock", True) and self._acquire_ownership(v, True) and self._set_cblock(v))
    def _get_cexpr(self, *args):
        """
        _get_cexpr(self) -> cexpr_t
        """
        return _ida_hexrays.cinsn_t__get_cexpr(self, *args)
    def _set_cexpr(self, *args):
        """
        _set_cexpr(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_cexpr(self, *args)
    # Payload when op == cit_expr (expression statement).
    cexpr = property( lambda self: self._get_cexpr() if self.op == cit_expr else None, lambda self, v: self._ensure_cond(self.op == cit_expr,"self.op == cit_expr") and self._ensure_no_obj(self._get_cexpr(),"cexpr", True) and self._acquire_ownership(v, True) and self._set_cexpr(v))
    def _get_cif(self, *args):
        """
        _get_cif(self) -> cif_t
        """
        return _ida_hexrays.cinsn_t__get_cif(self, *args)
    def _set_cif(self, *args):
        """
        _set_cif(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_cif(self, *args)
    # Payload when op == cit_if.
    cif = property( lambda self: self._get_cif() if self.op == cit_if else None, lambda self, v: self._ensure_cond(self.op == cit_if,"self.op == cit_if") and self._ensure_no_obj(self._get_cif(),"cif", True) and self._acquire_ownership(v, True) and self._set_cif(v))
    def _get_cfor(self, *args):
        """
        _get_cfor(self) -> cfor_t
        """
        return _ida_hexrays.cinsn_t__get_cfor(self, *args)
    def _set_cfor(self, *args):
        """
        _set_cfor(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_cfor(self, *args)
    # Payload when op == cit_for.
    cfor = property( lambda self: self._get_cfor() if self.op == cit_for else None, lambda self, v: self._ensure_cond(self.op == cit_for,"self.op == cit_for") and self._ensure_no_obj(self._get_cfor(),"cfor", True) and self._acquire_ownership(v, True) and self._set_cfor(v))
    def _get_cwhile(self, *args):
        """
        _get_cwhile(self) -> cwhile_t
        """
        return _ida_hexrays.cinsn_t__get_cwhile(self, *args)
    def _set_cwhile(self, *args):
        """
        _set_cwhile(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_cwhile(self, *args)
    # Payload when op == cit_while.
    cwhile = property( lambda self: self._get_cwhile() if self.op == cit_while else None, lambda self, v: self._ensure_cond(self.op == cit_while,"self.op == cit_while") and self._ensure_no_obj(self._get_cwhile(),"cwhile", True) and self._acquire_ownership(v, True) and self._set_cwhile(v))
    def _get_cdo(self, *args):
        """
        _get_cdo(self) -> cdo_t
        """
        return _ida_hexrays.cinsn_t__get_cdo(self, *args)
    def _set_cdo(self, *args):
        """
        _set_cdo(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_cdo(self, *args)
    # Payload when op == cit_do (do-while loop).
    cdo = property( lambda self: self._get_cdo() if self.op == cit_do else None, lambda self, v: self._ensure_cond(self.op == cit_do,"self.op == cit_do") and self._ensure_no_obj(self._get_cdo(),"cdo", True) and self._acquire_ownership(v, True) and self._set_cdo(v))
    def _get_cswitch(self, *args):
        """
        _get_cswitch(self) -> cswitch_t
        """
        return _ida_hexrays.cinsn_t__get_cswitch(self, *args)
    def _set_cswitch(self, *args):
        """
        _set_cswitch(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_cswitch(self, *args)
    # Payload when op == cit_switch.
    cswitch = property( lambda self: self._get_cswitch() if self.op == cit_switch else None, lambda self, v: self._ensure_cond(self.op == cit_switch,"self.op == cit_switch") and self._ensure_no_obj(self._get_cswitch(),"cswitch", True) and self._acquire_ownership(v, True) and self._set_cswitch(v))
    def _get_creturn(self, *args):
        """
        _get_creturn(self) -> creturn_t
        """
        return _ida_hexrays.cinsn_t__get_creturn(self, *args)
    def _set_creturn(self, *args):
        """
        _set_creturn(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_creturn(self, *args)
    # Payload when op == cit_return.
    creturn = property( lambda self: self._get_creturn() if self.op == cit_return else None, lambda self, v: self._ensure_cond(self.op == cit_return,"self.op == cit_return") and self._ensure_no_obj(self._get_creturn(),"creturn", True) and self._acquire_ownership(v, True) and self._set_creturn(v))
    def _get_cgoto(self, *args):
        """
        _get_cgoto(self) -> cgoto_t
        """
        return _ida_hexrays.cinsn_t__get_cgoto(self, *args)
    def _set_cgoto(self, *args):
        """
        _set_cgoto(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_cgoto(self, *args)
    # Payload when op == cit_goto.
    cgoto = property( lambda self: self._get_cgoto() if self.op == cit_goto else None, lambda self, v: self._ensure_cond(self.op == cit_goto,"self.op == cit_goto") and self._ensure_no_obj(self._get_cgoto(),"cgoto", True) and self._acquire_ownership(v, True) and self._set_cgoto(v))
    def _get_casm(self, *args):
        """
        _get_casm(self) -> casm_t
        """
        return _ida_hexrays.cinsn_t__get_casm(self, *args)
    def _set_casm(self, *args):
        """
        _set_casm(self, _v)
        """
        return _ida_hexrays.cinsn_t__set_casm(self, *args)
    # Payload when op == cit_asm (inline assembly statement).
    casm = property( lambda self: self._get_casm() if self.op == cit_asm else None, lambda self, v: self._ensure_cond(self.op == cit_asm,"self.op == cit_asm") and self._ensure_no_obj(self._get_casm(),"casm", True) and self._acquire_ownership(v, True) and self._set_casm(v))
    # Static query, exposed three ways by the SWIG template: as a staticmethod
    # on the class, as an instance convenience wrapper, and (below, after the
    # class) as a module-level function with the mangled name.
    def insn_is_epilog(*args):
        """
        insn_is_epilog(insn) -> bool
        """
        return _ida_hexrays.cinsn_t_insn_is_epilog(*args)
    insn_is_epilog = staticmethod(insn_is_epilog)
    def is_epilog(self):
        # Instance-flavored alias for the staticmethod above.
        return cinsn_t.insn_is_epilog(self)
# Register the proxy class with the SWIG runtime (standard generated epilogue).
cinsn_t_swigregister = _ida_hexrays.cinsn_t_swigregister
cinsn_t_swigregister(cinsn_t)
def cinsn_t_insn_is_epilog(*args):
    """
    cinsn_t_insn_is_epilog(insn) -> bool
    """
    return _ida_hexrays.cinsn_t_insn_is_epilog(*args)
class cblock_t(qlist_cinsn_t):
    """
    Proxy of C++ cblock_t class.

    A compound statement: a list of cinsn_t (inherits the list behavior
    from qlist_cinsn_t). All operations delegate to the native module.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Rich comparisons and compare() delegate to the C++ comparison operators.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cblock_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cblock_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.cblock_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.cblock_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.cblock_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.cblock_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.cblock_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> cblock_t
        """
        this = _ida_hexrays.new_cblock_t(*args)
        # Standard SWIG ownership dance: reuse the existing 'this' slot if
        # present, otherwise create it.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_cblock_t
    __del__ = lambda self : None;
    def _deregister(self, *args):
        """
        _deregister(self)
        """
        return _ida_hexrays.cblock_t__deregister(self, *args)
# Register the proxy class with the SWIG runtime.
cblock_t_swigregister = _ida_hexrays.cblock_t_swigregister
cblock_t_swigregister(cblock_t)
class carg_t(cexpr_t):
    """
    Proxy of C++ carg_t class.

    A single call argument: an expression (cexpr_t) plus argument metadata
    (is_vararg flag and the formal parameter type).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # True when this argument matches a '...' (vararg) slot.
    is_vararg = _swig_property(_ida_hexrays.carg_t_is_vararg_get, _ida_hexrays.carg_t_is_vararg_set)
    # Declared type of the corresponding formal parameter.
    formal_type = _swig_property(_ida_hexrays.carg_t_formal_type_get, _ida_hexrays.carg_t_formal_type_set)
    def consume_cexpr(self, *args):
        """
        consume_cexpr(self, e)

        Takes over the given expression as this argument's value
        (native-side ownership transfer).
        """
        return _ida_hexrays.carg_t_consume_cexpr(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> carg_t
        """
        this = _ida_hexrays.new_carg_t(*args)
        # Standard SWIG ownership dance.
        try: self.this.append(this)
        except: self.this = this
    # Rich comparisons and compare() delegate to the C++ comparison operators.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.carg_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.carg_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.carg_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.carg_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.carg_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.carg_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.carg_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_carg_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
carg_t_swigregister = _ida_hexrays.carg_t_swigregister
carg_t_swigregister(carg_t)
class carglist_t(qvector_carg_t):
    """
    Proxy of C++ carglist_t class.

    Vector of call arguments (carg_t) plus the callee's function type and
    CFL_* flags describing the call.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Type of the called function.
    functype = _swig_property(_ida_hexrays.carglist_t_functype_get, _ida_hexrays.carglist_t_functype_set)
    # Combination of CFL_* bits (see module-level constants below).
    flags = _swig_property(_ida_hexrays.carglist_t_flags_get, _ida_hexrays.carglist_t_flags_set)
    def __init__(self, *args):
        """
        __init__(self) -> carglist_t
        __init__(self, ftype, fl=0) -> carglist_t
        """
        this = _ida_hexrays.new_carglist_t(*args)
        # Standard SWIG ownership dance.
        try: self.this.append(this)
        except: self.this = this
    # Rich comparisons and compare() delegate to the C++ comparison operators.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.carglist_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.carglist_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.carglist_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.carglist_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.carglist_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.carglist_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.carglist_t_compare(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_carglist_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
carglist_t_swigregister = _ida_hexrays.carglist_t_swigregister
carglist_t_swigregister(carglist_t)
# Call flags used in carglist_t.flags:
CFL_FINAL = _ida_hexrays.CFL_FINAL
"""
call type is final, should not be changed
"""
CFL_HELPER = _ida_hexrays.CFL_HELPER
"""
created from a decompiler helper function
"""
class ccase_t(cinsn_t):
    """
    Proxy of C++ ccase_t class.

    One 'case' of a switch statement: the case body (inherited cinsn_t)
    plus the list of case values it handles.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Case values handled by this branch (multiple values -> fall-through labels).
    values = _swig_property(_ida_hexrays.ccase_t_values_get, _ida_hexrays.ccase_t_values_set)
    # Rich comparisons and compare() delegate to the C++ comparison operators.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.ccase_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.ccase_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.ccase_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.ccase_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.ccase_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.ccase_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.ccase_t_compare(self, *args)
    def size(self, *args):
        """
        size(self) -> size_t

        Number of case values attached to this branch.
        """
        return _ida_hexrays.ccase_t_size(self, *args)
    def value(self, *args):
        """
        value(self, i) -> uint64 const &

        i-th case value.
        """
        return _ida_hexrays.ccase_t_value(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> ccase_t
        """
        this = _ida_hexrays.new_ccase_t(*args)
        # Standard SWIG ownership dance.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_ccase_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
ccase_t_swigregister = _ida_hexrays.ccase_t_swigregister
ccase_t_swigregister(ccase_t)
class ccases_t(qvector_ccase_t):
    """
    Proxy of C++ ccases_t class.

    Vector of switch cases (ccase_t); used as cswitch_t.cases.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Rich comparisons and compare() delegate to the C++ comparison operators.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.ccases_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.ccases_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.ccases_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.ccases_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.ccases_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.ccases_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.ccases_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> ccases_t
        """
        this = _ida_hexrays.new_ccases_t(*args)
        # Standard SWIG ownership dance.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_ccases_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
ccases_t_swigregister = _ida_hexrays.ccases_t_swigregister
ccases_t_swigregister(ccases_t)
class cswitch_t(ceinsn_t):
    """
    Proxy of C++ cswitch_t class.

    A switch statement: the switch expression (inherited from ceinsn_t),
    the maximum-value/number-format info (mvnf) and the case list (cases).
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # NOTE(review): 'mvnf' looks like max-value / number-format metadata for
    # the switch expression — confirm against the Hex-Rays SDK headers.
    mvnf = _swig_property(_ida_hexrays.cswitch_t_mvnf_get, _ida_hexrays.cswitch_t_mvnf_set)
    # The cases of this switch (ccases_t).
    cases = _swig_property(_ida_hexrays.cswitch_t_cases_get, _ida_hexrays.cswitch_t_cases_set)
    # Rich comparisons and compare() delegate to the C++ comparison operators.
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.cswitch_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.cswitch_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.cswitch_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.cswitch_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.cswitch_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.cswitch_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.cswitch_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> cswitch_t
        """
        this = _ida_hexrays.new_cswitch_t(*args)
        # Standard SWIG ownership dance.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_cswitch_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
cswitch_t_swigregister = _ida_hexrays.cswitch_t_swigregister
cswitch_t_swigregister(cswitch_t)
class ctree_anchor_t(object):
    """
    Proxy of C++ ctree_anchor_t class.

    A packed anchor value locating a ctree entity in the pseudocode text.
    The low bits are an index (ANCHOR_INDEX mask) and the high bits the
    anchor kind (ANCHOR_CITEM / ANCHOR_LVAR / ANCHOR_ITP / ANCHOR_BLKCMT);
    see the is_*_anchor() predicates.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Raw packed anchor value.
    value = _swig_property(_ida_hexrays.ctree_anchor_t_value_get, _ida_hexrays.ctree_anchor_t_value_set)
    def __init__(self, *args):
        """
        __init__(self) -> ctree_anchor_t
        """
        this = _ida_hexrays.new_ctree_anchor_t(*args)
        # Standard SWIG ownership dance.
        try: self.this.append(this)
        except: self.this = this
    def get_index(self, *args):
        """
        get_index(self) -> int
        """
        return _ida_hexrays.ctree_anchor_t_get_index(self, *args)
    def get_itp(self, *args):
        """
        get_itp(self) -> item_preciser_t
        """
        return _ida_hexrays.ctree_anchor_t_get_itp(self, *args)
    def is_valid_anchor(self, *args):
        """
        is_valid_anchor(self) -> bool
        """
        return _ida_hexrays.ctree_anchor_t_is_valid_anchor(self, *args)
    def is_citem_anchor(self, *args):
        """
        is_citem_anchor(self) -> bool
        """
        return _ida_hexrays.ctree_anchor_t_is_citem_anchor(self, *args)
    def is_lvar_anchor(self, *args):
        """
        is_lvar_anchor(self) -> bool
        """
        return _ida_hexrays.ctree_anchor_t_is_lvar_anchor(self, *args)
    def is_itp_anchor(self, *args):
        """
        is_itp_anchor(self) -> bool
        """
        return _ida_hexrays.ctree_anchor_t_is_itp_anchor(self, *args)
    def is_blkcmt_anchor(self, *args):
        """
        is_blkcmt_anchor(self) -> bool
        """
        return _ida_hexrays.ctree_anchor_t_is_blkcmt_anchor(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_ctree_anchor_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
ctree_anchor_t_swigregister = _ida_hexrays.ctree_anchor_t_swigregister
ctree_anchor_t_swigregister(ctree_anchor_t)
# Anchor layout masks and kind bits:
ANCHOR_INDEX = _ida_hexrays.ANCHOR_INDEX
ANCHOR_MASK = _ida_hexrays.ANCHOR_MASK
ANCHOR_CITEM = _ida_hexrays.ANCHOR_CITEM
"""
c-tree item
"""
ANCHOR_LVAR = _ida_hexrays.ANCHOR_LVAR
"""
declaration of local variable
"""
ANCHOR_ITP = _ida_hexrays.ANCHOR_ITP
"""
item type preciser
"""
ANCHOR_BLKCMT = _ida_hexrays.ANCHOR_BLKCMT
"""
block comment (for ctree items)
"""
# ctree_item_t.citype values (kind of object a ctree_item_t refers to):
VDI_NONE = _ida_hexrays.VDI_NONE
VDI_EXPR = _ida_hexrays.VDI_EXPR
VDI_LVAR = _ida_hexrays.VDI_LVAR
VDI_FUNC = _ida_hexrays.VDI_FUNC
VDI_TAIL = _ida_hexrays.VDI_TAIL
class ctree_item_t(object):
    """
    Proxy of C++ ctree_item_t class.

    Describes the object under the cursor in a pseudocode view. The kind of
    object is in 'citype' (a VDI_* value); the it/e/i/l/f properties expose
    the underlying citem/expression/statement/lvar/function respectively.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Kind of the referenced object (one of the VDI_* constants).
    citype = _swig_property(_ida_hexrays.ctree_item_t_citype_get, _ida_hexrays.ctree_item_t_citype_set)
    def __init__(self, *args):
        """
        __init__(self) -> ctree_item_t
        """
        this = _ida_hexrays.new_ctree_item_t(*args)
        # Standard SWIG ownership dance.
        try: self.this.append(this)
        except: self.this = this
    def get_memptr(self, *args):
        """
        get_memptr(self, p_sptr=None) -> member_t *
        """
        return _ida_hexrays.ctree_item_t_get_memptr(self, *args)
    def get_lvar(self, *args):
        """
        get_lvar(self) -> lvar_t
        """
        return _ida_hexrays.ctree_item_t_get_lvar(self, *args)
    def get_ea(self, *args):
        """
        get_ea(self) -> ea_t
        """
        return _ida_hexrays.ctree_item_t_get_ea(self, *args)
    def get_label_num(self, *args):
        """
        get_label_num(self, gln_flags) -> int

        gln_flags is a combination of the GLN_* constants below.
        """
        return _ida_hexrays.ctree_item_t_get_label_num(self, *args)
    def is_citem(self, *args):
        """
        is_citem(self) -> bool
        """
        return _ida_hexrays.ctree_item_t_is_citem(self, *args)
    # Raw union accessors; prefer the read-only properties defined after each.
    def _get_it(self, *args):
        """
        _get_it(self) -> citem_t
        """
        return _ida_hexrays.ctree_item_t__get_it(self, *args)
    # The referenced citem (generic ctree item).
    it = property(lambda self: self._get_it())
    def _get_e(self, *args):
        """
        _get_e(self) -> cexpr_t
        """
        return _ida_hexrays.ctree_item_t__get_e(self, *args)
    # The referenced expression.
    e = property(lambda self: self._get_e())
    def _get_i(self, *args):
        """
        _get_i(self) -> cinsn_t
        """
        return _ida_hexrays.ctree_item_t__get_i(self, *args)
    # The referenced statement.
    i = property(lambda self: self._get_i())
    def _get_l(self, *args):
        """
        _get_l(self) -> lvar_t
        """
        return _ida_hexrays.ctree_item_t__get_l(self, *args)
    # The referenced local variable.
    l = property(lambda self: self._get_l())
    def _get_f(self, *args):
        """
        _get_f(self) -> cfunc_t
        """
        return _ida_hexrays.ctree_item_t__get_f(self, *args)
    # The referenced (decompiled) function.
    f = property(lambda self: self._get_f())
    # Read-only location information (no setter exposed by the native module).
    loc = _swig_property(_ida_hexrays.ctree_item_t_loc_get)
    __swig_destroy__ = _ida_hexrays.delete_ctree_item_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime.
ctree_item_t_swigregister = _ida_hexrays.ctree_item_t_swigregister
ctree_item_t_swigregister(ctree_item_t)
# Flags for ctree_item_t.get_label_num():
GLN_CURRENT = _ida_hexrays.GLN_CURRENT
"""
get label of the current item
"""
GLN_GOTO_TARGET = _ida_hexrays.GLN_GOTO_TARGET
"""
get goto target
"""
GLN_ALL = _ida_hexrays.GLN_ALL
"""
get both
"""
# Label-handling policies:
FORBID_UNUSED_LABELS = _ida_hexrays.FORBID_UNUSED_LABELS
ALLOW_UNUSED_LABELS = _ida_hexrays.ALLOW_UNUSED_LABELS
# Low-level ctree construction helpers (leading underscore: internal; the
# public API presumably wraps these elsewhere in this module — the wrappers
# are not visible in this chunk).
def _ll_lnot(*args):
    """
    _ll_lnot(e) -> cexpr_t

    Logical negation of expression e.
    """
    return _ida_hexrays._ll_lnot(*args)
def _ll_new_block(*args):
    """
    _ll_new_block() -> cinsn_t

    Create an empty block statement.
    """
    return _ida_hexrays._ll_new_block(*args)
def _ll_create_helper(*args):
    """
    _ll_create_helper(standalone, type, format) -> cexpr_t
    """
    return _ida_hexrays._ll_create_helper(*args)
def _ll_call_helper(*args):
    """
    _ll_call_helper(rettype, args, format) -> cexpr_t
    """
    return _ida_hexrays._ll_call_helper(*args)
def _ll_make_num(*args):
    """
    _ll_make_num(n, func=None, ea=BADADDR, opnum=0, sign=no_sign, size=0) -> cexpr_t
    """
    return _ida_hexrays._ll_make_num(*args)
def _ll_make_ref(*args):
    """
    _ll_make_ref(e) -> cexpr_t

    Build a reference (&) to expression e.
    """
    return _ida_hexrays._ll_make_ref(*args)
def _ll_dereference(*args):
    """
    _ll_dereference(e, ptrsize, is_flt=False) -> cexpr_t

    Build a dereference (*) of expression e.
    """
    return _ida_hexrays._ll_dereference(*args)
# Persistence of user-provided decompiler annotations (labels, comments,
# number formats, citem iflags, union member selections). save_* writes to
# the IDA database; restore_* reads back and returns a newly-allocated
# collection that the caller must free with the matching delete_* function.
def save_user_labels(*args):
    """
    save_user_labels(func_ea, user_labels)
    Save user defined labels into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param user_labels: collection of user defined labels (C++: const
                        user_labels_t *)
    """
    return _ida_hexrays.save_user_labels(*args)
def save_user_cmts(*args):
    """
    save_user_cmts(func_ea, user_cmts)
    Save user defined comments into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param user_cmts: collection of user defined comments (C++: const
                      user_cmts_t *)
    """
    return _ida_hexrays.save_user_cmts(*args)
def save_user_numforms(*args):
    """
    save_user_numforms(func_ea, numforms)
    Save user defined number formats into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param numforms: collection of user defined comments (C++: const
                     user_numforms_t *)
    """
    return _ida_hexrays.save_user_numforms(*args)
def save_user_iflags(*args):
    """
    save_user_iflags(func_ea, iflags)
    Save user defined citem iflags into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param iflags: collection of user defined citem iflags (C++: const
                   user_iflags_t *)
    """
    return _ida_hexrays.save_user_iflags(*args)
def save_user_unions(*args):
    """
    save_user_unions(func_ea, unions)
    Save user defined union field selections into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param unions: collection of union field selections (C++: const
                   user_unions_t *)
    """
    return _ida_hexrays.save_user_unions(*args)
def restore_user_labels(*args):
    """
    restore_user_labels(func_ea) -> user_labels_t
    Restore user defined labels from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of user defined labels. The returned object must
             be deleted by the caller using delete_user_labels()
    """
    return _ida_hexrays.restore_user_labels(*args)
def restore_user_cmts(*args):
    """
    restore_user_cmts(func_ea) -> user_cmts_t
    Restore user defined comments from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of user defined comments. The returned object must
             be deleted by the caller using delete_user_cmts()
    """
    return _ida_hexrays.restore_user_cmts(*args)
def restore_user_numforms(*args):
    """
    restore_user_numforms(func_ea) -> user_numforms_t
    Restore user defined number formats from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of user defined number formats. The returned
             object must be deleted by the caller using
             delete_user_numforms()
    """
    return _ida_hexrays.restore_user_numforms(*args)
def restore_user_iflags(*args):
    """
    restore_user_iflags(func_ea) -> user_iflags_t
    Restore user defined citem iflags from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of user defined iflags. The returned object must
             be deleted by the caller using delete_user_iflags()
    """
    return _ida_hexrays.restore_user_iflags(*args)
def restore_user_unions(*args):
    """
    restore_user_unions(func_ea) -> user_unions_t
    Restore user defined union field selections from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of union field selections The returned object must
             be deleted by the caller using delete_user_unions()
    """
    return _ida_hexrays.restore_user_unions(*args)
class cfunc_t(object):
    """
    Proxy of C++ cfunc_t class.

    The result of decompiling one function: entry address, microcode array
    (mba), ctree body, user annotations and the rendered pseudocode text.
    Instances are produced by decompile()/decompile_func(); the Python-side
    constructor is intentionally disabled.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    # Entry address of the decompiled function.
    entry_ea = _swig_property(_ida_hexrays.cfunc_t_entry_ea_get, _ida_hexrays.cfunc_t_entry_ea_set)
    # Underlying microcode array.
    mba = _swig_property(_ida_hexrays.cfunc_t_mba_get, _ida_hexrays.cfunc_t_mba_set)
    # Function body as a ctree (cinsn_t, typically a block).
    body = _swig_property(_ida_hexrays.cfunc_t_body_get, _ida_hexrays.cfunc_t_body_set)
    # Read-only: argument lvar indexes (no setter exposed).
    argidx = _swig_property(_ida_hexrays.cfunc_t_argidx_get)
    maturity = _swig_property(_ida_hexrays.cfunc_t_maturity_get, _ida_hexrays.cfunc_t_maturity_set)
    # User annotation collections (see save_user_* / restore_user_* above).
    user_labels = _swig_property(_ida_hexrays.cfunc_t_user_labels_get, _ida_hexrays.cfunc_t_user_labels_set)
    user_cmts = _swig_property(_ida_hexrays.cfunc_t_user_cmts_get, _ida_hexrays.cfunc_t_user_cmts_set)
    numforms = _swig_property(_ida_hexrays.cfunc_t_numforms_get, _ida_hexrays.cfunc_t_numforms_set)
    user_iflags = _swig_property(_ida_hexrays.cfunc_t_user_iflags_get, _ida_hexrays.cfunc_t_user_iflags_set)
    user_unions = _swig_property(_ida_hexrays.cfunc_t_user_unions_get, _ida_hexrays.cfunc_t_user_unions_set)
    # Reference count and CFS_* state bits of the cached result.
    refcnt = _swig_property(_ida_hexrays.cfunc_t_refcnt_get, _ida_hexrays.cfunc_t_refcnt_set)
    statebits = _swig_property(_ida_hexrays.cfunc_t_statebits_get, _ida_hexrays.cfunc_t_statebits_set)
    hdrlines = _swig_property(_ida_hexrays.cfunc_t_hdrlines_get, _ida_hexrays.cfunc_t_hdrlines_set)
    treeitems = _swig_property(_ida_hexrays.cfunc_t_treeitems_get, _ida_hexrays.cfunc_t_treeitems_set)
    __swig_destroy__ = _ida_hexrays.delete_cfunc_t
    __del__ = lambda self : None;
    def release(self, *args):
        """
        release(self)
        """
        return _ida_hexrays.cfunc_t_release(self, *args)
    def build_c_tree(self, *args):
        """
        build_c_tree(self)
        """
        return _ida_hexrays.cfunc_t_build_c_tree(self, *args)
    def verify(self, *args):
        """
        verify(self, aul, even_without_debugger)
        """
        return _ida_hexrays.cfunc_t_verify(self, *args)
    def print_dcl(self, *args):
        """
        print_dcl(self)
        """
        return _ida_hexrays.cfunc_t_print_dcl(self, *args)
    def print_func(self, *args):
        """
        print_func(self, vp)
        """
        return _ida_hexrays.cfunc_t_print_func(self, *args)
    def get_func_type(self, *args):
        """
        get_func_type(self, type) -> bool
        """
        return _ida_hexrays.cfunc_t_get_func_type(self, *args)
    def get_lvars(self, *args):
        """
        get_lvars(self) -> lvars_t
        """
        return _ida_hexrays.cfunc_t_get_lvars(self, *args)
    def get_stkoff_delta(self, *args):
        """
        get_stkoff_delta(self) -> sval_t
        """
        return _ida_hexrays.cfunc_t_get_stkoff_delta(self, *args)
    def find_label(self, *args):
        """
        find_label(self, label) -> citem_t
        """
        return _ida_hexrays.cfunc_t_find_label(self, *args)
    def remove_unused_labels(self, *args):
        """
        remove_unused_labels(self)
        """
        return _ida_hexrays.cfunc_t_remove_unused_labels(self, *args)
    # --- User comment / iflag accessors -------------------------------
    def get_user_cmt(self, *args):
        """
        get_user_cmt(self, loc, rt) -> char const *
        """
        return _ida_hexrays.cfunc_t_get_user_cmt(self, *args)
    def set_user_cmt(self, *args):
        """
        set_user_cmt(self, loc, cmt)
        """
        return _ida_hexrays.cfunc_t_set_user_cmt(self, *args)
    def get_user_iflags(self, *args):
        """
        get_user_iflags(self, loc) -> int32
        """
        return _ida_hexrays.cfunc_t_get_user_iflags(self, *args)
    def set_user_iflags(self, *args):
        """
        set_user_iflags(self, loc, iflags)
        """
        return _ida_hexrays.cfunc_t_set_user_iflags(self, *args)
    def has_orphan_cmts(self, *args):
        """
        has_orphan_cmts(self) -> bool
        """
        return _ida_hexrays.cfunc_t_has_orphan_cmts(self, *args)
    def del_orphan_cmts(self, *args):
        """
        del_orphan_cmts(self) -> int
        """
        return _ida_hexrays.cfunc_t_del_orphan_cmts(self, *args)
    def get_user_union_selection(self, *args):
        """
        get_user_union_selection(self, ea, path) -> bool
        """
        return _ida_hexrays.cfunc_t_get_user_union_selection(self, *args)
    def set_user_union_selection(self, *args):
        """
        set_user_union_selection(self, ea, path)
        """
        return _ida_hexrays.cfunc_t_set_user_union_selection(self, *args)
    # --- Persist this function's annotations (method forms of the
    # module-level save_user_* functions) ------------------------------
    def save_user_labels(self, *args):
        """
        save_user_labels(self)
        """
        return _ida_hexrays.cfunc_t_save_user_labels(self, *args)
    def save_user_cmts(self, *args):
        """
        save_user_cmts(self)
        """
        return _ida_hexrays.cfunc_t_save_user_cmts(self, *args)
    def save_user_numforms(self, *args):
        """
        save_user_numforms(self)
        """
        return _ida_hexrays.cfunc_t_save_user_numforms(self, *args)
    def save_user_iflags(self, *args):
        """
        save_user_iflags(self)
        """
        return _ida_hexrays.cfunc_t_save_user_iflags(self, *args)
    def save_user_unions(self, *args):
        """
        save_user_unions(self)
        """
        return _ida_hexrays.cfunc_t_save_user_unions(self, *args)
    # --- Pseudocode text & mapping helpers ----------------------------
    def get_line_item(self, *args):
        """
        get_line_item(self, line, x, is_ctree_line, phead, pitem, ptail) -> bool
        """
        return _ida_hexrays.cfunc_t_get_line_item(self, *args)
    def get_warnings(self, *args):
        """
        get_warnings(self) -> hexwarns_t
        """
        return _ida_hexrays.cfunc_t_get_warnings(self, *args)
    def get_eamap(self, *args):
        """
        get_eamap(self) -> eamap_t
        """
        return _ida_hexrays.cfunc_t_get_eamap(self, *args)
    def get_boundaries(self, *args):
        """
        get_boundaries(self) -> boundaries_t
        """
        return _ida_hexrays.cfunc_t_get_boundaries(self, *args)
    def get_pseudocode(self, *args):
        """
        get_pseudocode(self) -> strvec_t
        """
        return _ida_hexrays.cfunc_t_get_pseudocode(self, *args)
    def refresh_func_ctext(self, *args):
        """
        refresh_func_ctext(self)
        """
        return _ida_hexrays.cfunc_t_refresh_func_ctext(self, *args)
    def gather_derefs(self, *args):
        """
        gather_derefs(self, ci, udm=None) -> bool
        """
        return _ida_hexrays.cfunc_t_gather_derefs(self, *args)
    def find_item_coords(self, *args):
        """
        find_item_coords(self, item, px, py) -> bool
        find_item_coords(self, item) -> PyObject *
        """
        return _ida_hexrays.cfunc_t_find_item_coords(self, *args)
    def __str__(self, *args):
        """
        __str__(self) -> qstring
        """
        return _ida_hexrays.cfunc_t___str__(self, *args)
# Register the proxy class with the SWIG runtime.
cfunc_t_swigregister = _ida_hexrays.cfunc_t_swigregister
cfunc_t_swigregister(cfunc_t)
# Display / state / decompilation flag constants:
CIT_COLLAPSED = _ida_hexrays.CIT_COLLAPSED
"""
display element in collapsed form
"""
CFS_BOUNDS = _ida_hexrays.CFS_BOUNDS
"""
'eamap' and 'boundaries' are ready
"""
CFS_TEXT = _ida_hexrays.CFS_TEXT
"""
'sv' is ready (and hdrlines)
"""
CFS_LVARS_HIDDEN = _ida_hexrays.CFS_LVARS_HIDDEN
"""
local variable definitions are collapsed
"""
DECOMP_NO_WAIT = _ida_hexrays.DECOMP_NO_WAIT
"""
do not display waitbox
"""
DECOMP_NO_CACHE = _ida_hexrays.DECOMP_NO_CACHE
"""
do not use decompilation cache
"""
DECOMP_NO_FRAME = _ida_hexrays.DECOMP_NO_FRAME
"""
do not use function frame info (only snippet mode)
"""
DECOMP_WARNINGS = _ida_hexrays.DECOMP_WARNINGS
"""
display warnings in the output window
"""
def decompile(*args):
    """
    decompile(mbr, hf, flags=0) -> cfuncptr_t
    Decompile a snippet or a function.
    @param mbr: what to decompile (C++: const mba_ranges_t &)
    @param hf: extended error information (if failed) (C++:
               hexrays_failure_t *)
    @param flags: bitwise combination of decompile() flags ... bits (C++:
                  int)
    @return: pointer to the decompilation result (a reference counted
             pointer). NULL if failed.
    """
    return _ida_hexrays.decompile(*args)
def decompile_func(*args):
    """
    decompile_func(pfn, hf, flags=0) -> cfuncptr_t
    Decompile a function. Multiple decompilations of the same function
    return the same object.
    @param pfn: pointer to function to decompile (C++: func_t *)
    @param hf: extended error information (if failed) (C++:
               hexrays_failure_t *)
    @param flags: bitwise combination of decompile() flags ... bits (C++:
                  int)
    @return: pointer to the decompilation result (a reference counted
             pointer). NULL if failed.
    """
    return _ida_hexrays.decompile_func(*args)
def gen_microcode(*args):
    """
    gen_microcode(mbr, hf, retlist=None, flags=0, reqmat=MMAT_GLBOPT3) -> mbl_array_t
    Generate microcode of an arbitrary code snippet
    @param mbr: snippet ranges (C++: const mba_ranges_t &)
    @param hf: extended error information (if failed) (C++:
               hexrays_failure_t *)
    @param retlist: list of registers the snippet returns (C++: const
                    mlist_t *)
    @param flags: bitwise combination of decompile() flags ... bits (C++:
                  int)
    @param reqmat: required microcode maturity (C++: mba_maturity_t)
    @return: pointer to the microcode, NULL if failed.
    """
    return _ida_hexrays.gen_microcode(*args)
# --- Decompilation cache management ---------------------------------
def mark_cfunc_dirty(*args):
    """
    mark_cfunc_dirty(ea, close_views=False) -> bool
    Flush the cached decompilation results. Erases a cache entry for the
    specified function.
    @param ea: function to erase from the cache (C++: ea_t)
    @param close_views: close pseudocode windows that show the function
                        (C++: bool)
    @return: if a cache entry existed.
    """
    return _ida_hexrays.mark_cfunc_dirty(*args)
def clear_cached_cfuncs(*args):
    """
    clear_cached_cfuncs()
    Flush all cached decompilation results.
    """
    return _ida_hexrays.clear_cached_cfuncs(*args)
def has_cached_cfunc(*args):
    """
    has_cached_cfunc(ea) -> bool
    Do we have a cached decompilation result for 'ea'?
    @param ea (C++: ea_t)
    """
    return _ida_hexrays.has_cached_cfunc(*args)
def get_ctype_name(*args):
    """
    get_ctype_name(op) -> char const *
    """
    return _ida_hexrays.get_ctype_name(*args)
def create_field_name(*args):
    """
    create_field_name(type, offset=BADADDR) -> qstring
    """
    return _ida_hexrays.create_field_name(*args)
# Hex-Rays event codes re-exported from the native module. The hxe_*
# constants cover decompilation pipeline stages and pseudocode-view UI
# events; the lxe_* constants cover local-variable change notifications.
hxe_flowchart = _ida_hexrays.hxe_flowchart
hxe_stkpnts = _ida_hexrays.hxe_stkpnts
hxe_prolog = _ida_hexrays.hxe_prolog
hxe_microcode = _ida_hexrays.hxe_microcode
hxe_preoptimized = _ida_hexrays.hxe_preoptimized
hxe_locopt = _ida_hexrays.hxe_locopt
hxe_prealloc = _ida_hexrays.hxe_prealloc
hxe_glbopt = _ida_hexrays.hxe_glbopt
hxe_structural = _ida_hexrays.hxe_structural
hxe_maturity = _ida_hexrays.hxe_maturity
hxe_interr = _ida_hexrays.hxe_interr
hxe_combine = _ida_hexrays.hxe_combine
hxe_print_func = _ida_hexrays.hxe_print_func
hxe_func_printed = _ida_hexrays.hxe_func_printed
hxe_resolve_stkaddrs = _ida_hexrays.hxe_resolve_stkaddrs
hxe_open_pseudocode = _ida_hexrays.hxe_open_pseudocode
hxe_switch_pseudocode = _ida_hexrays.hxe_switch_pseudocode
hxe_refresh_pseudocode = _ida_hexrays.hxe_refresh_pseudocode
hxe_close_pseudocode = _ida_hexrays.hxe_close_pseudocode
hxe_keyboard = _ida_hexrays.hxe_keyboard
hxe_right_click = _ida_hexrays.hxe_right_click
hxe_double_click = _ida_hexrays.hxe_double_click
hxe_curpos = _ida_hexrays.hxe_curpos
hxe_create_hint = _ida_hexrays.hxe_create_hint
hxe_text_ready = _ida_hexrays.hxe_text_ready
hxe_populating_popup = _ida_hexrays.hxe_populating_popup
lxe_lvar_name_changed = _ida_hexrays.lxe_lvar_name_changed
lxe_lvar_type_changed = _ida_hexrays.lxe_lvar_type_changed
lxe_lvar_cmt_changed = _ida_hexrays.lxe_lvar_cmt_changed
lxe_lvar_mapping_changed = _ida_hexrays.lxe_lvar_mapping_changed
hxe_cmt_changed = _ida_hexrays.hxe_cmt_changed
# Input-source flags (keyboard vs mouse) for UI events:
USE_KEYBOARD = _ida_hexrays.USE_KEYBOARD
USE_MOUSE = _ida_hexrays.USE_MOUSE
class ctext_position_t(object):
    """
    Proxy of C++ ctext_position_t class

    A position inside decompiler output text, holding a line number
    (``lnnum``) and ``x``/``y`` coordinates.  Instances are fully
    ordered via the rich-comparison operators below.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Attributes backed by getters/setters on the underlying C++ object.
    lnnum = _swig_property(_ida_hexrays.ctext_position_t_lnnum_get, _ida_hexrays.ctext_position_t_lnnum_set)
    x = _swig_property(_ida_hexrays.ctext_position_t_x_get, _ida_hexrays.ctext_position_t_x_set)
    y = _swig_property(_ida_hexrays.ctext_position_t_y_get, _ida_hexrays.ctext_position_t_y_set)
    def in_ctree(self, *args):
        """
        in_ctree(self, hdrlines) -> bool
        """
        return _ida_hexrays.ctext_position_t_in_ctree(self, *args)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.ctext_position_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.ctext_position_t___ne__(self, *args)
    def __lt__(self, *args):
        """
        __lt__(self, r) -> bool
        """
        return _ida_hexrays.ctext_position_t___lt__(self, *args)
    def __gt__(self, *args):
        """
        __gt__(self, r) -> bool
        """
        return _ida_hexrays.ctext_position_t___gt__(self, *args)
    def __le__(self, *args):
        """
        __le__(self, r) -> bool
        """
        return _ida_hexrays.ctext_position_t___le__(self, *args)
    def __ge__(self, *args):
        """
        __ge__(self, r) -> bool
        """
        return _ida_hexrays.ctext_position_t___ge__(self, *args)
    def compare(self, *args):
        """
        compare(self, r) -> int
        """
        return _ida_hexrays.ctext_position_t_compare(self, *args)
    def __init__(self, *args):
        """
        __init__(self, _lnnum=-1, _x=0, _y=0) -> ctext_position_t
        """
        # Standard SWIG ownership dance: attach the new C++ object to
        # self.this, falling back to direct assignment on first init.
        this = _ida_hexrays.new_ctext_position_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_ctext_position_t
    __del__ = lambda self : None;
# Register the Python proxy with the SWIG runtime type system.
ctext_position_t_swigregister = _ida_hexrays.ctext_position_t_swigregister
ctext_position_t_swigregister(ctext_position_t)
HEXRAYS_API_MAGIC = cvar.HEXRAYS_API_MAGIC
class history_item_t(ctext_position_t):
    """
    Proxy of C++ history_item_t class

    A ctext_position_t extended with an effective-address range
    (``ea``/``end``) exposed from the underlying C++ object.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    ea = _swig_property(_ida_hexrays.history_item_t_ea_get, _ida_hexrays.history_item_t_ea_set)
    end = _swig_property(_ida_hexrays.history_item_t_end_get, _ida_hexrays.history_item_t_end_set)
    def __init__(self, *args):
        """
        __init__(self, _ea=BADADDR, _lnnum=-1, _x=0, _y=0) -> history_item_t
        __init__(self, _ea, p) -> history_item_t
        """
        # Standard SWIG ownership dance (see ctext_position_t.__init__).
        this = _ida_hexrays.new_history_item_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_history_item_t
    __del__ = lambda self : None;
# Register the Python proxy with the SWIG runtime type system.
history_item_t_swigregister = _ida_hexrays.history_item_t_swigregister
history_item_t_swigregister(history_item_t)
class vdui_t(object):
    """
    Proxy of C++ vdui_t class

    Proxy for a pseudocode view: exposes view state flags, the
    decompiled function (``cfunc``/``mba``), the current cursor/item
    positions, and the user operations available on the view (renaming,
    retyping, comments, number representation, collapsing, etc.).
    Not directly constructible from Python.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    flags = _swig_property(_ida_hexrays.vdui_t_flags_get, _ida_hexrays.vdui_t_flags_set)
    # --- view state flags --------------------------------------------------
    def visible(self, *args):
        """
        visible(self) -> bool
        """
        return _ida_hexrays.vdui_t_visible(self, *args)
    def valid(self, *args):
        """
        valid(self) -> bool
        """
        return _ida_hexrays.vdui_t_valid(self, *args)
    def locked(self, *args):
        """
        locked(self) -> bool
        """
        return _ida_hexrays.vdui_t_locked(self, *args)
    def set_visible(self, *args):
        """
        set_visible(self, v)
        """
        return _ida_hexrays.vdui_t_set_visible(self, *args)
    def set_valid(self, *args):
        """
        set_valid(self, v)
        """
        return _ida_hexrays.vdui_t_set_valid(self, *args)
    def set_locked(self, *args):
        """
        set_locked(self, v) -> bool
        """
        return _ida_hexrays.vdui_t_set_locked(self, *args)
    # --- attributes backed by the C++ object -------------------------------
    view_idx = _swig_property(_ida_hexrays.vdui_t_view_idx_get, _ida_hexrays.vdui_t_view_idx_set)
    ct = _swig_property(_ida_hexrays.vdui_t_ct_get, _ida_hexrays.vdui_t_ct_set)
    toplevel = _swig_property(_ida_hexrays.vdui_t_toplevel_get, _ida_hexrays.vdui_t_toplevel_set)
    mba = _swig_property(_ida_hexrays.vdui_t_mba_get, _ida_hexrays.vdui_t_mba_set)
    cfunc = _swig_property(_ida_hexrays.vdui_t_cfunc_get, _ida_hexrays.vdui_t_cfunc_set)
    last_code = _swig_property(_ida_hexrays.vdui_t_last_code_get, _ida_hexrays.vdui_t_last_code_set)
    cpos = _swig_property(_ida_hexrays.vdui_t_cpos_get, _ida_hexrays.vdui_t_cpos_set)
    head = _swig_property(_ida_hexrays.vdui_t_head_get, _ida_hexrays.vdui_t_head_set)
    item = _swig_property(_ida_hexrays.vdui_t_item_get, _ida_hexrays.vdui_t_item_set)
    tail = _swig_property(_ida_hexrays.vdui_t_tail_get, _ida_hexrays.vdui_t_tail_set)
    # --- refresh / navigation ----------------------------------------------
    def refresh_view(self, *args):
        """
        refresh_view(self, redo_mba)
        """
        return _ida_hexrays.vdui_t_refresh_view(self, *args)
    def refresh_ctext(self, *args):
        """
        refresh_ctext(self, activate=True)
        """
        return _ida_hexrays.vdui_t_refresh_ctext(self, *args)
    def switch_to(self, *args):
        """
        switch_to(self, f, activate)
        """
        return _ida_hexrays.vdui_t_switch_to(self, *args)
    def in_ctree(self, *args):
        """
        in_ctree(self) -> bool
        """
        return _ida_hexrays.vdui_t_in_ctree(self, *args)
    def get_number(self, *args):
        """
        get_number(self) -> cnumber_t
        """
        return _ida_hexrays.vdui_t_get_number(self, *args)
    def get_current_label(self, *args):
        """
        get_current_label(self) -> int
        """
        return _ida_hexrays.vdui_t_get_current_label(self, *args)
    def clear(self, *args):
        """
        clear(self)
        """
        return _ida_hexrays.vdui_t_clear(self, *args)
    def refresh_cpos(self, *args):
        """
        refresh_cpos(self, idv) -> bool
        """
        return _ida_hexrays.vdui_t_refresh_cpos(self, *args)
    def get_current_item(self, *args):
        """
        get_current_item(self, idv) -> bool
        """
        return _ida_hexrays.vdui_t_get_current_item(self, *args)
    # --- local-variable operations -----------------------------------------
    # ui_* variants drive the interactive dialog; the plain variants apply
    # the change directly.
    def ui_rename_lvar(self, *args):
        """
        ui_rename_lvar(self, v) -> bool
        """
        return _ida_hexrays.vdui_t_ui_rename_lvar(self, *args)
    def rename_lvar(self, *args):
        """
        rename_lvar(self, v, name, is_user_name) -> bool
        """
        return _ida_hexrays.vdui_t_rename_lvar(self, *args)
    def ui_set_call_type(self, *args):
        """
        ui_set_call_type(self, e) -> bool
        """
        return _ida_hexrays.vdui_t_ui_set_call_type(self, *args)
    def ui_set_lvar_type(self, *args):
        """
        ui_set_lvar_type(self, v) -> bool
        """
        return _ida_hexrays.vdui_t_ui_set_lvar_type(self, *args)
    def set_lvar_type(self, *args):
        """
        set_lvar_type(self, v, type) -> bool
        """
        return _ida_hexrays.vdui_t_set_lvar_type(self, *args)
    def set_noptr_lvar(self, *args):
        """
        set_noptr_lvar(self, v) -> bool
        """
        return _ida_hexrays.vdui_t_set_noptr_lvar(self, *args)
    def ui_edit_lvar_cmt(self, *args):
        """
        ui_edit_lvar_cmt(self, v) -> bool
        """
        return _ida_hexrays.vdui_t_ui_edit_lvar_cmt(self, *args)
    def set_lvar_cmt(self, *args):
        """
        set_lvar_cmt(self, v, cmt) -> bool
        """
        return _ida_hexrays.vdui_t_set_lvar_cmt(self, *args)
    def ui_map_lvar(self, *args):
        """
        ui_map_lvar(self, v) -> bool
        """
        return _ida_hexrays.vdui_t_ui_map_lvar(self, *args)
    def ui_unmap_lvar(self, *args):
        """
        ui_unmap_lvar(self, v) -> bool
        """
        return _ida_hexrays.vdui_t_ui_unmap_lvar(self, *args)
    def map_lvar(self, *args):
        """
        map_lvar(self, frm, to) -> bool
        """
        return _ida_hexrays.vdui_t_map_lvar(self, *args)
    # --- structure members / globals / labels ------------------------------
    def set_strmem_type(self, *args):
        """
        set_strmem_type(self, sptr, mptr) -> bool
        """
        return _ida_hexrays.vdui_t_set_strmem_type(self, *args)
    def rename_strmem(self, *args):
        """
        rename_strmem(self, sptr, mptr) -> bool
        """
        return _ida_hexrays.vdui_t_rename_strmem(self, *args)
    def set_global_type(self, *args):
        """
        set_global_type(self, ea) -> bool
        """
        return _ida_hexrays.vdui_t_set_global_type(self, *args)
    def rename_global(self, *args):
        """
        rename_global(self, ea) -> bool
        """
        return _ida_hexrays.vdui_t_rename_global(self, *args)
    def rename_label(self, *args):
        """
        rename_label(self, label) -> bool
        """
        return _ida_hexrays.vdui_t_rename_label(self, *args)
    def jump_enter(self, *args):
        """
        jump_enter(self, idv, omflags) -> bool
        """
        return _ida_hexrays.vdui_t_jump_enter(self, *args)
    def ctree_to_disasm(self, *args):
        """
        ctree_to_disasm(self) -> bool
        """
        return _ida_hexrays.vdui_t_ctree_to_disasm(self, *args)
    # --- comments ----------------------------------------------------------
    def calc_cmt_type(self, *args):
        """
        calc_cmt_type(self, lnnum, cmttype) -> cmt_type_t
        """
        return _ida_hexrays.vdui_t_calc_cmt_type(self, *args)
    def edit_cmt(self, *args):
        """
        edit_cmt(self, loc) -> bool
        """
        return _ida_hexrays.vdui_t_edit_cmt(self, *args)
    def edit_func_cmt(self, *args):
        """
        edit_func_cmt(self) -> bool
        """
        return _ida_hexrays.vdui_t_edit_func_cmt(self, *args)
    def del_orphan_cmts(self, *args):
        """
        del_orphan_cmts(self) -> bool
        """
        return _ida_hexrays.vdui_t_del_orphan_cmts(self, *args)
    # --- number representation ---------------------------------------------
    def set_num_radix(self, *args):
        """
        set_num_radix(self, base) -> bool
        """
        return _ida_hexrays.vdui_t_set_num_radix(self, *args)
    def set_num_enum(self, *args):
        """
        set_num_enum(self) -> bool
        """
        return _ida_hexrays.vdui_t_set_num_enum(self, *args)
    def set_num_stroff(self, *args):
        """
        set_num_stroff(self) -> bool
        """
        return _ida_hexrays.vdui_t_set_num_stroff(self, *args)
    def invert_sign(self, *args):
        """
        invert_sign(self) -> bool
        """
        return _ida_hexrays.vdui_t_invert_sign(self, *args)
    def invert_bits(self, *args):
        """
        invert_bits(self) -> bool
        """
        return _ida_hexrays.vdui_t_invert_bits(self, *args)
    # --- item display ------------------------------------------------------
    def collapse_item(self, *args):
        """
        collapse_item(self, hide) -> bool
        """
        return _ida_hexrays.vdui_t_collapse_item(self, *args)
    def collapse_lvars(self, *args):
        """
        collapse_lvars(self, hide) -> bool
        """
        return _ida_hexrays.vdui_t_collapse_lvars(self, *args)
    def split_item(self, *args):
        """
        split_item(self, split) -> bool
        """
        return _ida_hexrays.vdui_t_split_item(self, *args)
    __swig_destroy__ = _ida_hexrays.delete_vdui_t
    __del__ = lambda self : None;
# Register the Python proxy with the SWIG runtime type system.
vdui_t_swigregister = _ida_hexrays.vdui_t_swigregister
vdui_t_swigregister(vdui_t)
# Comment-type constants (cmt_type_t values) re-exported from the C++ side.
CMT_NONE = cvar.CMT_NONE
CMT_TAIL = cvar.CMT_TAIL
CMT_BLOCK1 = cvar.CMT_BLOCK1
CMT_BLOCK2 = cvar.CMT_BLOCK2
CMT_LVAR = cvar.CMT_LVAR
CMT_FUNC = cvar.CMT_FUNC
CMT_ALL = cvar.CMT_ALL
# vdui_t.flags bits (queried by visible()/valid()/locked() above).
VDUI_VISIBLE = _ida_hexrays.VDUI_VISIBLE
"""
is visible?
"""
VDUI_VALID = _ida_hexrays.VDUI_VALID
"""
is valid?
"""
VDUI_LOCKED = _ida_hexrays.VDUI_LOCKED
"""
is locked?
"""
class ui_stroff_op_t(object):
    """
    Proxy of C++ ui_stroff_op_t class

    An operand descriptor (``text`` plus ``offset``) used by
    select_udt_by_offset(); supports equality comparison.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    text = _swig_property(_ida_hexrays.ui_stroff_op_t_text_get, _ida_hexrays.ui_stroff_op_t_text_set)
    offset = _swig_property(_ida_hexrays.ui_stroff_op_t_offset_get, _ida_hexrays.ui_stroff_op_t_offset_set)
    def __eq__(self, *args):
        """
        __eq__(self, r) -> bool
        """
        return _ida_hexrays.ui_stroff_op_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, r) -> bool
        """
        return _ida_hexrays.ui_stroff_op_t___ne__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> ui_stroff_op_t
        """
        # Standard SWIG ownership dance (see ctext_position_t.__init__).
        this = _ida_hexrays.new_ui_stroff_op_t(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_ui_stroff_op_t
    __del__ = lambda self : None;
# Register the Python proxy with the SWIG runtime type system.
ui_stroff_op_t_swigregister = _ida_hexrays.ui_stroff_op_t_swigregister
ui_stroff_op_t_swigregister(ui_stroff_op_t)
class ui_stroff_applicator_t(object):
    """
    Proxy of C++ ui_stroff_applicator_t class

    SWIG director class: subclass it in Python and override apply() to
    receive callbacks from select_udt_by_offset().
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def apply(self, *args):
        """
        apply(self, opnum, path) -> bool
        """
        return _ida_hexrays.ui_stroff_applicator_t_apply(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> ui_stroff_applicator_t
        """
        # Director setup: pass the Python instance to C++ only for
        # subclasses, so virtual calls can be routed back into Python.
        if self.__class__ == ui_stroff_applicator_t:
            _self = None
        else:
            _self = self
        this = _ida_hexrays.new_ui_stroff_applicator_t(_self, *args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_ui_stroff_applicator_t
    __del__ = lambda self : None;
    def __disown__(self):
        # Transfer ownership of the C++ object to the C++ side; keep only
        # a weak proxy on the Python side.
        self.this.disown()
        _ida_hexrays.disown_ui_stroff_applicator_t(self)
        return weakref_proxy(self)
# Register the Python proxy with the SWIG runtime type system.
ui_stroff_applicator_t_swigregister = _ida_hexrays.ui_stroff_applicator_t_swigregister
ui_stroff_applicator_t_swigregister(ui_stroff_applicator_t)
def select_udt_by_offset(*args):
    """
    select_udt_by_offset(udts, ops, applicator) -> int

    Select UDT
    @param udts: list of UDT tinfo_t for the selection, if NULL or empty
                 then UDTs from the "Local types" will be used (C++: const
                 qvector < tinfo_t > *)
    @param ops: operands (C++: const ui_stroff_ops_t &)
    @param applicator (C++: ui_stroff_applicator_t &)
    """
    # Thin SWIG shim: forward all arguments to the native entry point.
    native = _ida_hexrays.select_udt_by_offset
    return native(*args)
# ---------------------------------------------------------------------------
# hx_* dispatch-index constants re-exported from the native module, grouped
# by the C++ type they belong to.  Values are taken verbatim from
# _ida_hexrays; nothing is computed here.
# ---------------------------------------------------------------------------
# user_numforms_t (number-format map) accessors.
hx_user_numforms_begin = _ida_hexrays.hx_user_numforms_begin
hx_user_numforms_end = _ida_hexrays.hx_user_numforms_end
hx_user_numforms_next = _ida_hexrays.hx_user_numforms_next
hx_user_numforms_prev = _ida_hexrays.hx_user_numforms_prev
hx_user_numforms_first = _ida_hexrays.hx_user_numforms_first
hx_user_numforms_second = _ida_hexrays.hx_user_numforms_second
hx_user_numforms_find = _ida_hexrays.hx_user_numforms_find
hx_user_numforms_insert = _ida_hexrays.hx_user_numforms_insert
hx_user_numforms_erase = _ida_hexrays.hx_user_numforms_erase
hx_user_numforms_clear = _ida_hexrays.hx_user_numforms_clear
hx_user_numforms_size = _ida_hexrays.hx_user_numforms_size
hx_user_numforms_free = _ida_hexrays.hx_user_numforms_free
hx_user_numforms_new = _ida_hexrays.hx_user_numforms_new
# lvar_mapping_t accessors.
hx_lvar_mapping_begin = _ida_hexrays.hx_lvar_mapping_begin
hx_lvar_mapping_end = _ida_hexrays.hx_lvar_mapping_end
hx_lvar_mapping_next = _ida_hexrays.hx_lvar_mapping_next
hx_lvar_mapping_prev = _ida_hexrays.hx_lvar_mapping_prev
hx_lvar_mapping_first = _ida_hexrays.hx_lvar_mapping_first
hx_lvar_mapping_second = _ida_hexrays.hx_lvar_mapping_second
hx_lvar_mapping_find = _ida_hexrays.hx_lvar_mapping_find
hx_lvar_mapping_insert = _ida_hexrays.hx_lvar_mapping_insert
hx_lvar_mapping_erase = _ida_hexrays.hx_lvar_mapping_erase
hx_lvar_mapping_clear = _ida_hexrays.hx_lvar_mapping_clear
hx_lvar_mapping_size = _ida_hexrays.hx_lvar_mapping_size
hx_lvar_mapping_free = _ida_hexrays.hx_lvar_mapping_free
hx_lvar_mapping_new = _ida_hexrays.hx_lvar_mapping_new
# udcall_map_t accessors.
hx_udcall_map_begin = _ida_hexrays.hx_udcall_map_begin
hx_udcall_map_end = _ida_hexrays.hx_udcall_map_end
hx_udcall_map_next = _ida_hexrays.hx_udcall_map_next
hx_udcall_map_prev = _ida_hexrays.hx_udcall_map_prev
hx_udcall_map_first = _ida_hexrays.hx_udcall_map_first
hx_udcall_map_second = _ida_hexrays.hx_udcall_map_second
hx_udcall_map_find = _ida_hexrays.hx_udcall_map_find
hx_udcall_map_insert = _ida_hexrays.hx_udcall_map_insert
hx_udcall_map_erase = _ida_hexrays.hx_udcall_map_erase
hx_udcall_map_clear = _ida_hexrays.hx_udcall_map_clear
hx_udcall_map_size = _ida_hexrays.hx_udcall_map_size
hx_udcall_map_free = _ida_hexrays.hx_udcall_map_free
hx_udcall_map_new = _ida_hexrays.hx_udcall_map_new
# user_cmts_t accessors.
hx_user_cmts_begin = _ida_hexrays.hx_user_cmts_begin
hx_user_cmts_end = _ida_hexrays.hx_user_cmts_end
hx_user_cmts_next = _ida_hexrays.hx_user_cmts_next
hx_user_cmts_prev = _ida_hexrays.hx_user_cmts_prev
hx_user_cmts_first = _ida_hexrays.hx_user_cmts_first
hx_user_cmts_second = _ida_hexrays.hx_user_cmts_second
hx_user_cmts_find = _ida_hexrays.hx_user_cmts_find
hx_user_cmts_insert = _ida_hexrays.hx_user_cmts_insert
hx_user_cmts_erase = _ida_hexrays.hx_user_cmts_erase
hx_user_cmts_clear = _ida_hexrays.hx_user_cmts_clear
hx_user_cmts_size = _ida_hexrays.hx_user_cmts_size
hx_user_cmts_free = _ida_hexrays.hx_user_cmts_free
hx_user_cmts_new = _ida_hexrays.hx_user_cmts_new
# user_iflags_t accessors.
hx_user_iflags_begin = _ida_hexrays.hx_user_iflags_begin
hx_user_iflags_end = _ida_hexrays.hx_user_iflags_end
hx_user_iflags_next = _ida_hexrays.hx_user_iflags_next
hx_user_iflags_prev = _ida_hexrays.hx_user_iflags_prev
hx_user_iflags_first = _ida_hexrays.hx_user_iflags_first
hx_user_iflags_second = _ida_hexrays.hx_user_iflags_second
hx_user_iflags_find = _ida_hexrays.hx_user_iflags_find
hx_user_iflags_insert = _ida_hexrays.hx_user_iflags_insert
hx_user_iflags_erase = _ida_hexrays.hx_user_iflags_erase
hx_user_iflags_clear = _ida_hexrays.hx_user_iflags_clear
hx_user_iflags_size = _ida_hexrays.hx_user_iflags_size
hx_user_iflags_free = _ida_hexrays.hx_user_iflags_free
hx_user_iflags_new = _ida_hexrays.hx_user_iflags_new
# user_unions_t accessors.
hx_user_unions_begin = _ida_hexrays.hx_user_unions_begin
hx_user_unions_end = _ida_hexrays.hx_user_unions_end
hx_user_unions_next = _ida_hexrays.hx_user_unions_next
hx_user_unions_prev = _ida_hexrays.hx_user_unions_prev
hx_user_unions_first = _ida_hexrays.hx_user_unions_first
hx_user_unions_second = _ida_hexrays.hx_user_unions_second
hx_user_unions_find = _ida_hexrays.hx_user_unions_find
hx_user_unions_insert = _ida_hexrays.hx_user_unions_insert
hx_user_unions_erase = _ida_hexrays.hx_user_unions_erase
hx_user_unions_clear = _ida_hexrays.hx_user_unions_clear
hx_user_unions_size = _ida_hexrays.hx_user_unions_size
hx_user_unions_free = _ida_hexrays.hx_user_unions_free
hx_user_unions_new = _ida_hexrays.hx_user_unions_new
# user_labels_t accessors.
hx_user_labels_begin = _ida_hexrays.hx_user_labels_begin
hx_user_labels_end = _ida_hexrays.hx_user_labels_end
hx_user_labels_next = _ida_hexrays.hx_user_labels_next
hx_user_labels_prev = _ida_hexrays.hx_user_labels_prev
hx_user_labels_first = _ida_hexrays.hx_user_labels_first
hx_user_labels_second = _ida_hexrays.hx_user_labels_second
hx_user_labels_find = _ida_hexrays.hx_user_labels_find
hx_user_labels_insert = _ida_hexrays.hx_user_labels_insert
hx_user_labels_erase = _ida_hexrays.hx_user_labels_erase
hx_user_labels_clear = _ida_hexrays.hx_user_labels_clear
hx_user_labels_size = _ida_hexrays.hx_user_labels_size
hx_user_labels_free = _ida_hexrays.hx_user_labels_free
hx_user_labels_new = _ida_hexrays.hx_user_labels_new
# eamap_t accessors.
hx_eamap_begin = _ida_hexrays.hx_eamap_begin
hx_eamap_end = _ida_hexrays.hx_eamap_end
hx_eamap_next = _ida_hexrays.hx_eamap_next
hx_eamap_prev = _ida_hexrays.hx_eamap_prev
hx_eamap_first = _ida_hexrays.hx_eamap_first
hx_eamap_second = _ida_hexrays.hx_eamap_second
hx_eamap_find = _ida_hexrays.hx_eamap_find
hx_eamap_insert = _ida_hexrays.hx_eamap_insert
hx_eamap_erase = _ida_hexrays.hx_eamap_erase
hx_eamap_clear = _ida_hexrays.hx_eamap_clear
hx_eamap_size = _ida_hexrays.hx_eamap_size
hx_eamap_free = _ida_hexrays.hx_eamap_free
hx_eamap_new = _ida_hexrays.hx_eamap_new
# boundaries_t accessors.
hx_boundaries_begin = _ida_hexrays.hx_boundaries_begin
hx_boundaries_end = _ida_hexrays.hx_boundaries_end
hx_boundaries_next = _ida_hexrays.hx_boundaries_next
hx_boundaries_prev = _ida_hexrays.hx_boundaries_prev
hx_boundaries_first = _ida_hexrays.hx_boundaries_first
hx_boundaries_second = _ida_hexrays.hx_boundaries_second
hx_boundaries_find = _ida_hexrays.hx_boundaries_find
hx_boundaries_insert = _ida_hexrays.hx_boundaries_insert
hx_boundaries_erase = _ida_hexrays.hx_boundaries_erase
hx_boundaries_clear = _ida_hexrays.hx_boundaries_clear
hx_boundaries_size = _ida_hexrays.hx_boundaries_size
hx_boundaries_free = _ida_hexrays.hx_boundaries_free
hx_boundaries_new = _ida_hexrays.hx_boundaries_new
# block_chains_t accessors.
hx_block_chains_begin = _ida_hexrays.hx_block_chains_begin
hx_block_chains_end = _ida_hexrays.hx_block_chains_end
hx_block_chains_next = _ida_hexrays.hx_block_chains_next
hx_block_chains_prev = _ida_hexrays.hx_block_chains_prev
hx_block_chains_get = _ida_hexrays.hx_block_chains_get
hx_block_chains_find = _ida_hexrays.hx_block_chains_find
hx_block_chains_insert = _ida_hexrays.hx_block_chains_insert
hx_block_chains_erase = _ida_hexrays.hx_block_chains_erase
hx_block_chains_clear = _ida_hexrays.hx_block_chains_clear
hx_block_chains_size = _ida_hexrays.hx_block_chains_size
hx_block_chains_free = _ida_hexrays.hx_block_chains_free
hx_block_chains_new = _ida_hexrays.hx_block_chains_new
# valrng_t (value-range) methods.
hx_valrng_t_clear = _ida_hexrays.hx_valrng_t_clear
hx_valrng_t_copy = _ida_hexrays.hx_valrng_t_copy
hx_valrng_t_assign = _ida_hexrays.hx_valrng_t_assign
hx_valrng_t_compare = _ida_hexrays.hx_valrng_t_compare
hx_valrng_t_set_eq = _ida_hexrays.hx_valrng_t_set_eq
hx_valrng_t_set_cmp = _ida_hexrays.hx_valrng_t_set_cmp
hx_valrng_t_reduce_size = _ida_hexrays.hx_valrng_t_reduce_size
hx_valrng_t_intersect_with = _ida_hexrays.hx_valrng_t_intersect_with
hx_valrng_t_unite_with = _ida_hexrays.hx_valrng_t_unite_with
hx_valrng_t_inverse = _ida_hexrays.hx_valrng_t_inverse
hx_valrng_t_has = _ida_hexrays.hx_valrng_t_has
hx_valrng_t_print = _ida_hexrays.hx_valrng_t_print
hx_valrng_t_dstr = _ida_hexrays.hx_valrng_t_dstr
hx_valrng_t_cvt_to_single_value = _ida_hexrays.hx_valrng_t_cvt_to_single_value
hx_valrng_t_cvt_to_cmp = _ida_hexrays.hx_valrng_t_cvt_to_cmp
# Microcode helpers and opcode predicates.
hx_get_merror_desc = _ida_hexrays.hx_get_merror_desc
hx_reg2mreg = _ida_hexrays.hx_reg2mreg
hx_mreg2reg = _ida_hexrays.hx_mreg2reg
hx_install_optinsn_handler = _ida_hexrays.hx_install_optinsn_handler
hx_remove_optinsn_handler = _ida_hexrays.hx_remove_optinsn_handler
hx_install_optblock_handler = _ida_hexrays.hx_install_optblock_handler
hx_remove_optblock_handler = _ida_hexrays.hx_remove_optblock_handler
hx_must_mcode_close_block = _ida_hexrays.hx_must_mcode_close_block
hx_is_mcode_propagatable = _ida_hexrays.hx_is_mcode_propagatable
hx_negate_mcode_relation = _ida_hexrays.hx_negate_mcode_relation
hx_swap_mcode_relation = _ida_hexrays.hx_swap_mcode_relation
hx_get_signed_mcode = _ida_hexrays.hx_get_signed_mcode
hx_get_unsigned_mcode = _ida_hexrays.hx_get_unsigned_mcode
hx_mcode_modifies_d = _ida_hexrays.hx_mcode_modifies_d
# Printers and operand locators.
hx_operand_locator_t_compare = _ida_hexrays.hx_operand_locator_t_compare
hx_vd_printer_t_print = _ida_hexrays.hx_vd_printer_t_print
hx_file_printer_t_print = _ida_hexrays.hx_file_printer_t_print
hx_qstring_printer_t_print = _ida_hexrays.hx_qstring_printer_t_print
# Type-system helpers.
hx_dstr = _ida_hexrays.hx_dstr
hx_is_type_correct = _ida_hexrays.hx_is_type_correct
hx_is_small_udt = _ida_hexrays.hx_is_small_udt
hx_is_nonbool_type = _ida_hexrays.hx_is_nonbool_type
hx_is_bool_type = _ida_hexrays.hx_is_bool_type
hx_partial_type_num = _ida_hexrays.hx_partial_type_num
hx_get_float_type = _ida_hexrays.hx_get_float_type
hx_get_int_type_by_width_and_sign = _ida_hexrays.hx_get_int_type_by_width_and_sign
hx_get_unk_type = _ida_hexrays.hx_get_unk_type
hx_dummy_ptrtype = _ida_hexrays.hx_dummy_ptrtype
hx_get_member_type = _ida_hexrays.hx_get_member_type
hx_make_pointer = _ida_hexrays.hx_make_pointer
hx_create_typedef = _ida_hexrays.hx_create_typedef
hx_get_type = _ida_hexrays.hx_get_type
hx_set_type = _ida_hexrays.hx_set_type
# vdloc_t (variable location) methods.
hx_vdloc_t_dstr = _ida_hexrays.hx_vdloc_t_dstr
hx_vdloc_t_compare = _ida_hexrays.hx_vdloc_t_compare
hx_vdloc_t_is_aliasable = _ida_hexrays.hx_vdloc_t_is_aliasable
hx_print_vdloc = _ida_hexrays.hx_print_vdloc
hx_arglocs_overlap = _ida_hexrays.hx_arglocs_overlap
# lvar_t / lvars_t (local variables) methods.
hx_lvar_locator_t_compare = _ida_hexrays.hx_lvar_locator_t_compare
hx_lvar_locator_t_dstr = _ida_hexrays.hx_lvar_locator_t_dstr
hx_lvar_t_dstr = _ida_hexrays.hx_lvar_t_dstr
hx_lvar_t_is_promoted_arg = _ida_hexrays.hx_lvar_t_is_promoted_arg
hx_lvar_t_accepts_type = _ida_hexrays.hx_lvar_t_accepts_type
hx_lvar_t_set_lvar_type = _ida_hexrays.hx_lvar_t_set_lvar_type
hx_lvar_t_set_width = _ida_hexrays.hx_lvar_t_set_width
hx_lvar_t_append_list = _ida_hexrays.hx_lvar_t_append_list
hx_lvars_t_find_stkvar = _ida_hexrays.hx_lvars_t_find_stkvar
hx_lvars_t_find = _ida_hexrays.hx_lvars_t_find
hx_lvars_t_find_lvar = _ida_hexrays.hx_lvars_t_find_lvar
hx_restore_user_lvar_settings = _ida_hexrays.hx_restore_user_lvar_settings
hx_save_user_lvar_settings = _ida_hexrays.hx_save_user_lvar_settings
hx_modify_user_lvars = _ida_hexrays.hx_modify_user_lvars
# User-defined calls and microcode filters.
hx_restore_user_defined_calls = _ida_hexrays.hx_restore_user_defined_calls
hx_save_user_defined_calls = _ida_hexrays.hx_save_user_defined_calls
hx_parse_user_call = _ida_hexrays.hx_parse_user_call
hx_convert_to_user_call = _ida_hexrays.hx_convert_to_user_call
hx_install_microcode_filter = _ida_hexrays.hx_install_microcode_filter
hx_udc_filter_t_init = _ida_hexrays.hx_udc_filter_t_init
hx_udc_filter_t_apply = _ida_hexrays.hx_udc_filter_t_apply
# bitset_t methods.
hx_bitset_t_bitset_t = _ida_hexrays.hx_bitset_t_bitset_t
hx_bitset_t_copy = _ida_hexrays.hx_bitset_t_copy
hx_bitset_t_add = _ida_hexrays.hx_bitset_t_add
hx_bitset_t_add_ = _ida_hexrays.hx_bitset_t_add_
hx_bitset_t_add__ = _ida_hexrays.hx_bitset_t_add__
hx_bitset_t_sub = _ida_hexrays.hx_bitset_t_sub
hx_bitset_t_sub_ = _ida_hexrays.hx_bitset_t_sub_
hx_bitset_t_sub__ = _ida_hexrays.hx_bitset_t_sub__
hx_bitset_t_cut_at = _ida_hexrays.hx_bitset_t_cut_at
hx_bitset_t_shift_down = _ida_hexrays.hx_bitset_t_shift_down
hx_bitset_t_has = _ida_hexrays.hx_bitset_t_has
hx_bitset_t_has_all = _ida_hexrays.hx_bitset_t_has_all
hx_bitset_t_has_any = _ida_hexrays.hx_bitset_t_has_any
hx_bitset_t_dstr = _ida_hexrays.hx_bitset_t_dstr
hx_bitset_t_empty = _ida_hexrays.hx_bitset_t_empty
hx_bitset_t_count = _ida_hexrays.hx_bitset_t_count
hx_bitset_t_count_ = _ida_hexrays.hx_bitset_t_count_
hx_bitset_t_last = _ida_hexrays.hx_bitset_t_last
hx_bitset_t_fill_with_ones = _ida_hexrays.hx_bitset_t_fill_with_ones
hx_bitset_t_has_common = _ida_hexrays.hx_bitset_t_has_common
hx_bitset_t_intersect = _ida_hexrays.hx_bitset_t_intersect
hx_bitset_t_is_subset_of = _ida_hexrays.hx_bitset_t_is_subset_of
hx_bitset_t_compare = _ida_hexrays.hx_bitset_t_compare
hx_bitset_t_goup = _ida_hexrays.hx_bitset_t_goup
# ivl_t / ivlset_t (interval set) methods.
hx_ivl_t_dstr = _ida_hexrays.hx_ivl_t_dstr
hx_ivl_t_compare = _ida_hexrays.hx_ivl_t_compare
hx_ivlset_t_add = _ida_hexrays.hx_ivlset_t_add
hx_ivlset_t_add_ = _ida_hexrays.hx_ivlset_t_add_
hx_ivlset_t_addmasked = _ida_hexrays.hx_ivlset_t_addmasked
hx_ivlset_t_sub = _ida_hexrays.hx_ivlset_t_sub
hx_ivlset_t_sub_ = _ida_hexrays.hx_ivlset_t_sub_
hx_ivlset_t_has_common = _ida_hexrays.hx_ivlset_t_has_common
hx_ivlset_t_print = _ida_hexrays.hx_ivlset_t_print
hx_ivlset_t_dstr = _ida_hexrays.hx_ivlset_t_dstr
hx_ivlset_t_count = _ida_hexrays.hx_ivlset_t_count
hx_ivlset_t_has_common_ = _ida_hexrays.hx_ivlset_t_has_common_
hx_ivlset_t_contains = _ida_hexrays.hx_ivlset_t_contains
hx_ivlset_t_includes = _ida_hexrays.hx_ivlset_t_includes
hx_ivlset_t_intersect = _ida_hexrays.hx_ivlset_t_intersect
hx_ivlset_t_compare = _ida_hexrays.hx_ivlset_t_compare
# Register/memory list (rlist_t / mlist_t) methods.
hx_get_mreg_name = _ida_hexrays.hx_get_mreg_name
hx_rlist_t_print = _ida_hexrays.hx_rlist_t_print
hx_rlist_t_dstr = _ida_hexrays.hx_rlist_t_dstr
hx_mlist_t_addmem = _ida_hexrays.hx_mlist_t_addmem
hx_mlist_t_print = _ida_hexrays.hx_mlist_t_print
hx_mlist_t_dstr = _ida_hexrays.hx_mlist_t_dstr
hx_mlist_t_compare = _ida_hexrays.hx_mlist_t_compare
# Operand reference types.
hx_lvar_ref_t_compare = _ida_hexrays.hx_lvar_ref_t_compare
hx_lvar_ref_t_var = _ida_hexrays.hx_lvar_ref_t_var
hx_stkvar_ref_t_compare = _ida_hexrays.hx_stkvar_ref_t_compare
hx_stkvar_ref_t_get_stkvar = _ida_hexrays.hx_stkvar_ref_t_get_stkvar
hx_fnumber_t_print = _ida_hexrays.hx_fnumber_t_print
hx_fnumber_t_dstr = _ida_hexrays.hx_fnumber_t_dstr
# mop_t (microcode operand) methods.
hx_mop_t_copy = _ida_hexrays.hx_mop_t_copy
hx_mop_t_assign = _ida_hexrays.hx_mop_t_assign
hx_mop_t_swap = _ida_hexrays.hx_mop_t_swap
hx_mop_t_erase = _ida_hexrays.hx_mop_t_erase
hx_mop_t_print = _ida_hexrays.hx_mop_t_print
hx_mop_t_dstr = _ida_hexrays.hx_mop_t_dstr
hx_mop_t_create_from_mlist = _ida_hexrays.hx_mop_t_create_from_mlist
hx_mop_t_create_from_ivlset = _ida_hexrays.hx_mop_t_create_from_ivlset
hx_mop_t_create_from_vdloc = _ida_hexrays.hx_mop_t_create_from_vdloc
hx_mop_t_create_from_scattered_vdloc = _ida_hexrays.hx_mop_t_create_from_scattered_vdloc
hx_mop_t_create_from_insn = _ida_hexrays.hx_mop_t_create_from_insn
hx_mop_t_make_number = _ida_hexrays.hx_mop_t_make_number
hx_mop_t_make_fpnum = _ida_hexrays.hx_mop_t_make_fpnum
hx_mop_t_make_reg_pair = _ida_hexrays.hx_mop_t_make_reg_pair
hx_mop_t_make_helper = _ida_hexrays.hx_mop_t_make_helper
hx_mop_t_is_bit_reg = _ida_hexrays.hx_mop_t_is_bit_reg
hx_mop_t_may_use_aliased_memory = _ida_hexrays.hx_mop_t_may_use_aliased_memory
hx_mop_t_is01 = _ida_hexrays.hx_mop_t_is01
hx_mop_t_is_sign_extended_from = _ida_hexrays.hx_mop_t_is_sign_extended_from
hx_mop_t_is_zero_extended_from = _ida_hexrays.hx_mop_t_is_zero_extended_from
hx_mop_t_equal_mops = _ida_hexrays.hx_mop_t_equal_mops
hx_mop_t_lexcompare = _ida_hexrays.hx_mop_t_lexcompare
hx_mop_t_for_all_ops = _ida_hexrays.hx_mop_t_for_all_ops
hx_mop_t_for_all_scattered_submops = _ida_hexrays.hx_mop_t_for_all_scattered_submops
hx_mop_t_is_constant = _ida_hexrays.hx_mop_t_is_constant
hx_mop_t_get_stkoff = _ida_hexrays.hx_mop_t_get_stkoff
hx_mop_t_make_low_half = _ida_hexrays.hx_mop_t_make_low_half
hx_mop_t_make_high_half = _ida_hexrays.hx_mop_t_make_high_half
hx_mop_t_make_first_half = _ida_hexrays.hx_mop_t_make_first_half
hx_mop_t_make_second_half = _ida_hexrays.hx_mop_t_make_second_half
hx_mop_t_shift_mop = _ida_hexrays.hx_mop_t_shift_mop
hx_mop_t_change_size = _ida_hexrays.hx_mop_t_change_size
hx_mop_t_preserve_side_effects = _ida_hexrays.hx_mop_t_preserve_side_effects
hx_mop_t_apply_ld_mcode = _ida_hexrays.hx_mop_t_apply_ld_mcode
# mcallarg_t / mcallinfo_t (call info) methods.
hx_mcallarg_t_print = _ida_hexrays.hx_mcallarg_t_print
hx_mcallarg_t_dstr = _ida_hexrays.hx_mcallarg_t_dstr
hx_mcallarg_t_set_regarg = _ida_hexrays.hx_mcallarg_t_set_regarg
hx_mcallinfo_t_lexcompare = _ida_hexrays.hx_mcallinfo_t_lexcompare
hx_mcallinfo_t_set_type = _ida_hexrays.hx_mcallinfo_t_set_type
hx_mcallinfo_t_get_type = _ida_hexrays.hx_mcallinfo_t_get_type
hx_mcallinfo_t_print = _ida_hexrays.hx_mcallinfo_t_print
hx_mcallinfo_t_dstr = _ida_hexrays.hx_mcallinfo_t_dstr
hx_mcases_t_compare = _ida_hexrays.hx_mcases_t_compare
hx_mcases_t_print = _ida_hexrays.hx_mcases_t_print
hx_mcases_t_dstr = _ida_hexrays.hx_mcases_t_dstr
# vivl_t / chain_t / block_chains_t / graph_chains_t methods.
hx_vivl_t_extend_to_cover = _ida_hexrays.hx_vivl_t_extend_to_cover
hx_vivl_t_intersect = _ida_hexrays.hx_vivl_t_intersect
hx_vivl_t_print = _ida_hexrays.hx_vivl_t_print
hx_vivl_t_dstr = _ida_hexrays.hx_vivl_t_dstr
hx_chain_t_print = _ida_hexrays.hx_chain_t_print
hx_chain_t_dstr = _ida_hexrays.hx_chain_t_dstr
hx_chain_t_append_list = _ida_hexrays.hx_chain_t_append_list
hx_block_chains_t_get_chain = _ida_hexrays.hx_block_chains_t_get_chain
hx_block_chains_t_print = _ida_hexrays.hx_block_chains_t_print
hx_block_chains_t_dstr = _ida_hexrays.hx_block_chains_t_dstr
hx_graph_chains_t_for_all_chains = _ida_hexrays.hx_graph_chains_t_for_all_chains
hx_graph_chains_t_release = _ida_hexrays.hx_graph_chains_t_release
# minsn_t (microcode instruction) methods.
hx_minsn_t_init = _ida_hexrays.hx_minsn_t_init
hx_minsn_t_copy = _ida_hexrays.hx_minsn_t_copy
hx_minsn_t_swap = _ida_hexrays.hx_minsn_t_swap
hx_minsn_t_print = _ida_hexrays.hx_minsn_t_print
hx_minsn_t_dstr = _ida_hexrays.hx_minsn_t_dstr
hx_minsn_t_setaddr = _ida_hexrays.hx_minsn_t_setaddr
hx_minsn_t_optimize_subtree = _ida_hexrays.hx_minsn_t_optimize_subtree
hx_minsn_t_for_all_ops = _ida_hexrays.hx_minsn_t_for_all_ops
hx_minsn_t_for_all_insns = _ida_hexrays.hx_minsn_t_for_all_insns
hx_minsn_t__make_nop = _ida_hexrays.hx_minsn_t__make_nop
hx_minsn_t_equal_insns = _ida_hexrays.hx_minsn_t_equal_insns
hx_minsn_t_lexcompare = _ida_hexrays.hx_minsn_t_lexcompare
hx_minsn_t_is_noret_call = _ida_hexrays.hx_minsn_t_is_noret_call
hx_minsn_t_is_helper = _ida_hexrays.hx_minsn_t_is_helper
hx_minsn_t_find_call = _ida_hexrays.hx_minsn_t_find_call
hx_minsn_t_has_side_effects = _ida_hexrays.hx_minsn_t_has_side_effects
hx_minsn_t_find_opcode = _ida_hexrays.hx_minsn_t_find_opcode
hx_minsn_t_find_ins_op = _ida_hexrays.hx_minsn_t_find_ins_op
hx_minsn_t_find_num_op = _ida_hexrays.hx_minsn_t_find_num_op
hx_minsn_t_modifes_d = _ida_hexrays.hx_minsn_t_modifes_d
hx_minsn_t_is_between = _ida_hexrays.hx_minsn_t_is_between
hx_minsn_t_may_use_aliased_memory = _ida_hexrays.hx_minsn_t_may_use_aliased_memory
hx_getf_reginsn = _ida_hexrays.hx_getf_reginsn
hx_getb_reginsn = _ida_hexrays.hx_getb_reginsn
# mblock_t (microcode basic block) methods.
hx_mblock_t_init = _ida_hexrays.hx_mblock_t_init
hx_mblock_t_print = _ida_hexrays.hx_mblock_t_print
hx_mblock_t_dump = _ida_hexrays.hx_mblock_t_dump
hx_mblock_t_vdump_block = _ida_hexrays.hx_mblock_t_vdump_block
hx_mblock_t_insert_into_block = _ida_hexrays.hx_mblock_t_insert_into_block
hx_mblock_t_remove_from_block = _ida_hexrays.hx_mblock_t_remove_from_block
hx_mblock_t_for_all_insns = _ida_hexrays.hx_mblock_t_for_all_insns
hx_mblock_t_for_all_ops = _ida_hexrays.hx_mblock_t_for_all_ops
hx_mblock_t_for_all_uses = _ida_hexrays.hx_mblock_t_for_all_uses
hx_mblock_t_optimize_insn = _ida_hexrays.hx_mblock_t_optimize_insn
hx_mblock_t_optimize_block = _ida_hexrays.hx_mblock_t_optimize_block
hx_mblock_t_build_lists = _ida_hexrays.hx_mblock_t_build_lists
hx_mblock_t_append_use_list = _ida_hexrays.hx_mblock_t_append_use_list
hx_mblock_t_append_def_list = _ida_hexrays.hx_mblock_t_append_def_list
hx_mblock_t_build_use_list = _ida_hexrays.hx_mblock_t_build_use_list
hx_mblock_t_build_def_list = _ida_hexrays.hx_mblock_t_build_def_list
hx_mblock_t_find_first_use = _ida_hexrays.hx_mblock_t_find_first_use
hx_mblock_t_find_redefinition = _ida_hexrays.hx_mblock_t_find_redefinition
hx_mblock_t_is_rhs_redefined = _ida_hexrays.hx_mblock_t_is_rhs_redefined
hx_mblock_t_find_access = _ida_hexrays.hx_mblock_t_find_access
hx_mblock_t_get_valranges = _ida_hexrays.hx_mblock_t_get_valranges
# mbl_array_t (microcode array) methods.
hx_mbl_array_t_idaloc2vd = _ida_hexrays.hx_mbl_array_t_idaloc2vd
hx_mbl_array_t_vd2idaloc = _ida_hexrays.hx_mbl_array_t_vd2idaloc
hx_mbl_array_t_term = _ida_hexrays.hx_mbl_array_t_term
hx_mbl_array_t_optimize_local = _ida_hexrays.hx_mbl_array_t_optimize_local
hx_mbl_array_t_build_graph = _ida_hexrays.hx_mbl_array_t_build_graph
hx_mbl_array_t_get_graph = _ida_hexrays.hx_mbl_array_t_get_graph
hx_mbl_array_t_analyze_calls = _ida_hexrays.hx_mbl_array_t_analyze_calls
hx_mbl_array_t_optimize_global = _ida_hexrays.hx_mbl_array_t_optimize_global
hx_mbl_array_t_alloc_lvars = _ida_hexrays.hx_mbl_array_t_alloc_lvars
hx_mbl_array_t_dump = _ida_hexrays.hx_mbl_array_t_dump
hx_mbl_array_t_vdump_mba = _ida_hexrays.hx_mbl_array_t_vdump_mba
hx_mbl_array_t_print = _ida_hexrays.hx_mbl_array_t_print
hx_mbl_array_t_verify = _ida_hexrays.hx_mbl_array_t_verify
hx_mbl_array_t_mark_chains_dirty = _ida_hexrays.hx_mbl_array_t_mark_chains_dirty
hx_mbl_array_t_insert_block = _ida_hexrays.hx_mbl_array_t_insert_block
hx_mbl_array_t_remove_block = _ida_hexrays.hx_mbl_array_t_remove_block
hx_mbl_array_t_remove_empty_blocks = _ida_hexrays.hx_mbl_array_t_remove_empty_blocks
hx_mbl_array_t_combine_blocks = _ida_hexrays.hx_mbl_array_t_combine_blocks
hx_mbl_array_t_for_all_ops = _ida_hexrays.hx_mbl_array_t_for_all_ops
hx_mbl_array_t_for_all_insns = _ida_hexrays.hx_mbl_array_t_for_all_insns
hx_mbl_array_t_for_all_topinsns = _ida_hexrays.hx_mbl_array_t_for_all_topinsns
hx_mbl_array_t_find_mop = _ida_hexrays.hx_mbl_array_t_find_mop
hx_mbl_array_t_arg = _ida_hexrays.hx_mbl_array_t_arg
hx_mbl_array_t_serialize = _ida_hexrays.hx_mbl_array_t_serialize
hx_mbl_array_t_deserialize = _ida_hexrays.hx_mbl_array_t_deserialize
hx_mbl_graph_t_is_accessed_globally = _ida_hexrays.hx_mbl_graph_t_is_accessed_globally
hx_mbl_graph_t_get_ud = _ida_hexrays.hx_mbl_graph_t_get_ud
hx_mbl_graph_t_get_du = _ida_hexrays.hx_mbl_graph_t_get_du
hx_codegen_t_emit = _ida_hexrays.hx_codegen_t_emit
hx_codegen_t_emit_ = _ida_hexrays.hx_codegen_t_emit_
hx_is_kreg = _ida_hexrays.hx_is_kreg
hx_get_temp_regs = _ida_hexrays.hx_get_temp_regs
hx_get_hexrays_version = _ida_hexrays.hx_get_hexrays_version
hx_open_pseudocode = _ida_hexrays.hx_open_pseudocode
hx_close_pseudocode = _ida_hexrays.hx_close_pseudocode
hx_get_widget_vdui = _ida_hexrays.hx_get_widget_vdui
hx_decompile_many = _ida_hexrays.hx_decompile_many
hx_hexrays_failure_t_desc = _ida_hexrays.hx_hexrays_failure_t_desc
hx_send_database = _ida_hexrays.hx_send_database
hx_gco_info_t_append_to_list = _ida_hexrays.hx_gco_info_t_append_to_list
hx_get_current_operand = _ida_hexrays.hx_get_current_operand
hx_remitem = _ida_hexrays.hx_remitem
hx_negated_relation = _ida_hexrays.hx_negated_relation
hx_swapped_relation = _ida_hexrays.hx_swapped_relation
hx_get_op_signness = _ida_hexrays.hx_get_op_signness
hx_asgop = _ida_hexrays.hx_asgop
hx_asgop_revert = _ida_hexrays.hx_asgop_revert
hx_cnumber_t_print = _ida_hexrays.hx_cnumber_t_print
hx_cnumber_t_value = _ida_hexrays.hx_cnumber_t_value
hx_cnumber_t_assign = _ida_hexrays.hx_cnumber_t_assign
hx_cnumber_t_compare = _ida_hexrays.hx_cnumber_t_compare
hx_var_ref_t_compare = _ida_hexrays.hx_var_ref_t_compare
hx_ctree_visitor_t_apply_to = _ida_hexrays.hx_ctree_visitor_t_apply_to
hx_ctree_visitor_t_apply_to_exprs = _ida_hexrays.hx_ctree_visitor_t_apply_to_exprs
hx_ctree_parentee_t_recalc_parent_types = _ida_hexrays.hx_ctree_parentee_t_recalc_parent_types
hx_cfunc_parentee_t_calc_rvalue_type = _ida_hexrays.hx_cfunc_parentee_t_calc_rvalue_type
hx_citem_locator_t_compare = _ida_hexrays.hx_citem_locator_t_compare
hx_citem_t_contains_expr = _ida_hexrays.hx_citem_t_contains_expr
hx_citem_t_contains_label = _ida_hexrays.hx_citem_t_contains_label
hx_citem_t_find_parent_of = _ida_hexrays.hx_citem_t_find_parent_of
hx_citem_t_find_closest_addr = _ida_hexrays.hx_citem_t_find_closest_addr
hx_cexpr_t_assign = _ida_hexrays.hx_cexpr_t_assign
hx_cexpr_t_compare = _ida_hexrays.hx_cexpr_t_compare
hx_cexpr_t_replace_by = _ida_hexrays.hx_cexpr_t_replace_by
hx_cexpr_t_cleanup = _ida_hexrays.hx_cexpr_t_cleanup
hx_cexpr_t_put_number = _ida_hexrays.hx_cexpr_t_put_number
hx_cexpr_t_print1 = _ida_hexrays.hx_cexpr_t_print1
hx_cexpr_t_calc_type = _ida_hexrays.hx_cexpr_t_calc_type
hx_cexpr_t_equal_effect = _ida_hexrays.hx_cexpr_t_equal_effect
hx_cexpr_t_is_child_of = _ida_hexrays.hx_cexpr_t_is_child_of
hx_cexpr_t_contains_operator = _ida_hexrays.hx_cexpr_t_contains_operator
hx_cexpr_t_get_high_nbit_bound = _ida_hexrays.hx_cexpr_t_get_high_nbit_bound
hx_cexpr_t_get_low_nbit_bound = _ida_hexrays.hx_cexpr_t_get_low_nbit_bound
hx_cexpr_t_requires_lvalue = _ida_hexrays.hx_cexpr_t_requires_lvalue
hx_cexpr_t_has_side_effects = _ida_hexrays.hx_cexpr_t_has_side_effects
hx_cif_t_assign = _ida_hexrays.hx_cif_t_assign
hx_cif_t_compare = _ida_hexrays.hx_cif_t_compare
hx_cloop_t_assign = _ida_hexrays.hx_cloop_t_assign
hx_cfor_t_compare = _ida_hexrays.hx_cfor_t_compare
hx_cwhile_t_compare = _ida_hexrays.hx_cwhile_t_compare
hx_cdo_t_compare = _ida_hexrays.hx_cdo_t_compare
hx_creturn_t_compare = _ida_hexrays.hx_creturn_t_compare
hx_cgoto_t_compare = _ida_hexrays.hx_cgoto_t_compare
hx_casm_t_compare = _ida_hexrays.hx_casm_t_compare
hx_cinsn_t_assign = _ida_hexrays.hx_cinsn_t_assign
hx_cinsn_t_compare = _ida_hexrays.hx_cinsn_t_compare
hx_cinsn_t_replace_by = _ida_hexrays.hx_cinsn_t_replace_by
hx_cinsn_t_cleanup = _ida_hexrays.hx_cinsn_t_cleanup
hx_cinsn_t_new_insn = _ida_hexrays.hx_cinsn_t_new_insn
hx_cinsn_t_create_if = _ida_hexrays.hx_cinsn_t_create_if
hx_cinsn_t_print = _ida_hexrays.hx_cinsn_t_print
hx_cinsn_t_print1 = _ida_hexrays.hx_cinsn_t_print1
hx_cinsn_t_is_ordinary_flow = _ida_hexrays.hx_cinsn_t_is_ordinary_flow
hx_cinsn_t_contains_insn = _ida_hexrays.hx_cinsn_t_contains_insn
hx_cinsn_t_collect_free_breaks = _ida_hexrays.hx_cinsn_t_collect_free_breaks
hx_cinsn_t_collect_free_continues = _ida_hexrays.hx_cinsn_t_collect_free_continues
hx_cblock_t_compare = _ida_hexrays.hx_cblock_t_compare
hx_carglist_t_compare = _ida_hexrays.hx_carglist_t_compare
hx_ccase_t_compare = _ida_hexrays.hx_ccase_t_compare
hx_ccases_t_compare = _ida_hexrays.hx_ccases_t_compare
hx_cswitch_t_compare = _ida_hexrays.hx_cswitch_t_compare
hx_ctree_item_t_get_memptr = _ida_hexrays.hx_ctree_item_t_get_memptr
hx_ctree_item_t_get_lvar = _ida_hexrays.hx_ctree_item_t_get_lvar
hx_ctree_item_t_get_ea = _ida_hexrays.hx_ctree_item_t_get_ea
hx_ctree_item_t_get_label_num = _ida_hexrays.hx_ctree_item_t_get_label_num
hx_lnot = _ida_hexrays.hx_lnot
hx_new_block = _ida_hexrays.hx_new_block
hx_vcreate_helper = _ida_hexrays.hx_vcreate_helper
hx_vcall_helper = _ida_hexrays.hx_vcall_helper
hx_make_num = _ida_hexrays.hx_make_num
hx_make_ref = _ida_hexrays.hx_make_ref
hx_dereference = _ida_hexrays.hx_dereference
hx_save_user_labels = _ida_hexrays.hx_save_user_labels
hx_save_user_cmts = _ida_hexrays.hx_save_user_cmts
hx_save_user_numforms = _ida_hexrays.hx_save_user_numforms
hx_save_user_iflags = _ida_hexrays.hx_save_user_iflags
hx_save_user_unions = _ida_hexrays.hx_save_user_unions
hx_restore_user_labels = _ida_hexrays.hx_restore_user_labels
hx_restore_user_cmts = _ida_hexrays.hx_restore_user_cmts
hx_restore_user_numforms = _ida_hexrays.hx_restore_user_numforms
hx_restore_user_iflags = _ida_hexrays.hx_restore_user_iflags
hx_restore_user_unions = _ida_hexrays.hx_restore_user_unions
hx_cfunc_t_build_c_tree = _ida_hexrays.hx_cfunc_t_build_c_tree
hx_cfunc_t_verify = _ida_hexrays.hx_cfunc_t_verify
hx_cfunc_t_print_dcl = _ida_hexrays.hx_cfunc_t_print_dcl
hx_cfunc_t_print_func = _ida_hexrays.hx_cfunc_t_print_func
hx_cfunc_t_get_func_type = _ida_hexrays.hx_cfunc_t_get_func_type
hx_cfunc_t_get_lvars = _ida_hexrays.hx_cfunc_t_get_lvars
hx_cfunc_t_get_stkoff_delta = _ida_hexrays.hx_cfunc_t_get_stkoff_delta
hx_cfunc_t_find_label = _ida_hexrays.hx_cfunc_t_find_label
hx_cfunc_t_remove_unused_labels = _ida_hexrays.hx_cfunc_t_remove_unused_labels
hx_cfunc_t_get_user_cmt = _ida_hexrays.hx_cfunc_t_get_user_cmt
hx_cfunc_t_set_user_cmt = _ida_hexrays.hx_cfunc_t_set_user_cmt
hx_cfunc_t_get_user_iflags = _ida_hexrays.hx_cfunc_t_get_user_iflags
hx_cfunc_t_set_user_iflags = _ida_hexrays.hx_cfunc_t_set_user_iflags
hx_cfunc_t_has_orphan_cmts = _ida_hexrays.hx_cfunc_t_has_orphan_cmts
hx_cfunc_t_del_orphan_cmts = _ida_hexrays.hx_cfunc_t_del_orphan_cmts
hx_cfunc_t_get_user_union_selection = _ida_hexrays.hx_cfunc_t_get_user_union_selection
hx_cfunc_t_set_user_union_selection = _ida_hexrays.hx_cfunc_t_set_user_union_selection
hx_cfunc_t_get_line_item = _ida_hexrays.hx_cfunc_t_get_line_item
hx_cfunc_t_get_warnings = _ida_hexrays.hx_cfunc_t_get_warnings
hx_cfunc_t_get_eamap = _ida_hexrays.hx_cfunc_t_get_eamap
hx_cfunc_t_get_boundaries = _ida_hexrays.hx_cfunc_t_get_boundaries
hx_cfunc_t_get_pseudocode = _ida_hexrays.hx_cfunc_t_get_pseudocode
hx_cfunc_t_gather_derefs = _ida_hexrays.hx_cfunc_t_gather_derefs
hx_cfunc_t_find_item_coords = _ida_hexrays.hx_cfunc_t_find_item_coords
hx_cfunc_t_cleanup = _ida_hexrays.hx_cfunc_t_cleanup
hx_decompile = _ida_hexrays.hx_decompile
hx_gen_microcode = _ida_hexrays.hx_gen_microcode
hx_mark_cfunc_dirty = _ida_hexrays.hx_mark_cfunc_dirty
hx_clear_cached_cfuncs = _ida_hexrays.hx_clear_cached_cfuncs
hx_has_cached_cfunc = _ida_hexrays.hx_has_cached_cfunc
hx_get_ctype_name = _ida_hexrays.hx_get_ctype_name
hx_create_field_name = _ida_hexrays.hx_create_field_name
hx_install_hexrays_callback = _ida_hexrays.hx_install_hexrays_callback
hx_remove_hexrays_callback = _ida_hexrays.hx_remove_hexrays_callback
hx_vdui_t_set_locked = _ida_hexrays.hx_vdui_t_set_locked
hx_vdui_t_refresh_view = _ida_hexrays.hx_vdui_t_refresh_view
hx_vdui_t_refresh_ctext = _ida_hexrays.hx_vdui_t_refresh_ctext
hx_vdui_t_switch_to = _ida_hexrays.hx_vdui_t_switch_to
hx_vdui_t_get_number = _ida_hexrays.hx_vdui_t_get_number
hx_vdui_t_get_current_label = _ida_hexrays.hx_vdui_t_get_current_label
hx_vdui_t_clear = _ida_hexrays.hx_vdui_t_clear
hx_vdui_t_refresh_cpos = _ida_hexrays.hx_vdui_t_refresh_cpos
hx_vdui_t_get_current_item = _ida_hexrays.hx_vdui_t_get_current_item
hx_vdui_t_ui_rename_lvar = _ida_hexrays.hx_vdui_t_ui_rename_lvar
hx_vdui_t_rename_lvar = _ida_hexrays.hx_vdui_t_rename_lvar
hx_vdui_t_ui_set_call_type = _ida_hexrays.hx_vdui_t_ui_set_call_type
hx_vdui_t_ui_set_lvar_type = _ida_hexrays.hx_vdui_t_ui_set_lvar_type
hx_vdui_t_set_lvar_type = _ida_hexrays.hx_vdui_t_set_lvar_type
hx_vdui_t_ui_edit_lvar_cmt = _ida_hexrays.hx_vdui_t_ui_edit_lvar_cmt
hx_vdui_t_set_lvar_cmt = _ida_hexrays.hx_vdui_t_set_lvar_cmt
hx_vdui_t_ui_map_lvar = _ida_hexrays.hx_vdui_t_ui_map_lvar
hx_vdui_t_ui_unmap_lvar = _ida_hexrays.hx_vdui_t_ui_unmap_lvar
hx_vdui_t_map_lvar = _ida_hexrays.hx_vdui_t_map_lvar
hx_vdui_t_set_strmem_type = _ida_hexrays.hx_vdui_t_set_strmem_type
hx_vdui_t_rename_strmem = _ida_hexrays.hx_vdui_t_rename_strmem
hx_vdui_t_set_global_type = _ida_hexrays.hx_vdui_t_set_global_type
hx_vdui_t_rename_global = _ida_hexrays.hx_vdui_t_rename_global
hx_vdui_t_rename_label = _ida_hexrays.hx_vdui_t_rename_label
hx_vdui_t_jump_enter = _ida_hexrays.hx_vdui_t_jump_enter
hx_vdui_t_ctree_to_disasm = _ida_hexrays.hx_vdui_t_ctree_to_disasm
hx_vdui_t_calc_cmt_type = _ida_hexrays.hx_vdui_t_calc_cmt_type
hx_vdui_t_edit_cmt = _ida_hexrays.hx_vdui_t_edit_cmt
hx_vdui_t_edit_func_cmt = _ida_hexrays.hx_vdui_t_edit_func_cmt
hx_vdui_t_del_orphan_cmts = _ida_hexrays.hx_vdui_t_del_orphan_cmts
hx_vdui_t_set_num_radix = _ida_hexrays.hx_vdui_t_set_num_radix
hx_vdui_t_set_num_enum = _ida_hexrays.hx_vdui_t_set_num_enum
hx_vdui_t_set_num_stroff = _ida_hexrays.hx_vdui_t_set_num_stroff
hx_vdui_t_invert_sign = _ida_hexrays.hx_vdui_t_invert_sign
hx_vdui_t_invert_bits = _ida_hexrays.hx_vdui_t_invert_bits
hx_vdui_t_collapse_item = _ida_hexrays.hx_vdui_t_collapse_item
hx_vdui_t_collapse_lvars = _ida_hexrays.hx_vdui_t_collapse_lvars
hx_vdui_t_split_item = _ida_hexrays.hx_vdui_t_split_item
hx_hexrays_alloc = _ida_hexrays.hx_hexrays_alloc
hx_hexrays_free = _ida_hexrays.hx_hexrays_free
hx_vdui_t_set_noptr_lvar = _ida_hexrays.hx_vdui_t_set_noptr_lvar
hx_select_udt_by_offset = _ida_hexrays.hx_select_udt_by_offset
hx_mblock_t_get_valranges_ = _ida_hexrays.hx_mblock_t_get_valranges_
hx_cfunc_t_refresh_func_ctext = _ida_hexrays.hx_cfunc_t_refresh_func_ctext
hx_checkout_hexrays_license = _ida_hexrays.hx_checkout_hexrays_license
hx_mbl_array_t_copy_block = _ida_hexrays.hx_mbl_array_t_copy_block
hx_mblock_t_optimize_useless_jump = _ida_hexrays.hx_mblock_t_optimize_useless_jump
hx_mblock_t_get_reginsn_qty = _ida_hexrays.hx_mblock_t_get_reginsn_qty
# SWIG-generated proxy for the C++ iterator over user_numforms_t
# (a map of operand_locator_t -> number_format_t). Auto-generated
# binding code: do not edit by hand; regenerate with SWIG instead.
class user_numforms_iterator_t(object):
    """
    Proxy of C++ user_numforms_iterator_t class
    """
    # 'thisown' tracks whether Python owns (and must free) the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Opaque iterator state mirrored from the C++ side.
    x = _swig_property(_ida_hexrays.user_numforms_iterator_t_x_get, _ida_hexrays.user_numforms_iterator_t_x_set)
    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool
        """
        return _ida_hexrays.user_numforms_iterator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool
        """
        return _ida_hexrays.user_numforms_iterator_t___ne__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_numforms_iterator_t
        """
        this = _ida_hexrays.new_user_numforms_iterator_t(*args)
        # SWIG idiom: 'self.this' may support append() or require plain
        # assignment depending on the SWIG runtime in use.
        try: self.this.append(this)
        except: self.this = this
    # Destruction is delegated to the C++ deleter; __del__ itself is a no-op.
    __swig_destroy__ = _ida_hexrays.delete_user_numforms_iterator_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime type system.
user_numforms_iterator_t_swigregister = _ida_hexrays.user_numforms_iterator_t_swigregister
user_numforms_iterator_t_swigregister(user_numforms_iterator_t)
# Free-function, std::map-style API over user_numforms_t: begin/end/next/prev
# for iteration, first/second for key/value access at an iterator,
# find/insert/erase/clear/size for map operations, and new/free for instance
# lifetime. SWIG-generated; do not edit by hand.
def user_numforms_begin(*args):
    """
    user_numforms_begin(map) -> user_numforms_iterator_t
    Get iterator pointing to the beginning of user_numforms_t.
    @param map (C++: const user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_begin(*args)
def user_numforms_end(*args):
    """
    user_numforms_end(map) -> user_numforms_iterator_t
    Get iterator pointing to the end of user_numforms_t.
    @param map (C++: const user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_end(*args)
def user_numforms_next(*args):
    """
    user_numforms_next(p) -> user_numforms_iterator_t
    Move to the next element.
    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_next(*args)
def user_numforms_prev(*args):
    """
    user_numforms_prev(p) -> user_numforms_iterator_t
    Move to the previous element.
    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_prev(*args)
def user_numforms_first(*args):
    """
    user_numforms_first(p) -> operand_locator_t
    Get reference to the current map key.
    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_first(*args)
def user_numforms_second(*args):
    """
    user_numforms_second(p) -> number_format_t
    Get reference to the current map value.
    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_second(*args)
def user_numforms_find(*args):
    """
    user_numforms_find(map, key) -> user_numforms_iterator_t
    Find the specified key in user_numforms_t.
    @param map (C++: const user_numforms_t *)
    @param key (C++: const operand_locator_t &)
    """
    return _ida_hexrays.user_numforms_find(*args)
def user_numforms_insert(*args):
    """
    user_numforms_insert(map, key, val) -> user_numforms_iterator_t
    Insert new ( 'operand_locator_t' , 'number_format_t' ) pair into
    user_numforms_t.
    @param map (C++: user_numforms_t *)
    @param key (C++: const operand_locator_t &)
    @param val (C++: const number_format_t &)
    """
    return _ida_hexrays.user_numforms_insert(*args)
def user_numforms_erase(*args):
    """
    user_numforms_erase(map, p)
    Erase current element from user_numforms_t.
    @param map (C++: user_numforms_t *)
    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_erase(*args)
def user_numforms_clear(*args):
    """
    user_numforms_clear(map)
    Clear user_numforms_t.
    @param map (C++: user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_clear(*args)
def user_numforms_size(*args):
    """
    user_numforms_size(map) -> size_t
    Get size of user_numforms_t.
    @param map (C++: user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_size(*args)
def user_numforms_free(*args):
    """
    user_numforms_free(map)
    Delete user_numforms_t instance.
    @param map (C++: user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_free(*args)
def user_numforms_new(*args):
    """
    user_numforms_new() -> user_numforms_t
    Create a new user_numforms_t instance.
    """
    return _ida_hexrays.user_numforms_new(*args)
# SWIG-generated proxy for the C++ iterator over lvar_mapping_t
# (a map of lvar_locator_t -> lvar_locator_t). Auto-generated
# binding code: do not edit by hand; regenerate with SWIG instead.
class lvar_mapping_iterator_t(object):
    """
    Proxy of C++ lvar_mapping_iterator_t class
    """
    # 'thisown' tracks whether Python owns (and must free) the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Opaque iterator state mirrored from the C++ side.
    x = _swig_property(_ida_hexrays.lvar_mapping_iterator_t_x_get, _ida_hexrays.lvar_mapping_iterator_t_x_set)
    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool
        """
        return _ida_hexrays.lvar_mapping_iterator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool
        """
        return _ida_hexrays.lvar_mapping_iterator_t___ne__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> lvar_mapping_iterator_t
        """
        this = _ida_hexrays.new_lvar_mapping_iterator_t(*args)
        # SWIG idiom: 'self.this' may support append() or require plain
        # assignment depending on the SWIG runtime in use.
        try: self.this.append(this)
        except: self.this = this
    # Destruction is delegated to the C++ deleter; __del__ itself is a no-op.
    __swig_destroy__ = _ida_hexrays.delete_lvar_mapping_iterator_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime type system.
lvar_mapping_iterator_t_swigregister = _ida_hexrays.lvar_mapping_iterator_t_swigregister
lvar_mapping_iterator_t_swigregister(lvar_mapping_iterator_t)
# Free-function, std::map-style API over lvar_mapping_t: begin/end/next/prev
# for iteration, first/second for key/value access at an iterator,
# find/insert/erase/clear/size for map operations, and new/free for instance
# lifetime. SWIG-generated; do not edit by hand.
def lvar_mapping_begin(*args):
    """
    lvar_mapping_begin(map) -> lvar_mapping_iterator_t
    Get iterator pointing to the beginning of lvar_mapping_t.
    @param map (C++: const lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_begin(*args)
def lvar_mapping_end(*args):
    """
    lvar_mapping_end(map) -> lvar_mapping_iterator_t
    Get iterator pointing to the end of lvar_mapping_t.
    @param map (C++: const lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_end(*args)
def lvar_mapping_next(*args):
    """
    lvar_mapping_next(p) -> lvar_mapping_iterator_t
    Move to the next element.
    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_next(*args)
def lvar_mapping_prev(*args):
    """
    lvar_mapping_prev(p) -> lvar_mapping_iterator_t
    Move to the previous element.
    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_prev(*args)
def lvar_mapping_first(*args):
    """
    lvar_mapping_first(p) -> lvar_locator_t
    Get reference to the current map key.
    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_first(*args)
def lvar_mapping_second(*args):
    """
    lvar_mapping_second(p) -> lvar_locator_t
    Get reference to the current map value.
    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_second(*args)
def lvar_mapping_find(*args):
    """
    lvar_mapping_find(map, key) -> lvar_mapping_iterator_t
    Find the specified key in lvar_mapping_t.
    @param map (C++: const lvar_mapping_t *)
    @param key (C++: const lvar_locator_t &)
    """
    return _ida_hexrays.lvar_mapping_find(*args)
def lvar_mapping_insert(*args):
    """
    lvar_mapping_insert(map, key, val) -> lvar_mapping_iterator_t
    Insert new ( 'lvar_locator_t' , 'lvar_locator_t' ) pair into
    lvar_mapping_t.
    @param map (C++: lvar_mapping_t *)
    @param key (C++: const lvar_locator_t &)
    @param val (C++: const lvar_locator_t &)
    """
    return _ida_hexrays.lvar_mapping_insert(*args)
def lvar_mapping_erase(*args):
    """
    lvar_mapping_erase(map, p)
    Erase current element from lvar_mapping_t.
    @param map (C++: lvar_mapping_t *)
    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_erase(*args)
def lvar_mapping_clear(*args):
    """
    lvar_mapping_clear(map)
    Clear lvar_mapping_t.
    @param map (C++: lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_clear(*args)
def lvar_mapping_size(*args):
    """
    lvar_mapping_size(map) -> size_t
    Get size of lvar_mapping_t.
    @param map (C++: lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_size(*args)
def lvar_mapping_free(*args):
    """
    lvar_mapping_free(map)
    Delete lvar_mapping_t instance.
    @param map (C++: lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_free(*args)
def lvar_mapping_new(*args):
    """
    lvar_mapping_new() -> lvar_mapping_t
    Create a new lvar_mapping_t instance.
    """
    return _ida_hexrays.lvar_mapping_new(*args)
# SWIG-generated proxy for the C++ iterator over udcall_map_t
# (a map of ea_t -> udcall_t). Auto-generated binding code:
# do not edit by hand; regenerate with SWIG instead.
class udcall_map_iterator_t(object):
    """
    Proxy of C++ udcall_map_iterator_t class
    """
    # 'thisown' tracks whether Python owns (and must free) the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Opaque iterator state mirrored from the C++ side.
    x = _swig_property(_ida_hexrays.udcall_map_iterator_t_x_get, _ida_hexrays.udcall_map_iterator_t_x_set)
    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool
        """
        return _ida_hexrays.udcall_map_iterator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool
        """
        return _ida_hexrays.udcall_map_iterator_t___ne__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> udcall_map_iterator_t
        """
        this = _ida_hexrays.new_udcall_map_iterator_t(*args)
        # SWIG idiom: 'self.this' may support append() or require plain
        # assignment depending on the SWIG runtime in use.
        try: self.this.append(this)
        except: self.this = this
    # Destruction is delegated to the C++ deleter; __del__ itself is a no-op.
    __swig_destroy__ = _ida_hexrays.delete_udcall_map_iterator_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime type system.
udcall_map_iterator_t_swigregister = _ida_hexrays.udcall_map_iterator_t_swigregister
udcall_map_iterator_t_swigregister(udcall_map_iterator_t)
# Free-function, std::map-style API over udcall_map_t: begin/end/next/prev
# for iteration, first/second for key/value access at an iterator,
# find/insert/erase/clear/size for map operations, and new/free for instance
# lifetime. SWIG-generated; do not edit by hand.
def udcall_map_begin(*args):
    """
    udcall_map_begin(map) -> udcall_map_iterator_t
    Get iterator pointing to the beginning of udcall_map_t.
    @param map (C++: const udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_begin(*args)
def udcall_map_end(*args):
    """
    udcall_map_end(map) -> udcall_map_iterator_t
    Get iterator pointing to the end of udcall_map_t.
    @param map (C++: const udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_end(*args)
def udcall_map_next(*args):
    """
    udcall_map_next(p) -> udcall_map_iterator_t
    Move to the next element.
    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_next(*args)
def udcall_map_prev(*args):
    """
    udcall_map_prev(p) -> udcall_map_iterator_t
    Move to the previous element.
    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_prev(*args)
def udcall_map_first(*args):
    """
    udcall_map_first(p) -> ea_t const &
    Get reference to the current map key.
    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_first(*args)
def udcall_map_second(*args):
    """
    udcall_map_second(p) -> udcall_t
    Get reference to the current map value.
    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_second(*args)
def udcall_map_find(*args):
    """
    udcall_map_find(map, key) -> udcall_map_iterator_t
    Find the specified key in udcall_map_t.
    @param map (C++: const udcall_map_t *)
    @param key (C++: const ea_t &)
    """
    return _ida_hexrays.udcall_map_find(*args)
def udcall_map_insert(*args):
    """
    udcall_map_insert(map, key, val) -> udcall_map_iterator_t
    Insert new (ea_t, 'udcall_t' ) pair into udcall_map_t.
    @param map (C++: udcall_map_t *)
    @param key (C++: const ea_t &)
    @param val (C++: const udcall_t &)
    """
    return _ida_hexrays.udcall_map_insert(*args)
def udcall_map_erase(*args):
    """
    udcall_map_erase(map, p)
    Erase current element from udcall_map_t.
    @param map (C++: udcall_map_t *)
    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_erase(*args)
def udcall_map_clear(*args):
    """
    udcall_map_clear(map)
    Clear udcall_map_t.
    @param map (C++: udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_clear(*args)
def udcall_map_size(*args):
    """
    udcall_map_size(map) -> size_t
    Get size of udcall_map_t.
    @param map (C++: udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_size(*args)
def udcall_map_free(*args):
    """
    udcall_map_free(map)
    Delete udcall_map_t instance.
    @param map (C++: udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_free(*args)
def udcall_map_new(*args):
    """
    udcall_map_new() -> udcall_map_t *
    Create a new udcall_map_t instance.
    """
    return _ida_hexrays.udcall_map_new(*args)
# SWIG-generated proxy for the C++ iterator over user_cmts_t
# (a map of treeloc_t -> citem_cmt_t). Auto-generated binding
# code: do not edit by hand; regenerate with SWIG instead.
class user_cmts_iterator_t(object):
    """
    Proxy of C++ user_cmts_iterator_t class
    """
    # 'thisown' tracks whether Python owns (and must free) the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Opaque iterator state mirrored from the C++ side.
    x = _swig_property(_ida_hexrays.user_cmts_iterator_t_x_get, _ida_hexrays.user_cmts_iterator_t_x_set)
    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool
        """
        return _ida_hexrays.user_cmts_iterator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool
        """
        return _ida_hexrays.user_cmts_iterator_t___ne__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_cmts_iterator_t
        """
        this = _ida_hexrays.new_user_cmts_iterator_t(*args)
        # SWIG idiom: 'self.this' may support append() or require plain
        # assignment depending on the SWIG runtime in use.
        try: self.this.append(this)
        except: self.this = this
    # Destruction is delegated to the C++ deleter; __del__ itself is a no-op.
    __swig_destroy__ = _ida_hexrays.delete_user_cmts_iterator_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime type system.
user_cmts_iterator_t_swigregister = _ida_hexrays.user_cmts_iterator_t_swigregister
user_cmts_iterator_t_swigregister(user_cmts_iterator_t)
# Free-function, std::map-style API over user_cmts_t: begin/end/next/prev
# for iteration, first/second for key/value access at an iterator,
# find/insert/erase/clear/size for map operations, and new/free for instance
# lifetime. SWIG-generated; do not edit by hand.
def user_cmts_begin(*args):
    """
    user_cmts_begin(map) -> user_cmts_iterator_t
    Get iterator pointing to the beginning of user_cmts_t.
    @param map (C++: const user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_begin(*args)
def user_cmts_end(*args):
    """
    user_cmts_end(map) -> user_cmts_iterator_t
    Get iterator pointing to the end of user_cmts_t.
    @param map (C++: const user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_end(*args)
def user_cmts_next(*args):
    """
    user_cmts_next(p) -> user_cmts_iterator_t
    Move to the next element.
    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_next(*args)
def user_cmts_prev(*args):
    """
    user_cmts_prev(p) -> user_cmts_iterator_t
    Move to the previous element.
    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_prev(*args)
def user_cmts_first(*args):
    """
    user_cmts_first(p) -> treeloc_t
    Get reference to the current map key.
    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_first(*args)
def user_cmts_second(*args):
    """
    user_cmts_second(p) -> citem_cmt_t
    Get reference to the current map value.
    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_second(*args)
def user_cmts_find(*args):
    """
    user_cmts_find(map, key) -> user_cmts_iterator_t
    Find the specified key in user_cmts_t.
    @param map (C++: const user_cmts_t *)
    @param key (C++: const treeloc_t &)
    """
    return _ida_hexrays.user_cmts_find(*args)
def user_cmts_insert(*args):
    """
    user_cmts_insert(map, key, val) -> user_cmts_iterator_t
    Insert new ( 'treeloc_t' , 'citem_cmt_t' ) pair into user_cmts_t.
    @param map (C++: user_cmts_t *)
    @param key (C++: const treeloc_t &)
    @param val (C++: const citem_cmt_t &)
    """
    return _ida_hexrays.user_cmts_insert(*args)
def user_cmts_erase(*args):
    """
    user_cmts_erase(map, p)
    Erase current element from user_cmts_t.
    @param map (C++: user_cmts_t *)
    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_erase(*args)
def user_cmts_clear(*args):
    """
    user_cmts_clear(map)
    Clear user_cmts_t.
    @param map (C++: user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_clear(*args)
def user_cmts_size(*args):
    """
    user_cmts_size(map) -> size_t
    Get size of user_cmts_t.
    @param map (C++: user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_size(*args)
def user_cmts_free(*args):
    """
    user_cmts_free(map)
    Delete user_cmts_t instance.
    @param map (C++: user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_free(*args)
def user_cmts_new(*args):
    """
    user_cmts_new() -> user_cmts_t
    Create a new user_cmts_t instance.
    """
    return _ida_hexrays.user_cmts_new(*args)
# SWIG-generated proxy for the C++ iterator over user_iflags_t
# (a map of citem_locator_t -> int32). Auto-generated binding
# code: do not edit by hand; regenerate with SWIG instead.
class user_iflags_iterator_t(object):
    """
    Proxy of C++ user_iflags_iterator_t class
    """
    # 'thisown' tracks whether Python owns (and must free) the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Opaque iterator state mirrored from the C++ side.
    x = _swig_property(_ida_hexrays.user_iflags_iterator_t_x_get, _ida_hexrays.user_iflags_iterator_t_x_set)
    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool
        """
        return _ida_hexrays.user_iflags_iterator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool
        """
        return _ida_hexrays.user_iflags_iterator_t___ne__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_iflags_iterator_t
        """
        this = _ida_hexrays.new_user_iflags_iterator_t(*args)
        # SWIG idiom: 'self.this' may support append() or require plain
        # assignment depending on the SWIG runtime in use.
        try: self.this.append(this)
        except: self.this = this
    # Destruction is delegated to the C++ deleter; __del__ itself is a no-op.
    __swig_destroy__ = _ida_hexrays.delete_user_iflags_iterator_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime type system.
user_iflags_iterator_t_swigregister = _ida_hexrays.user_iflags_iterator_t_swigregister
user_iflags_iterator_t_swigregister(user_iflags_iterator_t)
# Free-function, std::map-style API over user_iflags_t: begin/end/next/prev
# for iteration, first for key access at an iterator, find/insert/erase/
# clear/size for map operations, and new/free for instance lifetime.
# NOTE(review): unlike the sibling map APIs in this file, no
# user_iflags_second accessor is generated here — verify against the
# SWIG interface file if value access through the iterator is needed.
# SWIG-generated; do not edit by hand.
def user_iflags_begin(*args):
    """
    user_iflags_begin(map) -> user_iflags_iterator_t
    Get iterator pointing to the beginning of user_iflags_t.
    @param map (C++: const user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_begin(*args)
def user_iflags_end(*args):
    """
    user_iflags_end(map) -> user_iflags_iterator_t
    Get iterator pointing to the end of user_iflags_t.
    @param map (C++: const user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_end(*args)
def user_iflags_next(*args):
    """
    user_iflags_next(p) -> user_iflags_iterator_t
    Move to the next element.
    @param p (C++: user_iflags_iterator_t)
    """
    return _ida_hexrays.user_iflags_next(*args)
def user_iflags_prev(*args):
    """
    user_iflags_prev(p) -> user_iflags_iterator_t
    Move to the previous element.
    @param p (C++: user_iflags_iterator_t)
    """
    return _ida_hexrays.user_iflags_prev(*args)
def user_iflags_first(*args):
    """
    user_iflags_first(p) -> citem_locator_t
    Get reference to the current map key.
    @param p (C++: user_iflags_iterator_t)
    """
    return _ida_hexrays.user_iflags_first(*args)
def user_iflags_find(*args):
    """
    user_iflags_find(map, key) -> user_iflags_iterator_t
    Find the specified key in user_iflags_t.
    @param map (C++: const user_iflags_t *)
    @param key (C++: const citem_locator_t &)
    """
    return _ida_hexrays.user_iflags_find(*args)
def user_iflags_insert(*args):
    """
    user_iflags_insert(map, key, val) -> user_iflags_iterator_t
    Insert new ( 'citem_locator_t' , int32) pair into user_iflags_t.
    @param map (C++: user_iflags_t *)
    @param key (C++: const citem_locator_t &)
    @param val (C++: const int32 &)
    """
    return _ida_hexrays.user_iflags_insert(*args)
def user_iflags_erase(*args):
    """
    user_iflags_erase(map, p)
    Erase current element from user_iflags_t.
    @param map (C++: user_iflags_t *)
    @param p (C++: user_iflags_iterator_t)
    """
    return _ida_hexrays.user_iflags_erase(*args)
def user_iflags_clear(*args):
    """
    user_iflags_clear(map)
    Clear user_iflags_t.
    @param map (C++: user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_clear(*args)
def user_iflags_size(*args):
    """
    user_iflags_size(map) -> size_t
    Get size of user_iflags_t.
    @param map (C++: user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_size(*args)
def user_iflags_free(*args):
    """
    user_iflags_free(map)
    Delete user_iflags_t instance.
    @param map (C++: user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_free(*args)
def user_iflags_new(*args):
    """
    user_iflags_new() -> user_iflags_t
    Create a new user_iflags_t instance.
    """
    return _ida_hexrays.user_iflags_new(*args)
# SWIG-generated proxy for the C++ iterator over user_unions_t
# (a map of ea_t -> intvec_t). Auto-generated binding code:
# do not edit by hand; regenerate with SWIG instead.
class user_unions_iterator_t(object):
    """
    Proxy of C++ user_unions_iterator_t class
    """
    # 'thisown' tracks whether Python owns (and must free) the wrapped C++ object.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # Opaque iterator state mirrored from the C++ side.
    x = _swig_property(_ida_hexrays.user_unions_iterator_t_x_get, _ida_hexrays.user_unions_iterator_t_x_set)
    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool
        """
        return _ida_hexrays.user_unions_iterator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool
        """
        return _ida_hexrays.user_unions_iterator_t___ne__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> user_unions_iterator_t
        """
        this = _ida_hexrays.new_user_unions_iterator_t(*args)
        # SWIG idiom: 'self.this' may support append() or require plain
        # assignment depending on the SWIG runtime in use.
        try: self.this.append(this)
        except: self.this = this
    # Destruction is delegated to the C++ deleter; __del__ itself is a no-op.
    __swig_destroy__ = _ida_hexrays.delete_user_unions_iterator_t
    __del__ = lambda self : None;
# Register the proxy class with the SWIG runtime type system.
user_unions_iterator_t_swigregister = _ida_hexrays.user_unions_iterator_t_swigregister
user_unions_iterator_t_swigregister(user_unions_iterator_t)
def user_unions_begin(*args):
"""
user_unions_begin(map) -> user_unions_iterator_t
Get iterator pointing to the beginning of user_unions_t.
@param map (C++: const user_unions_t *)
"""
return _ida_hexrays.user_unions_begin(*args)
def user_unions_end(*args):
"""
user_unions_end(map) -> user_unions_iterator_t
Get iterator pointing to the end of user_unions_t.
@param map (C++: const user_unions_t *)
"""
return _ida_hexrays.user_unions_end(*args)
def user_unions_next(*args):
"""
user_unions_next(p) -> user_unions_iterator_t
Move to the next element.
@param p (C++: user_unions_iterator_t)
"""
return _ida_hexrays.user_unions_next(*args)
def user_unions_prev(*args):
"""
user_unions_prev(p) -> user_unions_iterator_t
Move to the previous element.
@param p (C++: user_unions_iterator_t)
"""
return _ida_hexrays.user_unions_prev(*args)
def user_unions_first(*args):
"""
user_unions_first(p) -> ea_t const &
Get reference to the current map key.
@param p (C++: user_unions_iterator_t)
"""
return _ida_hexrays.user_unions_first(*args)
def user_unions_second(*args):
"""
user_unions_second(p) -> intvec_t
Get reference to the current map value.
@param p (C++: user_unions_iterator_t)
"""
return _ida_hexrays.user_unions_second(*args)
def user_unions_find(*args):
"""
user_unions_find(map, key) -> user_unions_iterator_t
Find the specified key in user_unions_t.
@param map (C++: const user_unions_t *)
@param key (C++: const ea_t &)
"""
return _ida_hexrays.user_unions_find(*args)
def user_unions_insert(*args):
"""
user_unions_insert(map, key, val) -> user_unions_iterator_t
Insert new (ea_t, intvec_t) pair into user_unions_t.
@param map (C++: user_unions_t *)
@param key (C++: const ea_t &)
@param val (C++: const intvec_t &)
"""
return _ida_hexrays.user_unions_insert(*args)
def user_unions_erase(*args):
"""
user_unions_erase(map, p)
Erase current element from user_unions_t.
@param map (C++: user_unions_t *)
@param p (C++: user_unions_iterator_t)
"""
return _ida_hexrays.user_unions_erase(*args)
def user_unions_clear(*args):
"""
user_unions_clear(map)
Clear user_unions_t.
@param map (C++: user_unions_t *)
"""
return _ida_hexrays.user_unions_clear(*args)
def user_unions_size(*args):
"""
user_unions_size(map) -> size_t
Get size of user_unions_t.
@param map (C++: user_unions_t *)
"""
return _ida_hexrays.user_unions_size(*args)
def user_unions_free(*args):
"""
user_unions_free(map)
Delete user_unions_t instance.
@param map (C++: user_unions_t *)
"""
return _ida_hexrays.user_unions_free(*args)
def user_unions_new(*args):
"""
user_unions_new() -> user_unions_t
Create a new user_unions_t instance.
"""
return _ida_hexrays.user_unions_new(*args)
class user_labels_iterator_t(object):
"""
Proxy of C++ user_labels_iterator_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
x = _swig_property(_ida_hexrays.user_labels_iterator_t_x_get, _ida_hexrays.user_labels_iterator_t_x_set)
def __eq__(self, *args):
"""
__eq__(self, p) -> bool
"""
return _ida_hexrays.user_labels_iterator_t___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, p) -> bool
"""
return _ida_hexrays.user_labels_iterator_t___ne__(self, *args)
def __init__(self, *args):
"""
__init__(self) -> user_labels_iterator_t
"""
this = _ida_hexrays.new_user_labels_iterator_t(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_user_labels_iterator_t
__del__ = lambda self : None;
user_labels_iterator_t_swigregister = _ida_hexrays.user_labels_iterator_t_swigregister
user_labels_iterator_t_swigregister(user_labels_iterator_t)
def user_labels_begin(*args):
"""
user_labels_begin(map) -> user_labels_iterator_t
Get iterator pointing to the beginning of user_labels_t.
@param map (C++: const user_labels_t *)
"""
return _ida_hexrays.user_labels_begin(*args)
def user_labels_end(*args):
"""
user_labels_end(map) -> user_labels_iterator_t
Get iterator pointing to the end of user_labels_t.
@param map (C++: const user_labels_t *)
"""
return _ida_hexrays.user_labels_end(*args)
def user_labels_next(*args):
"""
user_labels_next(p) -> user_labels_iterator_t
Move to the next element.
@param p (C++: user_labels_iterator_t)
"""
return _ida_hexrays.user_labels_next(*args)
def user_labels_prev(*args):
"""
user_labels_prev(p) -> user_labels_iterator_t
Move to the previous element.
@param p (C++: user_labels_iterator_t)
"""
return _ida_hexrays.user_labels_prev(*args)
def user_labels_first(*args):
"""
user_labels_first(p) -> int const &
Get reference to the current map key.
@param p (C++: user_labels_iterator_t)
"""
return _ida_hexrays.user_labels_first(*args)
def user_labels_second(*args):
"""
user_labels_second(p) -> qstring &
Get reference to the current map value.
@param p (C++: user_labels_iterator_t)
"""
return _ida_hexrays.user_labels_second(*args)
def user_labels_find(*args):
"""
user_labels_find(map, key) -> user_labels_iterator_t
Find the specified key in user_labels_t.
@param map (C++: const user_labels_t *)
@param key (C++: const int &)
"""
return _ida_hexrays.user_labels_find(*args)
def user_labels_insert(*args):
"""
user_labels_insert(map, key, val) -> user_labels_iterator_t
Insert new (int, qstring) pair into user_labels_t.
@param map (C++: user_labels_t *)
@param key (C++: const int &)
@param val (C++: const qstring &)
"""
return _ida_hexrays.user_labels_insert(*args)
def user_labels_erase(*args):
"""
user_labels_erase(map, p)
Erase current element from user_labels_t.
@param map (C++: user_labels_t *)
@param p (C++: user_labels_iterator_t)
"""
return _ida_hexrays.user_labels_erase(*args)
def user_labels_clear(*args):
"""
user_labels_clear(map)
Clear user_labels_t.
@param map (C++: user_labels_t *)
"""
return _ida_hexrays.user_labels_clear(*args)
def user_labels_size(*args):
"""
user_labels_size(map) -> size_t
Get size of user_labels_t.
@param map (C++: user_labels_t *)
"""
return _ida_hexrays.user_labels_size(*args)
def user_labels_free(*args):
"""
user_labels_free(map)
Delete user_labels_t instance.
@param map (C++: user_labels_t *)
"""
return _ida_hexrays.user_labels_free(*args)
def user_labels_new(*args):
"""
user_labels_new() -> user_labels_t
Create a new user_labels_t instance.
"""
return _ida_hexrays.user_labels_new(*args)
class eamap_iterator_t(object):
"""
Proxy of C++ eamap_iterator_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
x = _swig_property(_ida_hexrays.eamap_iterator_t_x_get, _ida_hexrays.eamap_iterator_t_x_set)
def __eq__(self, *args):
"""
__eq__(self, p) -> bool
"""
return _ida_hexrays.eamap_iterator_t___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, p) -> bool
"""
return _ida_hexrays.eamap_iterator_t___ne__(self, *args)
def __init__(self, *args):
"""
__init__(self) -> eamap_iterator_t
"""
this = _ida_hexrays.new_eamap_iterator_t(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_eamap_iterator_t
__del__ = lambda self : None;
eamap_iterator_t_swigregister = _ida_hexrays.eamap_iterator_t_swigregister
eamap_iterator_t_swigregister(eamap_iterator_t)
def eamap_begin(*args):
"""
eamap_begin(map) -> eamap_iterator_t
Get iterator pointing to the beginning of eamap_t.
@param map (C++: const eamap_t *)
"""
return _ida_hexrays.eamap_begin(*args)
def eamap_end(*args):
"""
eamap_end(map) -> eamap_iterator_t
Get iterator pointing to the end of eamap_t.
@param map (C++: const eamap_t *)
"""
return _ida_hexrays.eamap_end(*args)
def eamap_next(*args):
"""
eamap_next(p) -> eamap_iterator_t
Move to the next element.
@param p (C++: eamap_iterator_t)
"""
return _ida_hexrays.eamap_next(*args)
def eamap_prev(*args):
"""
eamap_prev(p) -> eamap_iterator_t
Move to the previous element.
@param p (C++: eamap_iterator_t)
"""
return _ida_hexrays.eamap_prev(*args)
def eamap_first(*args):
"""
eamap_first(p) -> ea_t const &
Get reference to the current map key.
@param p (C++: eamap_iterator_t)
"""
return _ida_hexrays.eamap_first(*args)
def eamap_second(*args):
"""
eamap_second(p) -> cinsnptrvec_t
Get reference to the current map value.
@param p (C++: eamap_iterator_t)
"""
return _ida_hexrays.eamap_second(*args)
def eamap_find(*args):
"""
eamap_find(map, key) -> eamap_iterator_t
Find the specified key in eamap_t.
@param map (C++: const eamap_t *)
@param key (C++: const ea_t &)
"""
return _ida_hexrays.eamap_find(*args)
def eamap_insert(*args):
"""
eamap_insert(map, key, val) -> eamap_iterator_t
Insert new (ea_t, cinsnptrvec_t) pair into eamap_t.
@param map (C++: eamap_t *)
@param key (C++: const ea_t &)
@param val (C++: const cinsnptrvec_t &)
"""
return _ida_hexrays.eamap_insert(*args)
def eamap_erase(*args):
"""
eamap_erase(map, p)
Erase current element from eamap_t.
@param map (C++: eamap_t *)
@param p (C++: eamap_iterator_t)
"""
return _ida_hexrays.eamap_erase(*args)
def eamap_clear(*args):
"""
eamap_clear(map)
Clear eamap_t.
@param map (C++: eamap_t *)
"""
return _ida_hexrays.eamap_clear(*args)
def eamap_size(*args):
"""
eamap_size(map) -> size_t
Get size of eamap_t.
@param map (C++: eamap_t *)
"""
return _ida_hexrays.eamap_size(*args)
def eamap_free(*args):
"""
eamap_free(map)
Delete eamap_t instance.
@param map (C++: eamap_t *)
"""
return _ida_hexrays.eamap_free(*args)
def eamap_new(*args):
"""
eamap_new() -> eamap_t
Create a new eamap_t instance.
"""
return _ida_hexrays.eamap_new(*args)
class boundaries_iterator_t(object):
"""
Proxy of C++ boundaries_iterator_t class
"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
x = _swig_property(_ida_hexrays.boundaries_iterator_t_x_get, _ida_hexrays.boundaries_iterator_t_x_set)
def __eq__(self, *args):
"""
__eq__(self, p) -> bool
"""
return _ida_hexrays.boundaries_iterator_t___eq__(self, *args)
def __ne__(self, *args):
"""
__ne__(self, p) -> bool
"""
return _ida_hexrays.boundaries_iterator_t___ne__(self, *args)
def __init__(self, *args):
"""
__init__(self) -> boundaries_iterator_t
"""
this = _ida_hexrays.new_boundaries_iterator_t(*args)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _ida_hexrays.delete_boundaries_iterator_t
__del__ = lambda self : None;
boundaries_iterator_t_swigregister = _ida_hexrays.boundaries_iterator_t_swigregister
boundaries_iterator_t_swigregister(boundaries_iterator_t)
def boundaries_begin(*args):
"""
boundaries_begin(map) -> boundaries_iterator_t
Get iterator pointing to the beginning of boundaries_t.
@param map (C++: const boundaries_t *)
"""
return _ida_hexrays.boundaries_begin(*args)
def boundaries_end(*args):
"""
boundaries_end(map) -> boundaries_iterator_t
Get iterator pointing to the end of boundaries_t.
@param map (C++: const boundaries_t *)
"""
return _ida_hexrays.boundaries_end(*args)
def boundaries_next(*args):
"""
boundaries_next(p) -> boundaries_iterator_t
Move to the next element.
@param p (C++: boundaries_iterator_t)
"""
return _ida_hexrays.boundaries_next(*args)
def boundaries_prev(*args):
"""
boundaries_prev(p) -> boundaries_iterator_t
Move to the previous element.
@param p (C++: boundaries_iterator_t)
"""
return _ida_hexrays.boundaries_prev(*args)
def boundaries_first(*args):
"""
boundaries_first(p) -> cinsn_t
Get reference to the current map key.
@param p (C++: boundaries_iterator_t)
"""
return _ida_hexrays.boundaries_first(*args)
def boundaries_second(*args):
"""
boundaries_second(p) -> rangeset_t
Get reference to the current map value.
@param p (C++: boundaries_iterator_t)
"""
return _ida_hexrays.boundaries_second(*args)
def boundaries_erase(*args):
"""
boundaries_erase(map, p)
Erase current element from boundaries_t.
@param map (C++: boundaries_t *)
@param p (C++: boundaries_iterator_t)
"""
return _ida_hexrays.boundaries_erase(*args)
def boundaries_clear(*args):
"""
boundaries_clear(map)
Clear boundaries_t.
@param map (C++: boundaries_t *)
"""
return _ida_hexrays.boundaries_clear(*args)
def boundaries_size(*args):
"""
boundaries_size(map) -> size_t
Get size of boundaries_t.
@param map (C++: boundaries_t *)
"""
return _ida_hexrays.boundaries_size(*args)
def boundaries_free(*args):
"""
boundaries_free(map)
Delete boundaries_t instance.
@param map (C++: boundaries_t *)
"""
return _ida_hexrays.boundaries_free(*args)
def boundaries_new(*args):
"""
boundaries_new() -> boundaries_t
Create a new boundaries_t instance.
"""
return _ida_hexrays.boundaries_new(*args)
class block_chains_iterator_t(object):
    """
    Proxy of C++ block_chains_iterator_t class

    SWIG-generated iterator over a 'block_chains_t' set; it is driven by
    the flat block_chains_begin()/block_chains_next()/... functions above.
    """
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    # raw C++ iterator state exposed by the SWIG layer
    x = _swig_property(_ida_hexrays.block_chains_iterator_t_x_get, _ida_hexrays.block_chains_iterator_t_x_set)
    def __eq__(self, *args):
        """
        __eq__(self, p) -> bool

        True if both iterators point at the same element.
        """
        return _ida_hexrays.block_chains_iterator_t___eq__(self, *args)
    def __ne__(self, *args):
        """
        __ne__(self, p) -> bool

        True if the iterators point at different elements.
        """
        return _ida_hexrays.block_chains_iterator_t___ne__(self, *args)
    def __init__(self, *args):
        """
        __init__(self) -> block_chains_iterator_t
        """
        this = _ida_hexrays.new_block_chains_iterator_t(*args)
        # standard SWIG ownership handshake; the broad 'except:' is part of
        # the generated pattern (NOTE(review): intentionally left as-is)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ida_hexrays.delete_block_chains_iterator_t
    __del__ = lambda self : None;
block_chains_iterator_t_swigregister = _ida_hexrays.block_chains_iterator_t_swigregister
block_chains_iterator_t_swigregister(block_chains_iterator_t)
def block_chains_begin(*args):
"""
block_chains_begin(set) -> block_chains_iterator_t
Get iterator pointing to the beginning of 'block_chains_t' .
@param set (C++: const block_chains_t *)
"""
return _ida_hexrays.block_chains_begin(*args)
def block_chains_end(*args):
"""
block_chains_end(set) -> block_chains_iterator_t
Get iterator pointing to the end of 'block_chains_t' .
@param set (C++: const block_chains_t *)
"""
return _ida_hexrays.block_chains_end(*args)
def block_chains_next(*args):
"""
block_chains_next(p) -> block_chains_iterator_t
Move to the next element.
@param p (C++: block_chains_iterator_t)
"""
return _ida_hexrays.block_chains_next(*args)
def block_chains_prev(*args):
"""
block_chains_prev(p) -> block_chains_iterator_t
Move to the previous element.
@param p (C++: block_chains_iterator_t)
"""
return _ida_hexrays.block_chains_prev(*args)
def block_chains_get(*args):
"""
block_chains_get(p) -> chain_t
Get reference to the current set value.
@param p (C++: block_chains_iterator_t)
"""
return _ida_hexrays.block_chains_get(*args)
def block_chains_find(*args):
"""
block_chains_find(set, val) -> block_chains_iterator_t
Find the specified key in set 'block_chains_t' .
@param set (C++: const block_chains_t *)
@param val (C++: const chain_t &)
"""
return _ida_hexrays.block_chains_find(*args)
def block_chains_insert(*args):
"""
block_chains_insert(set, val) -> block_chains_iterator_t
Insert new ( 'chain_t' ) into set 'block_chains_t' .
@param set (C++: block_chains_t *)
@param val (C++: const chain_t &)
"""
return _ida_hexrays.block_chains_insert(*args)
def block_chains_erase(*args):
"""
block_chains_erase(set, p)
Erase current element from 'block_chains_t' .
@param set (C++: block_chains_t *)
@param p (C++: block_chains_iterator_t)
"""
return _ida_hexrays.block_chains_erase(*args)
def block_chains_clear(*args):
"""
block_chains_clear(set)
Clear 'block_chains_t' .
@param set (C++: block_chains_t *)
"""
return _ida_hexrays.block_chains_clear(*args)
def block_chains_size(*args):
"""
block_chains_size(set) -> size_t
Get size of 'block_chains_t' .
@param set (C++: block_chains_t *)
"""
return _ida_hexrays.block_chains_size(*args)
def block_chains_free(*args):
"""
block_chains_free(set)
Delete 'block_chains_t' instance.
@param set (C++: block_chains_t *)
"""
return _ida_hexrays.block_chains_free(*args)
def block_chains_new(*args):
"""
block_chains_new() -> block_chains_t
Create a new 'block_chains_t' instance.
"""
return _ida_hexrays.block_chains_new(*args)
#<pycode(py_hexrays)>
import ida_funcs
hexrays_failure_t.__str__ = lambda self: str("%x: %s" % (self.errea, self.desc()))
# ---------------------------------------------------------------------
# Renamings
is_allowed_on_small_struni = accepts_small_udts
is_small_struni = is_small_udt
# ---------------------------------------------------------------------
class DecompilationFailure(Exception):
    """
    Raised on a decompilation error.

    The associated hexrays_failure_t object is stored in the
    'info' member of this exception.
    """
    def __init__(self, info):
        message = 'Decompilation failed: %s' % (str(info), )
        Exception.__init__(self, message)
        self.info = info
# ---------------------------------------------------------------------
def decompile(ea, hf=None, flags=0):
    """
    Decompile a function.

    @param ea: address of the function, or an ida_funcs.func_t object
    @param hf: optional hexrays_failure_t receiving failure details
               (a fresh one is created when None)
    @param flags: flags forwarded to decompile_func()
    @return: a cfuncptr_t on success; None when 'ea' is an address
             with no function at it
    @raise RuntimeError: if 'ea' is neither an address nor a func_t
    @raise DecompilationFailure: if the decompiler reports an error
    """
    if isinstance(ea, (int, long)):
        func = ida_funcs.get_func(ea)
        if not func: return
    elif isinstance(ea, ida_funcs.func_t):
        # isinstance (not 'type(ea) =='), so func_t subclasses work too
        func = ea
    else:
        raise RuntimeError('arg 1 of decompile expects either ea_t or cfunc_t argument')
    if hf is None:
        hf = hexrays_failure_t()
    ptr = _ida_hexrays.decompile_func(func, hf, flags)
    if ptr.__deref__() is None:
        raise DecompilationFailure(hf)
    return ptr
# ---------------------------------------------------------------------
# stringify all string types
#qtype.__str__ = qtype.c_str
#qstring.__str__ = qstring.c_str
#citem_cmt_t.__str__ = citem_cmt_t.c_str
# ---------------------------------------------------------------------
# listify all list types
import ida_idaapi
ida_idaapi._listify_types(
cinsnptrvec_t,
ctree_items_t,
qvector_lvar_t,
qvector_carg_t,
qvector_ccase_t,
hexwarns_t,
history_t,
lvar_saved_infos_t,
ui_stroff_ops_t)
def citem_to_specific_type(self):
    """
    Cast this citem_t to its concrete type — cexpr_t for expression
    opcodes, cinsn_t for statement opcodes — based on 'op'.
    """
    opcode = self.op
    if cot_empty <= opcode <= cot_last:
        return self.cexpr
    if cit_empty <= opcode < cit_end:
        return self.cinsn
    raise RuntimeError('unknown op type %s' % (repr(opcode), ))
citem_t.to_specific_type = property(citem_to_specific_type)
"""
array used for translating cinsn_t->op type to their names.
"""
cinsn_t.op_to_typename = {}
for k in dir(_ida_hexrays):
if k.startswith('cit_'):
cinsn_t.op_to_typename[getattr(_ida_hexrays, k)] = k[4:]
"""
array used for translating cexpr_t->op type to their names.
"""
cexpr_t.op_to_typename = {}
for k in dir(_ida_hexrays):
if k.startswith('cot_'):
cexpr_t.op_to_typename[getattr(_ida_hexrays, k)] = k[4:]
def property_op_to_typename(self):
    """Return the symbolic name of this item's 'op' (the 'cot_'/'cit_' suffix)."""
    return self.op_to_typename[self.op]
cinsn_t.opname = property(property_op_to_typename)
cexpr_t.opname = property(property_op_to_typename)
def cexpr_operands(self):
    """
    Return a dictionary with the operands of a cexpr_t.

    The keys depend on the expression opcode: binary ops get 'x'/'y',
    unary ops 'x', the ternary 'x'/'y'/'z', and leaf expressions get
    their payload field ('n', 'fpc', 'string', 'obj_ea', 'v', 'helper').
    Raises RuntimeError for unrecognized opcodes.
    """
    # binary operators: the cot_comma..cot_asgumod and cot_lor..cot_fdiv
    # opcode ranges, plus array indexing
    if self.op >= cot_comma and self.op <= cot_asgumod or \
        self.op >= cot_lor and self.op <= cot_fdiv or \
        self.op == cot_idx:
        return {'x': self.x, 'y': self.y}
    elif self.op == cot_tern:
        return {'x': self.x, 'y': self.y, 'z': self.z}
    # unary operators (cot_lnot..cot_predec covers the unary opcode range)
    elif self.op in [cot_fneg, cot_neg, cot_sizeof] or \
        self.op >= cot_lnot and self.op <= cot_predec:
        return {'x': self.x}
    elif self.op == cot_cast:
        return {'type': self.type, 'x': self.x}
    elif self.op == cot_call:
        # 'x' is the callee expression, 'a' the argument list
        return {'x': self.x, 'a': self.a}
    elif self.op in [cot_memref, cot_memptr]:
        # 'm' is the member offset
        return {'x': self.x, 'm': self.m}
    elif self.op == cot_num:
        return {'n': self.n}
    elif self.op == cot_fnum:
        return {'fpc': self.fpc}
    elif self.op == cot_str:
        return {'string': self.string}
    elif self.op == cot_obj:
        return {'obj_ea': self.obj_ea}
    elif self.op == cot_var:
        return {'v': self.v}
    elif self.op == cot_helper:
        return {'helper': self.helper}
    raise RuntimeError('unknown op type %s' % self.opname)
cexpr_t.operands = property(cexpr_operands)
def cinsn_details(self):
    """
    Return the details pointer for this cinsn_t, depending on 'op':
    one of the cblock_t, cif_t, ... objects (attribute 'c' + opname).
    'empty' returns the instruction itself; 'break'/'continue' have no
    details and return None.
    """
    if self.op not in self.op_to_typename:
        raise RuntimeError('unknown item->op type')
    kind = self.opname
    if kind == 'empty':
        return self
    if kind in ('break', 'continue'):
        return None
    return getattr(self, 'c' + kind)
cinsn_t.details = property(cinsn_details)
def cblock_iter(self):
    """Yield the instructions of this cblock_t, in order."""
    it = self.begin()
    remaining = self.size()
    while remaining:
        yield it.cur
        next(it)
        remaining -= 1
cblock_t.__iter__ = cblock_iter
cblock_t.__len__ = cblock_t.size
# cblock.find(cinsn_t) -> returns the iterator positioned at the given item
def cblock_find(self, item):
    """Return an iterator positioned at 'item', or None if not present."""
    it = self.begin()
    remaining = self.size()
    while remaining > 0:
        if it.cur == item:
            return it
        next(it)
        remaining -= 1
    return None
cblock_t.find = cblock_find
# cblock.index(cinsn_t) -> returns the index of the given item
def cblock_index(self, item):
    """Return the index of 'item' in this cblock_t, or None if absent."""
    it = self.begin()
    idx = 0
    total = self.size()
    while idx < total:
        if it.cur == item:
            return idx
        next(it)
        idx += 1
    return None
cblock_t.index = cblock_index
# cblock.at(int) -> returns the item at the given index index
def cblock_at(self, index):
    """Return the item at position 'index', or None if out of range."""
    it = self.begin()
    pos = 0
    count = self.size()
    while pos < count:
        if pos == index:
            return it.cur
        next(it)
        pos += 1
    return None
cblock_t.at = cblock_at
# cblock.remove(cinsn_t)
def cblock_remove(self, item):
    """Erase 'item' from this cblock_t (looked up with find())."""
    self.erase(self.find(item))
    return
cblock_t.remove = cblock_remove
# cblock.insert(index, cinsn_t)
# NOTE: 'cblock_t.insert' is rebound to the wrapper below, shadowing the
# SWIG-native insert. Keep a reference to the native method first so the
# wrapper does not recurse into itself when it performs the real insertion.
_cblock_t_native_insert = cblock_t.insert
def cblock_insert(self, index, item):
    """Insert 'item' before position 'index' of this cblock_t."""
    pos = self.at(index)
    it = self.find(pos)
    _cblock_t_native_insert(self, it, item)
    return
cblock_t.insert = cblock_insert
cfuncptr_t.__str__ = lambda self: str(self.__deref__())
import ida_typeinf
def cfunc_type(self):
    """
    Return this function's type as an ida_typeinf.tinfo_t object,
    or None if it could not be retrieved.
    """
    tif = ida_typeinf.tinfo_t()
    if not self.get_func_type(tif):
        return None
    return tif
cfunc_t.type = property(cfunc_type)
cfuncptr_t.type = property(lambda self: self.__deref__().type)
cfunc_t.arguments = property(lambda self: [o for o in self.lvars if o.is_arg_var])
cfuncptr_t.arguments = property(lambda self: self.__deref__().arguments)
cfunc_t.lvars = property(cfunc_t.get_lvars)
cfuncptr_t.lvars = property(lambda self: self.__deref__().lvars)
cfunc_t.warnings = property(cfunc_t.get_warnings)
cfuncptr_t.warnings = property(lambda self: self.__deref__().warnings)
cfunc_t.pseudocode = property(cfunc_t.get_pseudocode)
cfuncptr_t.pseudocode = property(lambda self: self.__deref__().get_pseudocode())
cfunc_t.eamap = property(cfunc_t.get_eamap)
cfuncptr_t.eamap = property(lambda self: self.__deref__().get_eamap())
cfunc_t.boundaries = property(cfunc_t.get_boundaries)
cfuncptr_t.boundaries = property(lambda self: self.__deref__().get_boundaries())
#pragma SWIG nowarn=+503
lvar_t.used = property(lvar_t.used)
lvar_t.typed = property(lvar_t.typed)
lvar_t.mreg_done = property(lvar_t.mreg_done)
lvar_t.has_nice_name = property(lvar_t.has_nice_name)
lvar_t.is_unknown_width = property(lvar_t.is_unknown_width)
lvar_t.has_user_info = property(lvar_t.has_user_info)
lvar_t.has_user_name = property(lvar_t.has_user_name)
lvar_t.has_user_type = property(lvar_t.has_user_type)
lvar_t.is_result_var = property(lvar_t.is_result_var)
lvar_t.is_arg_var = property(lvar_t.is_arg_var)
lvar_t.is_fake_var = property(lvar_t.is_fake_var)
lvar_t.is_overlapped_var = property(lvar_t.is_overlapped_var)
lvar_t.is_floating_var = property(lvar_t.is_floating_var)
lvar_t.is_spoiled_var = property(lvar_t.is_spoiled_var)
lvar_t.is_mapdst_var = property(lvar_t.is_mapdst_var)
# dictify all dict-like types
def _map_as_dict(maptype, name, keytype, valuetype):
    """
    Graft a Python dict-like interface onto the SWIG map proxy class
    'maptype'.

    The flat accessor functions '<name>_begin', '<name>_end', ... exported
    by _ida_hexrays are stashed on the class as '__begin', '__end', etc.
    They are C-level functions (not descriptors), so 'self' must be passed
    explicitly by the wrappers below.

    @param maptype: SWIG proxy class to extend (e.g. user_cmts_t)
    @param name: prefix of the flat accessor functions (e.g. 'user_cmts')
    @param keytype: type (or tuple of types) accepted as keys
    @param valuetype: type accepted as values
    """
    maptype.keytype = keytype
    maptype.valuetype = valuetype
    for fctname in ['begin', 'end', 'first', 'second', 'next', \
        'find', 'insert', 'erase', 'clear', 'size']:
        fct = getattr(_ida_hexrays, name + '_' + fctname)
        setattr(maptype, '__' + fctname, fct)
    # bind the SWIG-native 'size'/'at' before the wrappers below shadow
    # them ('at' is assumed present on the proxy; __getitem__ is
    # re-assigned to a checked version further down anyway)
    maptype.__len__ = maptype.size
    maptype.__getitem__ = maptype.at
    maptype.begin = lambda self, *args: self.__begin(self, *args)
    maptype.end = lambda self, *args: self.__end(self, *args)
    # first/second/next take an iterator, not the map, hence no 'self'
    maptype.first = lambda self, *args: self.__first(*args)
    maptype.second = lambda self, *args: self.__second(*args)
    maptype.next = lambda self, *args: self.__next(*args)
    maptype.find = lambda self, *args: self.__find(self, *args)
    maptype.insert = lambda self, *args: self.__insert(self, *args)
    maptype.erase = lambda self, *args: self.__erase(self, *args)
    maptype.clear = lambda self, *args: self.__clear(self, *args)
    maptype.size = lambda self, *args: self.__size(self, *args)
    def _map___iter__(self):
        """
        Iterate over dictionary keys.
        """
        return self.iterkeys()
    maptype.__iter__ = _map___iter__
    def _map___getitem__(self, key):
        """
        Return the value associated with the provided key.
        """
        if not isinstance(key, self.keytype):
            raise KeyError('type of key should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))
        if key not in self:
            raise KeyError('key not found')
        return self.second(self.find(key))
    maptype.__getitem__ = _map___getitem__
    def _map___setitem__(self, key, value):
        """
        Set the value associated with the provided key.
        """
        if not isinstance(key, self.keytype):
            raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))
        if not isinstance(value, self.valuetype):
            # BUGFIX: type(value) must be repr()-ed before concatenation;
            # the original 'str + type' raised a TypeError instead of the
            # intended KeyError
            raise KeyError('type of `value` should be ' + repr(self.valuetype) + ' but got ' + repr(type(value)))
        self.insert(key, value)
        return
    maptype.__setitem__ = _map___setitem__
    def _map___delitem__(self, key):
        """
        Remove the value associated with the provided key.
        """
        if not isinstance(key, self.keytype):
            raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))
        if key not in self:
            raise KeyError('key not found')
        self.erase(self.find(key))
        return
    maptype.__delitem__ = _map___delitem__
    def _map___contains__(self, key):
        """
        Return True if the specified key exists in the map.
        """
        if not isinstance(key, self.keytype):
            raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))
        if self.find(key) != self.end():
            return True
        return False
    maptype.__contains__ = _map___contains__
    def _map_clear(self):
        """
        Remove all items from the map.
        """
        # BUGFIX: call the low-level clear directly; 'self.clear()' would
        # recurse into this very method, since maptype.clear is rebound to
        # it right below
        self.__clear(self)
        return
    maptype.clear = _map_clear
    def _map_copy(self):
        """
        Return a shallow copy of the map, as a plain dict.
        """
        ret = {}
        for k in self.iterkeys():
            ret[k] = self[k]
        return ret
    maptype.copy = _map_copy
    def _map_get(self, key, default=None):
        """
        Return the value for 'key', or 'default' when absent.
        """
        if key in self:
            return self[key]
        return default
    maptype.get = _map_get
    def _map_iterkeys(self):
        """
        Yield the map keys, in iteration order.
        """
        iter = self.begin()
        while iter != self.end():
            yield self.first(iter)
            iter = self.next(iter)
        return
    maptype.iterkeys = _map_iterkeys
    def _map_itervalues(self):
        """
        Yield the map values, in iteration order.
        """
        iter = self.begin()
        while iter != self.end():
            yield self.second(iter)
            iter = self.next(iter)
        return
    maptype.itervalues = _map_itervalues
    def _map_iteritems(self):
        """
        Yield (key, value) pairs, in iteration order.
        """
        iter = self.begin()
        while iter != self.end():
            yield (self.first(iter), self.second(iter))
            iter = self.next(iter)
        return
    maptype.iteritems = _map_iteritems
    def _map_keys(self):
        """
        Return a list of the map keys.
        """
        return list(self.iterkeys())
    maptype.keys = _map_keys
    def _map_values(self):
        """
        Return a list of the map values.
        """
        return list(self.itervalues())
    maptype.values = _map_values
    def _map_items(self):
        """
        Return a list of (key, value) pairs.
        """
        return list(self.iteritems())
    maptype.items = _map_items
    def _map_has_key(self, key):
        """
        Return True if 'key' is present (dict.has_key compatibility).
        """
        return key in self
    maptype.has_key = _map_has_key
    def _map_pop(self, key):
        """
        Remove the specified key and return the corresponding value.
        """
        if not isinstance(key, self.keytype):
            raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))
        if key not in self:
            raise KeyError('key not found')
        ret = self[key]
        del self[key]
        return ret
    maptype.pop = _map_pop
    def _map_popitem(self):
        """
        Remove and return an arbitrary (key, value) pair.
        """
        if len(self) == 0:
            raise KeyError('key not found')
        key = self.keys()[0]
        return (key, self.pop(key))
    maptype.popitem = _map_popitem
    def _map_setdefault(self, key, default=None):
        """
        Return the value for 'key', inserting 'default' first when absent.
        """
        if not isinstance(key, self.keytype):
            raise KeyError('type of `key` should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))
        if key in self:
            return self[key]
        self[key] = default
        return default
    maptype.setdefault = _map_setdefault
#_map_as_dict(user_labels_t, 'user_labels', (int, long), qstring)
_map_as_dict(user_cmts_t, 'user_cmts', treeloc_t, citem_cmt_t)
_map_as_dict(user_numforms_t, 'user_numforms', operand_locator_t, number_format_t)
_map_as_dict(user_iflags_t, 'user_iflags', citem_locator_t, int)
import ida_pro
_map_as_dict(user_unions_t, 'user_unions', (int, long), ida_pro.intvec_t)
_map_as_dict(eamap_t, 'eamap', long, cinsnptrvec_t)
import ida_range
_map_as_dict(boundaries_t, 'boundaries', cinsn_t, ida_range.rangeset_t)
#
# Object ownership
#
def _call_with_transferrable_ownership(fun, *args):
    """
    Invoke 'fun' on the expression 'args[0]' and transfer IDAPython-side
    ownership from the input expression to the result when the input
    was owned.
    """
    e = args[0]
    was_owned = e.thisown
    res = fun(e, *args[1:])
    # ATM, 'res' doesn't own the resulting cexpr_t.
    # In case 'fun'
    # - created a new object: we want to own that one in case 'e' was owned
    # - didn't create a new object: we will remove & re-gain ownership on
    #   the same underlying cexpr_t. No biggie.
    if was_owned:
        if res:
            e._maybe_disown_and_deregister()
            res._own_and_register()
    else:
        debug_hexrays_ctree("NOTE: call_with_transferrable_ownership() called with non-IDAPython-owned object. Is this intentional?")
    return res
def lnot(e):
    """Logically negate expression 'e'; ownership transfers from 'e' to the result (see _call_with_transferrable_ownership)."""
    return _call_with_transferrable_ownership(_ll_lnot, e)
def make_ref(e):
    """Create a reference to expression 'e'; ownership transfers from 'e' to the result."""
    return _call_with_transferrable_ownership(_ll_make_ref, e)
def dereference(e, ptrsize, is_float=False):
    """Dereference expression 'e' of size 'ptrsize'; ownership transfers from 'e' to the result."""
    return _call_with_transferrable_ownership(_ll_dereference, e, ptrsize, is_float)
def call_helper(rettype, args, *rest):
    """
    Create a helper-call expression; IDAPython takes ownership of the
    result, and of 'args' when it is a carglist_t.
    """
    result = _ll_call_helper(rettype, args, *rest)
    if result:
        result._own_and_register()
    # the arglist ownership transfer happens regardless of 'result'
    if type(args) == carglist_t:
        args.thisown = False
    return result
def new_block():
    """Create a new block instance (via _ll_new_block); IDAPython takes ownership of the result."""
    res = _ll_new_block()
    if res:
        res._own_and_register()
    return res
def make_num(*args):
    """Create a number expression (via _ll_make_num); IDAPython takes ownership of the result."""
    res = _ll_make_num(*args)
    if res:
        res._own_and_register()
    return res
def create_helper(*args):
    """Create a helper expression (via _ll_create_helper); IDAPython takes ownership of the result."""
    res = _ll_create_helper(*args)
    if res:
        res._own_and_register()
    return res
# ----------------
class __cbhooks_t(Hexrays_Hooks):
    """
    Adapter that funnels every Hexrays_Hooks event into one user-supplied
    callback(event_code, *args); backs the deprecated
    install_hexrays_callback()/remove_hexrays_callback() API.
    """
    # keeps every adapter alive and findable for later removal
    instances = []

    def __init__(self, callback):
        self.callback = callback
        self.instances.append(self)
        Hexrays_Hooks.__init__(self)
    # each hook simply forwards to the callback, tagged with its hxe_* code
    def maturity(self, *args): return self.callback(hxe_maturity, *args)
    def interr(self, *args): return self.callback(hxe_interr, *args)
    def print_func(self, *args): return self.callback(hxe_print_func, *args)
    def func_printed(self, *args): return self.callback(hxe_func_printed, *args)
    def open_pseudocode(self, *args): return self.callback(hxe_open_pseudocode, *args)
    def switch_pseudocode(self, *args): return self.callback(hxe_switch_pseudocode, *args)
    def refresh_pseudocode(self, *args): return self.callback(hxe_refresh_pseudocode, *args)
    def close_pseudocode(self, *args): return self.callback(hxe_close_pseudocode, *args)
    def keyboard(self, *args): return self.callback(hxe_keyboard, *args)
    def right_click(self, *args): return self.callback(hxe_right_click, *args)
    def double_click(self, *args): return self.callback(hxe_double_click, *args)
    def curpos(self, *args): return self.callback(hxe_curpos, *args)
    def create_hint(self, *args): return self.callback(hxe_create_hint, *args)
    def text_ready(self, *args): return self.callback(hxe_text_ready, *args)
    def populating_popup(self, *args): return self.callback(hxe_populating_popup, *args)
def install_hexrays_callback(callback):
    "Deprecated. Please use Hexrays_Hooks instead"
    hooks = __cbhooks_t(callback)
    hooks.hook()
    return True
def remove_hexrays_callback(callback):
    "Deprecated. Please use Hexrays_Hooks instead"
    for hook in list(__cbhooks_t.instances):
        if hook.callback == callback:
            hook.unhook()
            __cbhooks_t.instances.remove(hook)
            return 1
    return 0
#</pycode(py_hexrays)>
# Backward-compatibility aliases (IDA 6.95-era names), installed only when
# the bw-compat layer (_BC695) is enabled.
if _BC695:
    get_tform_vdui=get_widget_vdui
    hx_get_tform_vdui=hx_get_widget_vdui
    HEXRAYS_API_MAGIC1=(HEXRAYS_API_MAGIC>>32)
    HEXRAYS_API_MAGIC2=(HEXRAYS_API_MAGIC&0xFFFFFFFF)
| 2.015625 | 2 |
webapp/gen_graphs.py | bfitzy2142/NET4901-SP | 3 | 4823 | #!/usr/bin/env python3
"""
@author: <NAME>
MySQL parser for graphical presentation of interface counter data
"""
import mysql.connector
import datetime
from mysql.connector import Error
from datetime import datetime, timedelta
import json
class sql_graph_info():
    def __init__(self, node, interface, time, sql_creds, db):
        """
        Initializer for the sql_graph_info object.

        Arguments:
            node [str] -- Node (switch) identifier, e.g. "1" for
                "openflow:1" (see db_pull).
            interface [str] -- Interface index on the node.
            time [str] -- Time-range selector ("1".."5"; anything else
                selects the maximum range).
            sql_creds [dict] -- MySQL credentials with keys 'host',
                'user' and 'password'.
            db [str] -- Name of the database (schema) to query.
        """
        self.node = node
        self.interface = interface
        self.time = time
        self.sql_creds = sql_creds
        self.db = db
def db_pull(self, node, interface, time, ):
""" Pulls the RX and TX information from the database
to display for the graphs page.
Arguments:
node [str] -- The node that holds the interface which
is to presented.
interface [str] -- The interface in which the counter
information will be based off of.
time [str] -- Time ranging from 30 minutes to 10 Years
Returns:
dict -- containing arrays of the counter values at
their coresponding timestamp.
"""
data_end = datetime.now()
if time == '1':
data_start = datetime.now() - timedelta(hours=0, minutes=30)
elif time == '2':
data_start = datetime.now() - timedelta(hours=1)
elif time == '3':
data_start = datetime.now() - timedelta(hours=2)
elif time == '4':
data_start = datetime.now() - timedelta(hours=6)
elif time == '5':
data_start = datetime.now() - timedelta(days=1)
else:
data_start = datetime.now() - timedelta(days=3650)
data_end.strftime('%Y-%m-%d %H:%M:%S')
data_start.strftime('%Y-%m-%d %H:%M:%S')
node_st = "openflow" + node
query = (
f"SELECT timestamp, Rx_pckts, Tx_pckts, Rx_drops, Tx_drops "
f"FROM {node_st}_counters WHERE "
f"Interface='openflow:{node}:{interface}'"
f"AND timestamp >= '{data_start}'"
f"AND timestamp < '{data_end}'"
)
mydb = mysql.connector.connect(
host=self.sql_creds['host'],
user=self.sql_creds['user'],
passwd=self.sql_creds['password'],
database=self.db
)
cur = mydb.cursor()
cur.execute(query)
response = cur.fetchall()
graphPoints = []
displayPoints = []
dataPointDict = {}
for dataPoint in response:
date = str(dataPoint[0])
rx_count = int(dataPoint[1])
tx_count = int(dataPoint[2])
rx_drops = int(dataPoint[3])
tx_drops = int(dataPoint[4])
if dataPointDict:
old_rx_c = int(dataPointDict['rx_count'])
old_tx_c = int(dataPointDict["tx_count"])
old_rx_d = int(dataPointDict["rx_drops"])
old_tx_d = int(dataPointDict["tx_drops"])
dif_rx_c = rx_count - old_rx_c
dif_tx_c = tx_count - old_tx_c
dif_rx_d = rx_drops - old_rx_d
dif_tx_d = tx_drops - old_tx_d
difDict = {"date": date, "rx_count": dif_rx_c,
"tx_count": dif_tx_c,
"rx_drops": dif_rx_d,
"tx_drops": dif_tx_d}
displayPoints.append(difDict)
dataPointDict = {"date": date, "rx_count": rx_count,
"tx_count": tx_count, "rx_drops": rx_drops,
"tx_drops": tx_drops}
graphPoints.append(dataPointDict)
return displayPoints
| 3.0625 | 3 |
scqubits/tests/test_fluxqubit.py | dmtvanzanten/scqubits | 0 | 4824 | # test_fluxqubit.py
# meant to be run with 'pytest'
#
# This file is part of scqubits.
#
# Copyright (c) 2019 and later, <NAME> and <NAME>
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import numpy as np
from scqubits import FluxQubit
from scqubits.tests.conftest import StandardTests
class TestFluxQubit(StandardTests):
    """Runs the shared StandardTests suite against the FluxQubit class."""

    @classmethod
    def setup_class(cls):
        """Install the fixture attributes that StandardTests expects."""
        fixture_config = {
            "qbt": None,
            "qbt_type": FluxQubit,
            "file_str": "fluxqubit",
            "op1_str": "n_1_operator",
            "op2_str": "n_2_operator",
            "param_name": "flux",
            "param_list": np.linspace(0.45, 0.55, 50),
        }
        for attribute, value in fixture_config.items():
            setattr(cls, attribute, value)
| 1.890625 | 2 |
PhysicsTools/PatAlgos/python/producersLayer1/pfParticleProducer_cfi.py | ckamtsikis/cmssw | 852 | 4825 | <filename>PhysicsTools/PatAlgos/python/producersLayer1/pfParticleProducer_cfi.py
import FWCore.ParameterSet.Config as cms
# PATPFParticleProducer: builds pat::PFParticles from the configured
# particle-flow candidate collection. As configured here, MC matching,
# efficiencies and resolutions are all disabled, and the userData section
# is an empty scaffold to be filled in per analysis.
patPFParticles = cms.EDProducer("PATPFParticleProducer",
    # General configurables
    pfCandidateSource = cms.InputTag("noJet"),

    # MC matching configurables
    addGenMatch = cms.bool(False),
    genParticleMatch = cms.InputTag(""), ## particles source to be used for the MC matching
                                         ## must be an InputTag or VInputTag to a product of
                                         ## type edm::Association<reco::GenParticleCollection>
    embedGenMatch = cms.bool(False),     ## embed gen match inside the object instead of storing the ref

    # add user data
    userData = cms.PSet(
        # add custom classes here
        userClasses = cms.PSet(
            src = cms.VInputTag('')
        ),
        # add doubles here
        userFloats = cms.PSet(
            src = cms.VInputTag('')
        ),
        # add ints here
        userInts = cms.PSet(
            src = cms.VInputTag('')
        ),
        # add candidate ptrs here
        userCands = cms.PSet(
            src = cms.VInputTag('')
        ),
        # add "inline" functions here
        userFunctions = cms.vstring(),
        userFunctionLabels = cms.vstring()
    ),

    # Efficiencies
    addEfficiencies = cms.bool(False),
    efficiencies = cms.PSet(),

    # resolution
    addResolutions = cms.bool(False),
    resolutions = cms.PSet(),
)
| 1.921875 | 2 |
tests/test_api.py | ines/spacy-js | 141 | 4826 | <filename>tests/test_api.py
# coding: utf8
from __future__ import unicode_literals
import pytest
import spacy
import json
from api.server import parse, doc2json, load_model
@pytest.fixture(scope="session")
def model():
    """Name of the spaCy model exercised by the API tests."""
    model_name = "en_core_web_sm"
    return model_name
@pytest.fixture(scope="session")
def text():
    """Two-sentence sample input; "Facebook" provides a named entity."""
    sample = "This is a sentence about Facebook. This is another one."
    return sample
@pytest.fixture(scope="session")
def nlp(model):
    """spaCy pipeline, loaded once per test session."""
    pipeline = spacy.load(model)
    return pipeline
@pytest.fixture(scope="session")
def doc(nlp, text):
    """Parsed Doc for the shared sample text."""
    parsed = nlp(text)
    return parsed
def test_server_parse(model, text, doc):
    """The server's parse() must match a direct doc2json conversion."""
    load_model(model)
    via_server = parse(model, text)
    direct = doc2json(doc, model)
    # Compare canonical (key-sorted) JSON serialisations.
    assert json.dumps(via_server, sort_keys=True) == json.dumps(
        direct, sort_keys=True
    )
def test_doc2json_doc_tokens(doc, model):
    """doc2json must serialise document text, flags and per-token data."""
    payload = doc2json(doc, model)
    assert payload["model"] == model
    assert payload["doc"]["text"] == doc.text
    assert payload["doc"]["text_with_ws"] == doc.text_with_ws
    for flag in ("is_tagged", "is_parsed", "is_sentenced"):
        assert payload["doc"][flag]
    assert len(payload["tokens"]) == len(doc)
    first_token = payload["tokens"][0]
    assert first_token["text"] == doc[0].text
    assert first_token["head"] == doc[0].head.i
def test_doc2json_doc_ents(doc, model):
    """Entity spans in the payload must mirror doc.ents."""
    payload = doc2json(doc, model)
    expected = list(doc.ents)
    assert "ents" in payload
    serialized = payload["ents"]
    assert len(serialized) == len(expected)
    assert len(serialized) >= 1
    head = serialized[0]
    assert head["start"] == expected[0].start
    assert head["end"] == expected[0].end
    assert head["label"] == expected[0].label_
def test_doc2json_doc_sents(doc, model):
    """Sentence boundaries in the payload must mirror doc.sents."""
    payload = doc2json(doc, model)
    expected = list(doc.sents)
    assert "sents" in payload
    serialized = payload["sents"]
    assert len(serialized) == len(expected)
    assert len(serialized) >= 1
    head = serialized[0]
    assert head["start"] == expected[0].start
    assert head["end"] == expected[0].end
def test_doc2json_doc_noun_chunks(doc, model):
    """Noun-chunk spans in the payload must mirror doc.noun_chunks."""
    payload = doc2json(doc, model)
    expected = list(doc.noun_chunks)
    assert "noun_chunks" in payload
    serialized = payload["noun_chunks"]
    assert len(serialized) == len(expected)
    assert len(serialized) >= 1
    head = serialized[0]
    assert head["start"] == expected[0].start
    assert head["end"] == expected[0].end
| 2.421875 | 2 |
python/GafferArnold/ArnoldTextureBake.py | medubelko/gaffer | 1 | 4827 | <reponame>medubelko/gaffer
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import IECoreScene
import Gaffer
import GafferScene
import GafferArnold
import GafferDispatch
import GafferImage
import imath
import inspect
class ArnoldTextureBake( GafferDispatch.TaskNode ) :
	"""Task node that bakes shading into textures with Arnold.

	The node wires together an internal Gaffer graph that:
	1. partitions the filtered meshes' UDIMs into `tasks` chunks,
	2. renders each chunk through per-UDIM `uv_camera` cameras,
	3. merges/fills/filters the per-mesh EXR layers per output file, and
	4. converts the results to .tx, optionally deleting intermediates.
	"""

	class __CameraSetup( GafferScene.FilteredSceneProcessor ) :
		"""Internal scene processor that builds the bake cameras.

		Queries the UDIMs of the filtered meshes, splits the resulting
		(fileName, udim) jobs into `tasks` chunks, creates one uv_camera
		per job in this chunk (grouped under `cameraGroup` and parented
		into the scene), and declares one EXR render output per AOV for
		each camera.
		"""

		def __init__( self, name = "__CameraSetup" ) :

			GafferScene.FilteredSceneProcessor.__init__( self, name )

			# Public plugs
			self["cameraGroup"] = Gaffer.StringPlug( "cameraGroup", Gaffer.Plug.Direction.In, "__TEXTUREBAKE_CAMERAS" )
			self["bakeDirectory"] = Gaffer.StringPlug( "bakeDirectory", Gaffer.Plug.Direction.In, "" )
			self["defaultFileName"] = Gaffer.StringPlug( "defaultFileName", Gaffer.Plug.Direction.In, "${bakeDirectory}/<AOV>/<AOV>.<UDIM>.exr" )
			self["defaultResolution"] = Gaffer.IntPlug( "defaultResolution", Gaffer.Plug.Direction.In, 512 )
			self["uvSet"] = Gaffer.StringPlug( "uvSet", Gaffer.Plug.Direction.In, "uv" )
			self["udims"] = Gaffer.StringPlug( "udims", Gaffer.Plug.Direction.In, "" )
			self["normalOffset"] = Gaffer.FloatPlug( "normalOffset", Gaffer.Plug.Direction.In, 0.1 )
			self["aovs"] = Gaffer.StringPlug( "aovs", Gaffer.Plug.Direction.In, "beauty:rgba" )
			self["tasks"] = Gaffer.IntPlug( "tasks", Gaffer.Plug.Direction.In, 1 )
			self["taskIndex"] = Gaffer.IntPlug( "taskIndex", Gaffer.Plug.Direction.In, 0 )

			# Output: flat list of all image files this chunk will render.
			self["renderFileList"] = Gaffer.StringVectorDataPlug( "renderFileList", Gaffer.Plug.Direction.Out, defaultValue = IECore.StringVectorData() )
			self["renderFileList"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

			# Private internal network

			# Collect, per UDIM, the meshes using it plus their bake attributes.
			self["__udimQuery"] = GafferScene.UDIMQuery()
			self["__udimQuery"]["in"].setInput(  self["in"] )
			self["__udimQuery"]["uvSet"].setInput(  self["uvSet"] )
			self["__udimQuery"]["attributes"].setValue( "bake:resolution bake:fileName" )
			self["__udimQuery"]["filter"].setInput( self["filter"] )

			self["__chunkedBakeInfo"] = Gaffer.CompoundObjectPlug( "__chunkedBakeInfo", Gaffer.Plug.Direction.In, IECore.CompoundObject() )
			self["__chunkedBakeInfo"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

			# NOTE(review): the chunk maths below uses "/" for what looks like
			# intended integer division of slice indices; under Python 3 this
			# yields floats and list slicing would raise — confirm "//" intended.
			self["__chunkExpression"] = Gaffer.Expression()
			self["__chunkExpression"].setExpression( inspect.cleandoc(
				"""
				import collections
				import re

				rawInfo = parent["__udimQuery"]["out"]

				defaultFileName = parent["defaultFileName"]
				defaultResolution = parent["defaultResolution"]

				selectUdimsStr = parent["udims"]

				# FrameList really ought to take care of this check, instead of just doing
				# something obviously wrong
				if re.match( ".*[0-9] +[0-9].*", selectUdimsStr ):
					raise RuntimeError( "ArnoldTextureBake : Udim list must be comma separated." )
				selectUdims = set( IECore.FrameList.parse( selectUdimsStr ).asList() )

				allMeshes = collections.defaultdict( lambda : [] )
				for udim, meshes in rawInfo.items():
					if selectUdims and not int( udim ) in selectUdims:
						continue

					for mesh, extraAttributes in meshes.items():
						resolution = defaultResolution
						if "bake:resolution" in extraAttributes:
							resolution = extraAttributes["bake:resolution"].value

						fileName = defaultFileName
						if "bake:fileName" in extraAttributes:
							fileName = extraAttributes["bake:fileName"].value

						allMeshes[ (fileName, udim) ].append( { "mesh" : mesh, "resolution" : resolution } )

				fileList = sorted( allMeshes.keys() )

				info = IECore.CompoundObject()

				numTasks = min( parent["tasks"], len( fileList ) )
				taskIndex = parent["taskIndex"]

				if taskIndex < numTasks:

					chunkStart = ( taskIndex * len( fileList ) ) / numTasks
					chunkEnd = ( ( taskIndex + 1 ) * len( fileList ) ) / numTasks

					dupeCount = 0
					prevFileName = ""
					for fileNameTemplate, udim in fileList[chunkStart:chunkEnd]:
						for meshData in allMeshes[(fileNameTemplate, udim)]:
							o = IECore.CompoundObject()
							o["mesh"] = IECore.StringData( meshData["mesh"] )
							o["udim"] = IECore.IntData( int( udim ) )
							o["resolution"] = IECore.IntData( meshData["resolution"] )

							udimStr = str( udim )
							fileName = fileNameTemplate.replace( "<UDIM>", udimStr )
							if fileName == prevFileName:
								dupeCount += 1
								fileName = fileName + ".layer" + str( dupeCount )
							else:
								prevFileName = fileName
								dupeCount = 0

							o["fileName"] = IECore.StringData( fileName )

							name = o["mesh"].value.replace( "/", "_" ) + "." + udimStr
							info[ name ] = o

				parent["__chunkedBakeInfo"] = info

				fileList = []
				for name, i in info.items():
					fileName = i["fileName"].value
					for nameAndAov in parent["aovs"].strip( " " ).split( " " ):
						fileList.append( i["fileName"].value.replace( "<AOV>", nameAndAov.split(":")[0] ) )
				parent["renderFileList"] = IECore.StringVectorData( fileList )
				"""
			), "python" )

			# Re-parent the camera group into the input scene.
			self["__parent"] = GafferScene.Parent()
			self["__parent"]["parent"].setValue( "/" )
			for c in ['bound', 'transform', 'attributes', 'object', 'childNames', 'setNames', 'set']:
				self["__parent"]["in"][c].setInput( self["in"][c] )

			# NOTE(review): the RuntimeError below contains an unfilled "%s"
			# placeholder — the offending aov spec is never interpolated.
			self["__outputExpression"] = Gaffer.Expression()
			self["__outputExpression"].setExpression( inspect.cleandoc(
				"""
				import IECoreScene

				# Transfer all input globals except for outputs
				inGlobals = parent["in"]["globals"]
				outGlobals = IECore.CompoundObject()
				for key, value in inGlobals.items():
					if not key.startswith( "output:" ):
						outGlobals[key] = value

				# Make our own outputs
				info = parent["__chunkedBakeInfo"]
				for cameraName, i in info.items():
					params = IECore.CompoundData()
					fileName = i["fileName"].value
					params["camera"] = IECore.StringData( "/" + parent["cameraGroup"] + "/" + cameraName )
					for nameAndAov in parent["aovs"].strip( " " ).split( " " ):
						tokens = nameAndAov.split( ":" )
						if len( tokens ) != 2:
							raise RuntimeError( "Invalid bake aov specification: %s It should contain a : between name and data." )
						( aovName, aov ) = tokens
						aovFileName = fileName.replace( "<AOV>", aovName )
						outGlobals["output:" + cameraName + "." + aov] = IECoreScene.Output( aovFileName, "exr", aov + " RGBA", params )
				parent["__parent"]["in"]["globals"] = outGlobals
				"""
			), "python" )

			# Template camera: orthographic, then tweaked into Arnold's
			# uv_camera projection with per-UDIM offsets.
			self["__camera"] = GafferScene.Camera()
			self["__camera"]["projection"].setValue( "orthographic" )

			self["__cameraTweaks"] = GafferScene.CameraTweaks()
			self["__cameraTweaks"]["in"].setInput( self["__camera"]["out"] )
			self["__cameraTweaks"]["tweaks"]["projection"] = GafferScene.TweakPlug( "projection", "uv_camera" )
			self["__cameraTweaks"]["tweaks"]["resolution"] = GafferScene.TweakPlug( "resolution", imath.V2i( 0 ) )
			self["__cameraTweaks"]["tweaks"]["u_offset"] = GafferScene.TweakPlug( "u_offset", 0.0 )
			self["__cameraTweaks"]["tweaks"]["v_offset"] = GafferScene.TweakPlug( "v_offset", 0.0 )
			self["__cameraTweaks"]["tweaks"]["mesh"] = GafferScene.TweakPlug( "mesh", "" )
			self["__cameraTweaks"]["tweaks"]["uv_set"] = GafferScene.TweakPlug( "uv_set", "" )
			self["__cameraTweaks"]["tweaks"]["extend_edges"] = GafferScene.TweakPlug( "extend_edges", False )
			self["__cameraTweaks"]["tweaks"]["offset"] = GafferScene.TweakPlug( "offset", 0.1 )

			self["__cameraTweaks"]["tweaks"]["offset"]["value"].setInput( self["normalOffset"] )

			self["__cameraTweaksFilter"] = GafferScene.PathFilter()
			self["__cameraTweaksFilter"]["paths"].setValue( IECore.StringVectorData( [ '/camera' ] ) )
			self["__cameraTweaks"]["filter"].setInput( self["__cameraTweaksFilter"]["out"] )

			# One tweaked camera per bake job, named by the job key.
			self["__collectScenes"] = GafferScene.CollectScenes()
			self["__collectScenes"]["sourceRoot"].setValue( "/camera" )
			self["__collectScenes"]["rootNameVariable"].setValue( "collect:cameraName" )
			self["__collectScenes"]["in"].setInput( self["__cameraTweaks"]["out"] )

			self["__group"] = GafferScene.Group()
			self["__group"]["in"][0].setInput( self["__collectScenes"]["out"] )
			self["__group"]["name"].setInput( self["cameraGroup"] )

			self["__parent"]["children"][0].setInput( self["__group"]["out"] )

			self["__collectSceneRootsExpression"] = Gaffer.Expression()
			self["__collectSceneRootsExpression"].setExpression( inspect.cleandoc(
				"""
				info = parent["__chunkedBakeInfo"]
				parent["__collectScenes"]["rootNames"] = IECore.StringVectorData( info.keys() )
				"""
			), "python" )

			self["__cameraSetupExpression"] = Gaffer.Expression()
			self["__cameraSetupExpression"].setExpression( inspect.cleandoc(
				"""
				cameraName = context["collect:cameraName"]
				info = parent["__chunkedBakeInfo"]

				i = info[cameraName]

				udimOffset = i["udim"].value - 1001
				parent["__cameraTweaks"]["tweaks"]["resolution"]["value"] = imath.V2i( i["resolution"].value )
				parent["__cameraTweaks"]["tweaks"]["u_offset"]["value"] = -( udimOffset % 10 )
				parent["__cameraTweaks"]["tweaks"]["v_offset"]["value"] = -( udimOffset / 10 )
				parent["__cameraTweaks"]["tweaks"]["mesh"]["value"] = i["mesh"].value
				parent["__cameraTweaks"]["tweaks"]["uv_set"]["value"] = parent["uvSet"] if parent["uvSet"] != "uv" else ""
				"""
			), "python" )

			self["out"].setFlags( Gaffer.Plug.Flags.Serialisable, False )
			self["out"].setInput( self["__parent"]["out"] )

	def __init__( self, name = "ArnoldTextureBake" ) :
		"""Build the public plugs and the internal dispatch/render/image
		processing network described in the class docstring."""

		GafferDispatch.TaskNode.__init__( self, name )

		self["in"] = GafferScene.ScenePlug()
		self["filter"] = GafferScene.FilterPlug()
		self["bakeDirectory"] = Gaffer.StringPlug( "bakeDirectory", defaultValue = "" )
		self["defaultFileName"] = Gaffer.StringPlug( "defaultFileName", defaultValue = "${bakeDirectory}/<AOV>/<AOV>.<UDIM>.exr" )
		self["defaultResolution"] = Gaffer.IntPlug( "defaultResolution", defaultValue = 512 )
		self["uvSet"] = Gaffer.StringPlug( "uvSet", defaultValue = 'uv' )
		self["udims"] = Gaffer.StringPlug( "udims", defaultValue = "" )
		self["normalOffset"] = Gaffer.FloatPlug( "offset", defaultValue = 0.1 )
		self["aovs"] = Gaffer.StringPlug( "aovs", defaultValue = 'beauty:RGBA' )
		self["tasks"] = Gaffer.IntPlug( "tasks", defaultValue = 1 )
		self["cleanupIntermediateFiles"] = Gaffer.BoolPlug( "cleanupIntermediateFiles", defaultValue = True )
		self["applyMedianFilter"] = Gaffer.BoolPlug( "applyMedianFilter", Gaffer.Plug.Direction.In, False )
		self["medianRadius"] = Gaffer.IntPlug( "medianRadius", Gaffer.Plug.Direction.In, 1 )

		# Set up connection to preTasks beforehand
		self["__PreTaskList"] = GafferDispatch.TaskList()
		self["__PreTaskList"]["preTasks"].setInput( self["preTasks"] )

		self["__CleanPreTasks"] = Gaffer.DeleteContextVariables()
		self["__CleanPreTasks"].setup( GafferDispatch.TaskNode.TaskPlug() )
		self["__CleanPreTasks"]["in"].setInput( self["__PreTaskList"]["task"] )
		self["__CleanPreTasks"]["variables"].setValue( "BAKE_WEDGE:index BAKE_WEDGE:value_unused" )

		# First, setup python commands which will dispatch a chunk of a render or image tasks as
		# immediate execution once they reach the farm - this allows us to run multiple tasks in
		# one farm process.
		self["__RenderDispatcher"] = GafferDispatch.PythonCommand()
		self["__RenderDispatcher"]["preTasks"][0].setInput( self["__CleanPreTasks"]["out"] )
		self["__RenderDispatcher"]["command"].setValue( inspect.cleandoc(
			"""
			import GafferDispatch
			# We need to access frame and "BAKE_WEDGE:index" so that the hash of render varies with the wedge index,
			# so we might as well print what we're doing
			IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching render task index %i for frame %i" % ( context["BAKE_WEDGE:index"], context.getFrame() ) )
			d = GafferDispatch.LocalDispatcher()
			d.dispatch( [ self.parent()["__bakeDirectoryContext"] ] )
			"""
		) )
		self["__ImageDispatcher"] = GafferDispatch.PythonCommand()
		self["__ImageDispatcher"]["preTasks"][0].setInput( self["__RenderDispatcher"]["task"] )
		self["__ImageDispatcher"]["command"].setValue( inspect.cleandoc(
			"""
			import GafferDispatch
			# We need to access frame and "BAKE_WEDGE:index" so that the hash of render varies with the wedge index,
			# so we might as well print what we're doing
			IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Dispatching image task index %i for frame %i" % ( context["BAKE_WEDGE:index"], context.getFrame() ) )
			d = GafferDispatch.LocalDispatcher()
			d.dispatch( [ self.parent()["__CleanUpSwitch"] ] )
			"""
		) )
		# Connect through the dispatch settings to the render dispatcher
		# ( The image dispatcher runs much quicker, and should be OK using default settings )
		self["__RenderDispatcher"]["dispatcher"].setInput( self["dispatcher"] )

		# Set up variables so the dispatcher knows that the render and image dispatches depend on
		# the file paths ( in case they are varying in a wedge )
		for redispatch in [ self["__RenderDispatcher"], self["__ImageDispatcher"] ]:
			redispatch["variables"].addChild( Gaffer.NameValuePlug( "bakeDirectory", "", "bakeDirectoryVar" ) )
			redispatch["variables"].addChild( Gaffer.NameValuePlug( "defaultFileName", "", "defaultFileNameVar" ) )

		# Connect the variables via an expression so that get expanded ( this also means that
		# if you put #### in a filename you will get per frame tasks, because the hash will depend
		# on frame number )
		self["__DispatchVariableExpression"] = Gaffer.Expression()
		self["__DispatchVariableExpression"].setExpression( inspect.cleandoc(
			"""
			parent["__RenderDispatcher"]["variables"]["bakeDirectoryVar"]["value"] = parent["bakeDirectory"]
			parent["__RenderDispatcher"]["variables"]["defaultFileNameVar"]["value"] = parent["defaultFileName"]
			parent["__ImageDispatcher"]["variables"]["bakeDirectoryVar"]["value"] = parent["bakeDirectory"]
			parent["__ImageDispatcher"]["variables"]["defaultFileNameVar"]["value"] = parent["defaultFileName"]
			"""
		), "python" )

		# Wedge based on tasks into the overall number of tasks to run. Note that we don't know how
		# much work each task will do until we actually run the render tasks ( this is when scene
		# expansion happens ). Because we must group all tasks that write to the same file into the
		# same task batch, if tasks is a large number, some tasks batches could end up empty
		self["__MainWedge"] = GafferDispatch.Wedge()
		self["__MainWedge"]["preTasks"][0].setInput( self["__ImageDispatcher"]["task"] )
		self["__MainWedge"]["variable"].setValue( "BAKE_WEDGE:value_unused" )
		self["__MainWedge"]["indexVariable"].setValue( "BAKE_WEDGE:index" )
		self["__MainWedge"]["mode"].setValue( 1 )
		self["__MainWedge"]["intMin"].setValue( 1 )
		self["__MainWedge"]["intMax"].setInput( self["tasks"] )

		self["task"].setInput( self["__MainWedge"]["task"] )
		self["task"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

		# Now set up the render tasks. This involves doing the actual rendering, and triggering the
		# output of the file list index file.

		# First get rid of options from the upstream scene that could mess up the bake
		self["__OptionOverrides"] = GafferScene.StandardOptions()
		self["__OptionOverrides"]["in"].setInput( self["in"] )
		self["__OptionOverrides"]["options"]["pixelAspectRatio"]["enabled"].setValue( True )
		self["__OptionOverrides"]["options"]["resolutionMultiplier"]["enabled"].setValue( True )
		self["__OptionOverrides"]["options"]["overscan"]["enabled"].setValue( True )
		self["__OptionOverrides"]["options"]["renderCropWindow"]["enabled"].setValue( True )
		self["__OptionOverrides"]["options"]["cameraBlur"]["enabled"].setValue( True )
		self["__OptionOverrides"]["options"]["transformBlur"]["enabled"].setValue( True )
		self["__OptionOverrides"]["options"]["deformationBlur"]["enabled"].setValue( True )

		self["__CameraSetup"] = self.__CameraSetup()
		self["__CameraSetup"]["in"].setInput( self["__OptionOverrides"]["out"] )
		self["__CameraSetup"]["filter"].setInput( self["filter"] )
		self["__CameraSetup"]["defaultFileName"].setInput( self["defaultFileName"] )
		self["__CameraSetup"]["defaultResolution"].setInput( self["defaultResolution"] )
		self["__CameraSetup"]["uvSet"].setInput( self["uvSet"] )
		self["__CameraSetup"]["aovs"].setInput( self["aovs"] )
		self["__CameraSetup"]["normalOffset"].setInput( self["normalOffset"] )
		self["__CameraSetup"]["tasks"].setInput( self["tasks"] )
		self["__CameraSetup"]["udims"].setInput( self["udims"] )

		self["__Expression"] = Gaffer.Expression()
		self["__Expression"].setExpression( 'parent["__CameraSetup"]["taskIndex"] = context.get( "BAKE_WEDGE:index", 0 )', "python" )

		self["__indexFilePath"] = Gaffer.StringPlug()
		self["__indexFilePath"].setFlags( Gaffer.Plug.Flags.Serialisable, False )
		self["__IndexFileExpression"] = Gaffer.Expression()
		self["__IndexFileExpression"].setExpression( inspect.cleandoc(
			"""
			import os
			parent["__indexFilePath"] = os.path.join( parent["bakeDirectory"], "BAKE_FILE_INDEX_" +
				str( context.get("BAKE_WEDGE:index", 0 ) ) + ".####.txt" )
			"""
		), "python" )

		self["__outputIndexCommand"] = GafferDispatch.PythonCommand()
		self["__outputIndexCommand"]["variables"].addChild( Gaffer.NameValuePlug( "bakeDirectory", Gaffer.StringPlug() ) )
		self["__outputIndexCommand"]["variables"][0]["value"].setInput( self["bakeDirectory"] )
		self["__outputIndexCommand"]["variables"].addChild( Gaffer.NameValuePlug( "indexFilePath", Gaffer.StringPlug() ) )
		self["__outputIndexCommand"]["variables"][1]["value"].setInput( self["__indexFilePath"] )
		self["__outputIndexCommand"]["variables"].addChild( Gaffer.NameValuePlug( "fileList", Gaffer.StringVectorDataPlug( defaultValue = IECore.StringVectorData() ) ) )
		self["__outputIndexCommand"]["variables"][2]["value"].setInput( self["__CameraSetup"]["renderFileList"] )
		self["__outputIndexCommand"]["command"].setValue( inspect.cleandoc(
			"""
			import os
			import distutils.dir_util

			# Ensure path exists
			distutils.dir_util.mkpath( variables["bakeDirectory"] )

			f = open( variables["indexFilePath"], "w" )

			f.writelines( [ i + "\\n" for i in sorted( variables["fileList"] ) ] )
			f.close()
			IECore.msg( IECore.MessageHandler.Level.Info, "Bake Process", "Wrote list of bake files for this chunk to " + variables["indexFilePath"] )
			"""
		) )

		self["__arnoldRender"] = GafferArnold.ArnoldRender()
		self["__arnoldRender"]["preTasks"][0].setInput( self["__outputIndexCommand"]["task"] )
		self["__arnoldRender"]["dispatcher"]["immediate"].setValue( True )
		self["__arnoldRender"]["in"].setInput( self["__CameraSetup"]["out"] )

		self["__bakeDirectoryContext"] = GafferDispatch.TaskContextVariables()
		self["__bakeDirectoryContext"]["variables"].addChild( Gaffer.NameValuePlug( "bakeDirectory", Gaffer.StringPlug() ) )
		self["__bakeDirectoryContext"]["variables"][0]["value"].setInput( self["bakeDirectory"] )
		self["__bakeDirectoryContext"]["preTasks"][0].setInput( self["__arnoldRender"]["task"] )

		# Now set up the image tasks. This involves merging all layers for a UDIM, filling in the
		# background, writing out this image, converting it to tx, and optionally deleting all the exrs

		self["__imageList"] = Gaffer.CompoundObjectPlug( "__imageList", defaultValue = IECore.CompoundObject() )
		self["__imageList"].setFlags( Gaffer.Plug.Flags.Serialisable, False )

		self["__ImageReader"] = GafferImage.ImageReader()
		self["__CurInputFileExpression"] = Gaffer.Expression()
		self["__CurInputFileExpression"].setExpression( inspect.cleandoc(
			"""
			l = parent["__imageList"]
			outFile = context["wedge:outFile"]
			loopIndex = context[ "loop:index" ]
			parent["__ImageReader"]["fileName"] = l[outFile][ loopIndex ]
			"""
		), "python" )

		# Find the max size of any input file
		self["__SizeLoop"] = Gaffer.LoopComputeNode()
		self["__SizeLoop"].setup( Gaffer.IntPlug() )

		self["__SizeMaxExpression"] = Gaffer.Expression()
		self["__SizeMaxExpression"].setExpression( inspect.cleandoc(
			"""
			f = parent["__ImageReader"]["out"]["format"]
			parent["__SizeLoop"]["next"] = max( f.width(), parent["__SizeLoop"]["previous"] )
			"""
		), "python" )

		# Loop over all input files for this output file, and merge them all together
		self["__ImageLoop"] = Gaffer.LoopComputeNode()
		self["__ImageLoop"].setup( GafferImage.ImagePlug() )

		self["__NumInputsForCurOutputExpression"] = Gaffer.Expression()
		self["__NumInputsForCurOutputExpression"].setExpression( inspect.cleandoc(
			"""
			l = parent["__imageList"]
			outFile = context["wedge:outFile"]
			numInputs = len( l[outFile] )
			parent["__ImageLoop"]["iterations"] = numInputs
			parent["__SizeLoop"]["iterations"] = numInputs
			"""
		), "python" )

		self["__Resize"] = GafferImage.Resize()
		self["__Resize"]["format"]["displayWindow"]["min"].setValue( imath.V2i( 0, 0 ) )
		self["__Resize"]['format']["displayWindow"]["max"]["x"].setInput( self["__SizeLoop"]["out"] )
		self["__Resize"]['format']["displayWindow"]["max"]["y"].setInput( self["__SizeLoop"]["out"] )
		self["__Resize"]['in'].setInput( self["__ImageReader"]["out"] )

		self["__Merge"] = GafferImage.Merge()
		self["__Merge"]["in"][0].setInput( self["__Resize"]["out"] )
		self["__Merge"]["in"][1].setInput( self["__ImageLoop"]["previous"] )
		self["__Merge"]["operation"].setValue( GafferImage.Merge.Operation.Add )

		self["__ImageLoop"]["next"].setInput( self["__Merge"]["out"] )

		# Write out the combined image, so we can immediately read it back in
		# This is just because we're doing enough image processing that we
		# could saturate the cache, and Gaffer wouldn't know that this is
		# the important result to keep
		self["__ImageIntermediateWriter"] = GafferImage.ImageWriter()
		self["__ImageIntermediateWriter"]["in"].setInput( self["__ImageLoop"]["out"] )

		self["__ImageIntermediateReader"] = GafferImage.ImageReader()

		# Now that we've merged everything together, we can use a BleedFill to fill in the background,
		# so that texture filtering across the edges will pull in colors that are at least reasonable.
		self["__BleedFill"] = GafferImage.BleedFill()
		self["__BleedFill"]["in"].setInput( self["__ImageIntermediateReader"]["out"] )

		self["__Median"] = GafferImage.Median()
		self["__Median"]["in"].setInput( self["__BleedFill"]["out"] )
		self["__Median"]["enabled"].setInput( self["applyMedianFilter"] )
		self["__Median"]["radius"]["x"].setInput( self["medianRadius"] )
		self["__Median"]["radius"]["y"].setInput( self["medianRadius"] )

		# Write out the result
		self["__ImageWriter"] = GafferImage.ImageWriter()
		self["__ImageWriter"]["in"].setInput( self["__Median"]["out"] )
		self["__ImageWriter"]["preTasks"][0].setInput( self["__ImageIntermediateWriter"]["task"] )

		# Convert result to texture
		self["__ConvertCommand"] = GafferDispatch.SystemCommand()
		# We shouldn't need a sub-shell and this prevents S.I.P on the Mac from
		# blocking the dylibs loaded by maketx.
		self["__ConvertCommand"]["shell"].setValue( False )
		self["__ConvertCommand"]["substitutions"].addChild( Gaffer.NameValuePlug( "inFile", IECore.StringData(), "member1" ) )
		# NOTE(review): the duplicate "member1" name below appears to rely on
		# Gaffer's automatic plug-name uniquification to become "member2"
		# ( which __CommandSetupExpression references ) — confirm intentional.
		self["__ConvertCommand"]["substitutions"].addChild( Gaffer.NameValuePlug( "outFile", IECore.StringData(), "member1" ) )

		self["__ConvertCommand"]["preTasks"][0].setInput( self["__ImageWriter"]["task"] )
		self["__ConvertCommand"]["command"].setValue( 'maketx --wrap clamp {inFile} -o {outFile}' )


		self["__CommandSetupExpression"] = Gaffer.Expression()
		self["__CommandSetupExpression"].setExpression( inspect.cleandoc(
			"""

			outFileBase = context["wedge:outFile"]
			intermediateExr = outFileBase + ".intermediate.exr"
			parent["__ImageIntermediateWriter"]["fileName"] = intermediateExr
			parent["__ImageIntermediateReader"]["fileName"] = intermediateExr
			tmpExr = outFileBase + ".tmp.exr"
			parent["__ImageWriter"]["fileName"] = tmpExr
			parent["__ConvertCommand"]["substitutions"]["member1"]["value"] = tmpExr
			parent["__ConvertCommand"]["substitutions"]["member2"]["value"] = outFileBase + ".tx"
			"""
		), "python" )

		self["__ImageWedge"] = GafferDispatch.Wedge()
		self["__ImageWedge"]["preTasks"][0].setInput( self["__ConvertCommand"]["task"] )
		self["__ImageWedge"]["variable"].setValue( 'wedge:outFile' )
		self["__ImageWedge"]["indexVariable"].setValue( 'wedge:outFileIndex' )
		self["__ImageWedge"]["mode"].setValue( int( GafferDispatch.Wedge.Mode.StringList ) )

		self["__CleanUpCommand"] = GafferDispatch.PythonCommand()
		self["__CleanUpCommand"]["preTasks"][0].setInput( self["__ImageWedge"]["task"] )
		self["__CleanUpCommand"]["variables"].addChild( Gaffer.NameValuePlug( "filesToDelete", Gaffer.StringVectorDataPlug( defaultValue = IECore.StringVectorData() ), "member1" ) )
		self["__CleanUpCommand"]["command"].setValue( inspect.cleandoc(
			"""
			import os
			for tmpFile in variables["filesToDelete"]:
				os.remove( tmpFile )
			"""
		) )

		self["__CleanUpExpression"] = Gaffer.Expression()
		self["__CleanUpExpression"].setExpression( inspect.cleandoc(
			"""

			imageList = parent["__imageList"]

			toDelete = []
			for outFileBase, inputExrs in imageList.items():
				tmpExr = outFileBase + ".tmp.exr"
				intermediateExr = outFileBase + ".intermediate.exr"
				toDelete.extend( inputExrs )
				toDelete.append( tmpExr )
				toDelete.append( intermediateExr )
			toDelete.append( parent["__indexFilePath"] )

			parent["__CleanUpCommand"]["variables"]["member1"]["value"] = IECore.StringVectorData( toDelete )
			"""
		), "python" )

		# BoolPlug drives the switch index: 0 skips cleanup, 1 runs it.
		self["__CleanUpSwitch"] = GafferDispatch.TaskSwitch()
		self["__CleanUpSwitch"]["preTasks"][0].setInput( self["__ImageWedge"]["task"] )
		self["__CleanUpSwitch"]["preTasks"][1].setInput( self["__CleanUpCommand"]["task"] )
		self["__CleanUpSwitch"]["index"].setInput( self["cleanupIntermediateFiles"] )

		# Set up the list of input image files to process, and the corresponding list of
		# output files to wedge over
		self["__ImageSetupExpression"] = Gaffer.Expression()
		self["__ImageSetupExpression"].setExpression( inspect.cleandoc(
			"""
			f = open( parent["__indexFilePath"], "r" )

			fileList = f.read().splitlines()

			fileDict = {}
			for i in fileList:
				rootName = i.rsplit( ".exr", 1 )[0]
				if rootName in fileDict:
					fileDict[ rootName ].append( i )
				else:
					fileDict[ rootName ] = IECore.StringVectorData( [i] )
			parent["__imageList"] = IECore.CompoundObject( fileDict )

			parent["__ImageWedge"]["strings"] = IECore.StringVectorData( fileDict.keys() )
			"""
		), "python" )
IECore.registerRunTimeTyped( ArnoldTextureBake, typeName = "GafferArnold::ArnoldTextureBake" )
| 1.023438 | 1 |
aea/protocols/generator/common.py | valory-xyz/agents-aea | 0 | 4828 | <filename>aea/protocols/generator/common.py
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains utility code for generator modules."""
import inspect
import os
import re
import shutil
import subprocess # nosec
import sys
import tempfile
from pathlib import Path
from typing import Tuple
from aea.configurations.base import ProtocolSpecification
from aea.configurations.constants import (
DEFAULT_PROTOCOL_CONFIG_FILE,
PACKAGES,
PROTOCOL_LANGUAGE_JS,
PROTOCOL_LANGUAGE_PYTHON,
)
from aea.configurations.loader import ConfigLoader
from aea.helpers.io import open_file
SPECIFICATION_PRIMITIVE_TYPES = ["pt:bytes", "pt:int", "pt:float", "pt:bool", "pt:str"]
SPECIFICATION_COMPOSITIONAL_TYPES = [
"pt:set",
"pt:list",
"pt:dict",
"pt:union",
"pt:optional",
]
PYTHON_COMPOSITIONAL_TYPES = [
"FrozenSet",
"Tuple",
"Dict",
"Union",
"Optional",
]
MESSAGE_IMPORT = "from aea.protocols.base import Message"
SERIALIZER_IMPORT = "from aea.protocols.base import Serializer"
PATH_TO_PACKAGES = PACKAGES
INIT_FILE_NAME = "__init__.py"
PROTOCOL_YAML_FILE_NAME = DEFAULT_PROTOCOL_CONFIG_FILE
MESSAGE_DOT_PY_FILE_NAME = "message.py"
DIALOGUE_DOT_PY_FILE_NAME = "dialogues.py"
CUSTOM_TYPES_DOT_PY_FILE_NAME = "custom_types.py"
SERIALIZATION_DOT_PY_FILE_NAME = "serialization.py"
PYTHON_TYPE_TO_PROTO_TYPE = {
"bytes": "bytes",
"int": "int32",
"float": "float",
"bool": "bool",
"str": "string",
}
CURRENT_DIR = os.path.dirname(inspect.getfile(inspect.currentframe())) # type: ignore
ISORT_CONFIGURATION_FILE = os.path.join(CURRENT_DIR, "isort.cfg")
ISORT_CLI_ARGS = [
"--settings-path",
ISORT_CONFIGURATION_FILE,
"--quiet",
]
PROTOLINT_CONFIGURATION_FILE_NAME = "protolint.yaml"
PROTOLINT_CONFIGURATION = """lint:
rules:
remove:
- MESSAGE_NAMES_UPPER_CAMEL_CASE
- ENUM_FIELD_NAMES_ZERO_VALUE_END_WITH
- PACKAGE_NAME_LOWER_CASE
- REPEATED_FIELD_NAMES_PLURALIZED
- FIELD_NAMES_LOWER_SNAKE_CASE"""
PROTOLINT_INDENTATION_ERROR_STR = "incorrect indentation style"
PROTOLINT_ERROR_WHITELIST = [PROTOLINT_INDENTATION_ERROR_STR]
def _to_camel_case(text: str) -> str:
"""
Convert a text in snake_case format into the CamelCase format.
:param text: the text to be converted.
:return: The text in CamelCase format.
"""
return "".join(word.title() for word in text.split("_"))
def _camel_case_to_snake_case(text: str) -> str:
"""
Convert a text in CamelCase format into the snake_case format.
:param text: the text to be converted.
:return: The text in CamelCase format.
"""
return re.sub(r"(?<!^)(?=[A-Z])", "_", text).lower()
def _match_brackets(text: str, index_of_open_bracket: int) -> int:
"""
Give the index of the matching close bracket for the opening bracket at 'index_of_open_bracket' in the input 'text'.
:param text: the text containing the brackets.
:param index_of_open_bracket: the index of the opening bracket.
:return: the index of the matching closing bracket (if any).
:raises SyntaxError if there are no matching closing bracket.
"""
if text[index_of_open_bracket] != "[":
raise SyntaxError(
"Index {} in 'text' is not an open bracket '['. It is {}".format(
index_of_open_bracket,
text[index_of_open_bracket],
)
)
open_bracket_stack = []
for index in range(index_of_open_bracket, len(text)):
if text[index] == "[":
open_bracket_stack.append(text[index])
elif text[index] == "]":
open_bracket_stack.pop()
if not open_bracket_stack:
return index
raise SyntaxError(
"No matching closing bracket ']' for the opening bracket '[' at {} "
+ str(index_of_open_bracket)
)
def _has_matched_brackets(text: str) -> bool:
"""
Evaluate whether every opening bracket '[' in the 'text' has a matching closing bracket ']'.
:param text: the text.
:return: Boolean result, and associated message.
"""
open_bracket_stack = []
for index, _ in enumerate(text):
if text[index] == "[":
open_bracket_stack.append(index)
elif text[index] == "]":
if len(open_bracket_stack) == 0:
return False
open_bracket_stack.pop()
return len(open_bracket_stack) == 0
def _get_sub_types_of_compositional_types(compositional_type: str) -> Tuple[str, ...]:
    """
    Extract the sub-types of compositional types.

    This method handles both specification types (e.g. pt:set[], pt:dict[]) as well as python types (e.g. FrozenSet[], Union[]).

    :param compositional_type: the compositional type string whose sub-types are to be extracted.
    :return: tuple containing all extracted sub-types.
    """
    sub_types_list = list()
    # Only a string starting with a known compositional prefix is parsed; any
    # other input falls through to the SyntaxError at the bottom.
    for valid_compositional_type in (
        SPECIFICATION_COMPOSITIONAL_TYPES + PYTHON_COMPOSITIONAL_TYPES
    ):
        if compositional_type.startswith(valid_compositional_type):
            # The text between the outermost '[' and the last ']'.
            inside_string = compositional_type[
                compositional_type.index("[") + 1 : compositional_type.rindex("]")
            ].strip()
            # Peel one top-level sub-type off the front per iteration.
            while inside_string != "":
                do_not_add = False
                if inside_string.find(",") == -1:  # No comma; this is the last sub-type
                    provisional_sub_type = inside_string.strip()
                    if (
                        provisional_sub_type == "..."
                    ):  # The sub-string is ... used for Tuple, e.g. Tuple[int, ...]
                        do_not_add = True
                    else:
                        sub_type = provisional_sub_type
                    inside_string = ""
                else:  # There is a comma; this MAY not be the last sub-type
                    sub_string_until_comma = inside_string[
                        : inside_string.index(",")
                    ].strip()
                    if (
                        sub_string_until_comma.find("[") == -1
                    ):  # No open brackets; this is a primitive type and NOT the last sub-type
                        sub_type = sub_string_until_comma
                        inside_string = inside_string[
                            inside_string.index(",") + 1 :
                        ].strip()
                    else:  # There is an open bracket'['; this is a compositional type
                        # Delegate to _match_brackets to find where this nested
                        # compositional sub-type ends.
                        try:
                            closing_bracket_index = _match_brackets(
                                inside_string, inside_string.index("[")
                            )
                        except SyntaxError:
                            raise SyntaxError(
                                "Bad formatting. No matching close bracket ']' for the open bracket at {}".format(
                                    inside_string[
                                        : inside_string.index("[") + 1
                                    ].strip()
                                )
                            )
                        sub_type = inside_string[: closing_bracket_index + 1].strip()
                        the_rest_of_inside_string = inside_string[
                            closing_bracket_index + 1 :
                        ].strip()
                        if (
                            the_rest_of_inside_string.find(",") == -1
                        ):  # No comma; this is the last sub-type
                            inside_string = the_rest_of_inside_string.strip()
                        else:  # There is a comma; this is not the last sub-type
                            inside_string = the_rest_of_inside_string[
                                the_rest_of_inside_string.index(",") + 1 :
                            ].strip()
                if not do_not_add:
                    sub_types_list.append(sub_type)
            return tuple(sub_types_list)
    raise SyntaxError(
        "{} is not a valid compositional type.".format(compositional_type)
    )
def _union_sub_type_to_protobuf_variable_name(
content_name: str, content_type: str
) -> str:
"""
Given a content of type union, create a variable name for its sub-type for protobuf.
:param content_name: the name of the content
:param content_type: the sub-type of a union type
:return: The variable name
"""
if content_type.startswith("FrozenSet"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
expanded_type_str = "set_of_{}".format(sub_type)
elif content_type.startswith("Tuple"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
expanded_type_str = "list_of_{}".format(sub_type)
elif content_type.startswith("Dict"):
sub_type_1 = _get_sub_types_of_compositional_types(content_type)[0]
sub_type_2 = _get_sub_types_of_compositional_types(content_type)[1]
expanded_type_str = "dict_of_{}_{}".format(sub_type_1, sub_type_2)
else:
expanded_type_str = content_type
protobuf_variable_name = "{}_type_{}".format(content_name, expanded_type_str)
return protobuf_variable_name
def _python_pt_or_ct_type_to_proto_type(content_type: str) -> str:
    """
    Convert a PT or CT from python to their protobuf equivalent.

    :param content_type: the python type
    :return: The protobuf equivalent
    """
    # Custom types have no entry in the mapping and pass through unchanged.
    return PYTHON_TYPE_TO_PROTO_TYPE.get(content_type, content_type)
def _includes_custom_type(content_type: str) -> bool:
    """
    Evaluate whether a content type is a custom type or has a custom type as a sub-type.

    :param content_type: the content type
    :return: Boolean result
    """
    if content_type.startswith("Optional"):
        # Unwrap the optional and inspect its single sub-type.
        inner_type = _get_sub_types_of_compositional_types(content_type)[0]
        return _includes_custom_type(inner_type)
    if content_type.startswith("Union"):
        # A union includes a custom type if any of its members does.
        return any(
            _includes_custom_type(sub_type)
            for sub_type in _get_sub_types_of_compositional_types(content_type)
        )
    if content_type.startswith(("FrozenSet", "Tuple", "Dict")):
        return False
    if content_type in PYTHON_TYPE_TO_PROTO_TYPE:
        return False
    # Anything not recognised above is treated as a custom type.
    return True
def is_installed(programme: str) -> bool:
    """
    Check whether a programme is installed on the system.

    :param programme: the name of the programme.
    :return: True if installed, False otherwise
    """
    # shutil.which returns None when the executable cannot be resolved on PATH.
    return shutil.which(programme) is not None
def base_protolint_command() -> str:
    """
    Return the base protolint command.

    :return: The base protolint command
    """
    if sys.platform.startswith("win"):
        return "protolint"  # pragma: nocover
    # On POSIX, extend PATH so a Go-installed protolint binary is found.
    return "PATH=${PATH}:${GOPATH}/bin/:~/go/bin protolint"
def check_prerequisites() -> None:
    """
    Check that every external tool the generator needs is installed.

    :raises FileNotFoundError: if black, isort, protolint or protoc is missing.
    """
    # Code formatters that must be resolvable on PATH, with install instructions.
    formatter_messages = {
        "black": "Cannot find black code formatter! To install, please follow this link: https://black.readthedocs.io/en/stable/installation_and_usage.html",
        "isort": "Cannot find isort code formatter! To install, please follow this link: https://pycqa.github.io/isort/#installing-isort",
    }
    for programme, message in formatter_messages.items():
        if not is_installed(programme):
            raise FileNotFoundError(message)
    # check protolint code formatter is installed (probe by running it)
    exit_code = subprocess.call(f"{base_protolint_command()} version", shell=True)  # nosec
    if exit_code != 0:
        raise FileNotFoundError(
            "Cannot find protolint protocol buffer schema file linter! To install, please follow this link: https://github.com/yoheimuta/protolint."
        )
    # check protocol buffer compiler is installed
    if not is_installed("protoc"):
        raise FileNotFoundError(
            "Cannot find protocol buffer compiler! To install, please follow this link: https://developers.google.com/protocol-buffers/"
        )
def get_protoc_version() -> str:
    """Get the protoc version used."""
    completed = subprocess.run(  # nosec
        ["protoc", "--version"], stdout=subprocess.PIPE, check=True
    )
    # Strip both Unix and Windows line endings from the tool's output.
    raw_output = completed.stdout.decode("utf-8")
    return raw_output.strip("\n").strip("\r")
def load_protocol_specification(specification_path: str) -> ProtocolSpecification:
    """
    Load a protocol specification.

    :param specification_path: path to the protocol specification yaml file.
    :return: A ProtocolSpecification object
    """
    # The loader validates the yaml against the specification schema.
    loader = ConfigLoader(
        "protocol-specification_schema.json", ProtocolSpecification
    )
    # The open handle is consumed by the loader.
    return loader.load_protocol_specification(open_file(specification_path))
def _create_protocol_file(
    path_to_protocol_package: str, file_name: str, file_content: str
) -> None:
    """
    Create a file in the generated protocol package.

    :param path_to_protocol_package: path to the file
    :param file_name: the name of the file
    :param file_content: the content of the file
    """
    destination = os.path.join(path_to_protocol_package, file_name)
    with open_file(destination, "w") as opened_file:
        opened_file.write(file_content)
def try_run_black_formatting(path_to_protocol_package: str) -> None:
    """
    Run Black code formatting via subprocess.

    :param path_to_protocol_package: a path where formatting should be applied.
    """
    # Invoke black through the current interpreter so the same environment is used.
    command = [sys.executable, "-m", "black", path_to_protocol_package, "--quiet"]
    subprocess.run(command, check=True)  # nosec
def try_run_isort_formatting(path_to_protocol_package: str) -> None:
    """
    Run Isort code formatting via subprocess.

    :param path_to_protocol_package: a path where formatting should be applied.
    """
    # Invoke isort through the current interpreter with the generator's config.
    command = [sys.executable, "-m", "isort", *ISORT_CLI_ARGS, path_to_protocol_package]
    subprocess.run(command, check=True)  # nosec
def try_run_protoc(
    path_to_generated_protocol_package: str,
    name: str,
    language: str = PROTOCOL_LANGUAGE_PYTHON,
) -> None:
    """
    Run 'protoc' protocol buffer compiler via subprocess.

    :param path_to_generated_protocol_package: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :param language: the target language in which to compile the protobuf schema file
    """
    # for closure-styled imports for JS, comment the first line and uncomment the second
    if language == PROTOCOL_LANGUAGE_JS:
        js_commonjs_import_option = "import_style=commonjs,binary:"
    else:
        js_commonjs_import_option = ""
    language_part_of_the_command = (
        f"--{language}_out={js_commonjs_import_option}{path_to_generated_protocol_package}"
    )
    command = [
        "protoc",
        f"-I={path_to_generated_protocol_package}",
        language_part_of_the_command,
        f"{path_to_generated_protocol_package}/{name}.proto",
    ]
    subprocess.run(  # nosec
        command,
        stderr=subprocess.PIPE,
        encoding="utf-8",
        check=True,
        env=os.environ.copy(),
    )
def try_run_protolint(path_to_generated_protocol_package: str, name: str) -> None:
    """
    Run 'protolint' linter via subprocess.

    :param path_to_generated_protocol_package: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :raises subprocess.CalledProcessError: if protolint exits non-zero (callers read e.stderr).
    """
    # path to proto file
    path_to_proto_file = os.path.join(
        path_to_generated_protocol_package,
        f"{name}.proto",
    )

    # Dump protolint configuration into a temporary file
    temp_dir = tempfile.mkdtemp()
    try:
        path_to_configuration_in_tmp_file = Path(
            temp_dir, PROTOLINT_CONFIGURATION_FILE_NAME
        )
        with open_file(path_to_configuration_in_tmp_file, "w") as file:
            file.write(PROTOLINT_CONFIGURATION)

        # Protolint command
        cmd = f'{base_protolint_command()} lint -config_path={path_to_configuration_in_tmp_file} -fix "{path_to_proto_file}"'

        # Execute protolint command
        subprocess.run(  # nosec
            cmd,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            encoding="utf-8",
            check=True,
            env=os.environ.copy(),
            shell=True,
        )
    finally:
        # BUG FIX: delete the temporary configuration directory even when the
        # subprocess raises CalledProcessError; previously the cleanup was only
        # reached on success, leaking a temp dir on every lint failure.
        shutil.rmtree(temp_dir)  # pragma: no cover
def check_protobuf_using_protoc(
    path_to_generated_protocol_package: str, name: str
) -> Tuple[bool, str]:
    """
    Check whether a protocol buffer schema file is valid.

    Validation is via trying to compile the schema file. If successfully compiled it is valid, otherwise invalid.
    If valid, return True and a 'protobuf file is valid' message, otherwise return False and the error thrown by the compiler.

    :param path_to_generated_protocol_package: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :return: Boolean result and an accompanying message
    """
    try:
        try_run_protoc(path_to_generated_protocol_package, name)
    except subprocess.CalledProcessError as e:
        # Strip the "<file>:<line>:<col>: " prefixes protoc adds to its errors;
        # drop the trailing newline from stderr.
        location_prefix = name + ".proto:[0-9]+:[0-9]+: "
        return False, re.sub(location_prefix, "", e.stderr[:-1])
    # Remove the generated module; this call only validates the schema.
    os.remove(os.path.join(path_to_generated_protocol_package, name + "_pb2.py"))
    return True, "protobuf file is valid"
def compile_protobuf_using_protoc(
    path_to_generated_protocol_package: str, name: str, language: str
) -> Tuple[bool, str]:
    """
    Compile a protocol buffer schema file using protoc.

    If successfully compiled, return True and a success message,
    otherwise return False and the error thrown by the compiler.

    :param path_to_generated_protocol_package: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :param language: the target language in which to compile the protobuf schema file
    :return: Boolean result and an accompanying message
    """
    try:
        try_run_protoc(path_to_generated_protocol_package, name, language)
    except subprocess.CalledProcessError as e:
        # Strip the "<file>:<line>:<col>: " prefixes protoc adds to its errors.
        location_prefix = name + ".proto:[0-9]+:[0-9]+: "
        return False, re.sub(location_prefix, "", e.stderr[:-1])
    return True, "protobuf schema successfully compiled"
def apply_protolint(path_to_proto_file: str, name: str) -> Tuple[bool, str]:
    """
    Apply protolint linter to a protocol buffer schema file.

    If no output, return True and a success message,
    otherwise return False and the output shown by the linter
    (minus the indentation suggestions which are automatically fixed by protolint).

    :param path_to_proto_file: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :return: Boolean result and an accompanying message
    """
    try:
        try_run_protolint(path_to_proto_file, name)
        return True, "protolint has no output"
    except subprocess.CalledProcessError as e:
        # Keep only the lines that are not covered by the whitelist of
        # auto-fixed findings.
        relevant_lines = [
            line
            for line in e.stderr.split("\n")
            if not any(whitelisted in line for whitelisted in PROTOLINT_ERROR_WHITELIST)
        ]
        return False, "\n".join(relevant_lines)
| 1.6875 | 2 |
tests/unit/python/foglamp/services/core/api/test_backup_restore.py | vaibhav-ScaleDB/FogLAMP | 0 | 4829 | # -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import os
import asyncio
import json
from unittest.mock import MagicMock, patch
from collections import Counter
from aiohttp import web
import pytest
from foglamp.services.core import routes
from foglamp.services.core import connect
from foglamp.plugins.storage.common.backup import Backup
from foglamp.plugins.storage.common.restore import Restore
from foglamp.plugins.storage.common import exceptions
from foglamp.services.core.api import backup_restore
from foglamp.common.storage_client.storage_client import StorageClientAsync
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
async def mock_coro(*args, **kwargs):
    """Return (as a coroutine) the first positional argument, or '' if none given.

    Replaces the deprecated ``@asyncio.coroutine`` generator-based coroutine
    (removed in Python 3.11) with a native ``async def``; call sites that use
    ``mock_coro(value)`` as a mock's return_value are unaffected.
    """
    # kwargs are accepted (and ignored) for signature compatibility with
    # arbitrary call sites.
    return args[0] if args else ""
@pytest.allure.feature("unit")
@pytest.allure.story("api", "backup")
class TestBackup:
    """Unit test the Backup functionality
    """
    @pytest.fixture
    def client(self, loop, test_client):
        # aiohttp test client with the full FogLAMP REST route table mounted.
        app = web.Application(loop=loop)
        # fill the routes table
        routes.setup(app)
        return loop.run_until_complete(test_client(app))
    @pytest.mark.parametrize("input_data, expected", [
        (1, "RUNNING"),
        (2, "COMPLETED"),
        (3, "CANCELED"),
        (4, "INTERRUPTED"),
        (5, "FAILED"),
        (6, "RESTORED"),
        (7, "UNKNOWN")
    ])
    def test_get_status(self, input_data, expected):
        # _get_status maps the integer status code stored by the backup plugin
        # to its API-facing name.
        assert expected == backup_restore._get_status(input_data)
    @pytest.mark.parametrize("request_params", [
        '',
        '?limit=1',
        '?skip=1',
        '?status=completed',
        '?status=failed',
        '?status=restored&skip=10',
        '?status=running&limit=1',
        '?status=canceled&limit=10&skip=0',
        '?status=interrupted&limit=&skip=',
        '?status=&limit=&skip='
    ])
    async def test_get_backups(self, client, request_params):
        # GET /foglamp/backup must accept every valid combination of query params.
        storage_client_mock = MagicMock(StorageClientAsync)
        response = [{'file_name': '1.dump',
                     'id': 1, 'type': '1', 'status': '2',
                     'ts': '2018-02-15 15:18:41.821978+05:30',
                     'exit_code': '0'}]
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(Backup, 'get_all_backups', return_value=mock_coro(response)):
                resp = await client.get('/foglamp/backup{}'.format(request_params))
                assert 200 == resp.status
                result = await resp.text()
                json_response = json.loads(result)
                assert 1 == len(json_response['backups'])
                # Only id/date/status are exposed to the API consumer.
                assert Counter({"id", "date", "status"}) == Counter(json_response['backups'][0].keys())
    @pytest.mark.parametrize("request_params, response_code, response_message", [
        ('?limit=invalid', 400, "Limit must be a positive integer"),
        ('?limit=-1', 400, "Limit must be a positive integer"),
        ('?skip=invalid', 400, "Skip/Offset must be a positive integer"),
        ('?skip=-1', 400, "Skip/Offset must be a positive integer"),
        ('?status=BLA', 400, "'BLA' is not a valid status")
    ])
    async def test_get_backups_bad_data(self, client, request_params, response_code, response_message):
        # Invalid query parameters must be rejected with 400 before storage is hit.
        resp = await client.get('/foglamp/backup{}'.format(request_params))
        assert response_code == resp.status
        assert response_message == resp.reason
    async def test_get_backups_exceptions(self, client):
        # Any unexpected failure while listing backups surfaces as HTTP 500.
        with patch.object(connect, 'get_storage_async', return_value=Exception):
            resp = await client.get('/foglamp/backup')
            assert 500 == resp.status
            assert "Internal Server Error" == resp.reason
    async def test_create_backup(self, client):
        async def mock_create():
            return "running_or_failed"
        storage_client_mock = MagicMock(StorageClientAsync)
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(Backup, 'create_backup', return_value=mock_create()):
                resp = await client.post('/foglamp/backup')
                assert 200 == resp.status
                # The endpoint echoes whatever status string the plugin returns.
                assert '{"status": "running_or_failed"}' == await resp.text()
    async def test_create_backup_exception(self, client):
        with patch.object(connect, 'get_storage_async', return_value=Exception):
            with patch.object(Backup, 'create_backup', return_value=Exception):
                resp = await client.post('/foglamp/backup')
                assert 500 == resp.status
                assert "Internal Server Error" == resp.reason
    async def test_get_backup_details(self, client):
        storage_client_mock = MagicMock(StorageClientAsync)
        response = {'id': 1, 'file_name': '1.dump', 'ts': '2018-02-15 15:18:41.821978+05:30',
                    'status': '2', 'type': '1', 'exit_code': '0'}
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(Backup, 'get_backup_details', return_value=mock_coro(response)):
                resp = await client.get('/foglamp/backup/{}'.format(1))
                assert 200 == resp.status
                result = await resp.text()
                json_response = json.loads(result)
                # Internal columns (file_name, type, exit_code) must not leak out.
                assert 3 == len(json_response)
                assert Counter({"id", "date", "status"}) == Counter(json_response.keys())
    @pytest.mark.parametrize("input_exception, response_code, response_message", [
        (exceptions.DoesNotExist, 404, "Backup id 8 does not exist"),
        (Exception, 500, "Internal Server Error")
    ])
    async def test_get_backup_details_exceptions(self, client, input_exception, response_code, response_message):
        storage_client_mock = MagicMock(StorageClientAsync)
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(Backup, 'get_backup_details', side_effect=input_exception):
                resp = await client.get('/foglamp/backup/{}'.format(8))
                assert response_code == resp.status
                assert response_message == resp.reason
    async def test_get_backup_details_bad_data(self, client):
        # A non-integer backup id is rejected before any storage access.
        resp = await client.get('/foglamp/backup/{}'.format('BLA'))
        assert 400 == resp.status
        assert "Invalid backup id" == resp.reason
    async def test_delete_backup(self, client):
        storage_client_mock = MagicMock(StorageClientAsync)
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(Backup, 'delete_backup', return_value=mock_coro(None)):
                resp = await client.delete('/foglamp/backup/{}'.format(1))
                assert 200 == resp.status
                result = await resp.text()
                json_response = json.loads(result)
                assert {'message': 'Backup deleted successfully'} == json_response
    @pytest.mark.parametrize("input_exception, response_code, response_message", [
        (exceptions.DoesNotExist, 404, "Backup id 8 does not exist"),
        (Exception, 500, "Internal Server Error")
    ])
    async def test_delete_backup_exceptions(self, client, input_exception, response_code, response_message):
        storage_client_mock = MagicMock(StorageClientAsync)
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(Backup, 'delete_backup', side_effect=input_exception):
                resp = await client.delete('/foglamp/backup/{}'.format(8))
                assert response_code == resp.status
                assert response_message == resp.reason
    async def test_delete_backup_bad_data(self, client):
        resp = await client.delete('/foglamp/backup/{}'.format('BLA'))
        assert 400 == resp.status
        assert "Invalid backup id" == resp.reason
    async def test_get_backup_status(self, client):
        # The status enumeration endpoint needs no storage; no mocking required.
        resp = await client.get('/foglamp/backup/status')
        assert 200 == resp.status
        result = await resp.text()
        json_response = json.loads(result)
        assert {'backupStatus': [{'index': 1, 'name': 'RUNNING'},
                                 {'index': 2, 'name': 'COMPLETED'},
                                 {'index': 3, 'name': 'CANCELED'},
                                 {'index': 4, 'name': 'INTERRUPTED'},
                                 {'index': 5, 'name': 'FAILED'},
                                 {'index': 6, 'name': 'RESTORED'}]} == json_response
    @pytest.mark.parametrize("input_exception, response_code, response_message", [
        (ValueError, 400, "Invalid backup id"),
        (exceptions.DoesNotExist, 404, "Backup id 8 does not exist"),
        (Exception, 500, "Internal Server Error")
    ])
    async def test_get_backup_download_exceptions(self, client, input_exception, response_code, response_message):
        storage_client_mock = MagicMock(StorageClientAsync)
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(Backup, 'get_backup_details', side_effect=input_exception):
                resp = await client.get('/foglamp/backup/{}/download'.format(8))
                assert response_code == resp.status
                assert response_message == resp.reason
    async def test_get_backup_download(self, client):
        storage_client_mock = MagicMock(StorageClientAsync)
        response = {'id': 1, 'file_name': '/usr/local/foglamp/data/backup/foglamp.db', 'ts': '2018-02-15 15:18:41',
                    'status': '2', 'type': '1'}
        # Serve this very test file as the download payload so no real backup
        # fixture is needed; tarfile.open is patched to skip archive creation.
        with patch("aiohttp.web.FileResponse", return_value=web.FileResponse(path=os.path.realpath(__file__))) as file_res:
            with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
                with patch.object(Backup, 'get_backup_details', return_value=mock_coro(response)) as patch_backup_detail:
                    with patch('tarfile.open'):
                        resp = await client.get('/foglamp/backup/{}/download'.format(1))
                        assert 200 == resp.status
                        assert 'OK' == resp.reason
                patch_backup_detail.assert_called_once_with(1)
        assert 1 == file_res.call_count
@pytest.allure.feature("unit")
@pytest.allure.story("api", "restore")
class TestRestore:
    """Unit test the Restore functionality"""
    @pytest.fixture
    def client(self, loop, test_client):
        # aiohttp test client with the full FogLAMP REST route table mounted.
        app = web.Application(loop=loop)
        # fill the routes table
        routes.setup(app)
        return loop.run_until_complete(test_client(app))
    async def test_restore_backup(self, client):
        async def mock_restore():
            return "running"
        storage_client_mock = MagicMock(StorageClientAsync)
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(Restore, 'restore_backup', return_value=mock_restore()):
                resp = await client.put('/foglamp/backup/{}/restore'.format(1))
                assert 200 == resp.status
                r = await resp.text()
                # The endpoint echoes the status string returned by the plugin.
                assert {'status': 'running'} == json.loads(r)
    @pytest.mark.parametrize("backup_id, input_exception, code, message", [
        (8, exceptions.DoesNotExist, 404, "Backup with 8 does not exist"),
        (2, Exception, 500, "Internal Server Error"),
        ('blah', ValueError, 400, 'Invalid backup id')
    ])
    async def test_restore_backup_exceptions(self, client, backup_id, input_exception, code, message):
        # Each plugin-side exception type must map to its own HTTP status.
        storage_client_mock = MagicMock(StorageClientAsync)
        with patch.object(connect, 'get_storage_async', return_value=storage_client_mock):
            with patch.object(Restore, 'restore_backup', side_effect=input_exception):
                resp = await client.put('/foglamp/backup/{}/restore'.format(backup_id))
                assert code == resp.status
                assert message == resp.reason
| 1.96875 | 2 |
pyemits/core/preprocessing/dimensional_reduction.py | thompson0012/PyEmits | 6 | 4830 | <reponame>thompson0012/PyEmits<gh_stars>1-10
"""
Why need dimensional reduction
The following is the use of dimensionality reduction in the data set:
• As data dimensions continue to decrease, the space required for data storage will also decrease.
• Low-dimensional data helps reduce calculation/training time.
• Some algorithms tend to perform poorly on high-dimensional data, and dimensionality reduction can improve algorithm availability.
• Dimensionality reduction can solve the problem of multicollinearity by removing redundant features. For example, we have two variables: "On the treadmill for a period of time
Time spent” and “calorie consumption”. These two variables are highly correlated. The longer the time spent on the treadmill, the more calories burned.
Naturally, the more. Therefore, it does not make much sense to store these two data at the same time, just one is enough.
• Dimensionality reduction helps data visualization. As mentioned earlier, if the dimensionality of the data is very high, the visualization will become quite difficult, while drawing two-dimensional three-dimensional
The graph of dimensional data is very simple.
Common dimensional reduction techniques:
1. missing value ratio
2. low variance filter
3. high correlation filter
4. random forest
5. backward feature elimination
6. forward feature selection
7. factor analysis
8. principle components analysis
9. independent component analysis
10. IOSMAP
11. t-SNE
12. UMAP
"""
random_state = 0
from enum import Enum
class FeatureSelection(Enum):
    # NOTE(review): subclassing Enum with no members and only classmethods is
    # unusual; this appears to be used as a namespace of techniques — confirm.
    @classmethod
    def missing_value_ratio(cls, threshold):
        # Stub: intended to drop features whose missing-value ratio exceeds
        # `threshold` — not implemented yet.
        return
    @classmethod
    def low_variance_filter(cls, threshold):
        # Stub: intended to drop near-constant features below a variance
        # threshold — not implemented yet.
        return
    @classmethod
    def high_correlation_filter(cls, threshold):
        # Stub: intended to drop one of each highly correlated feature pair —
        # not implemented yet.
        return
    @classmethod
    def random_forest(cls):
        # NOTE(review): placeholder — RandomForestRegressor.fit() is called with
        # no data and the importances are not returned; non-functional as written.
        from sklearn.ensemble import RandomForestRegressor
        RF = RandomForestRegressor()
        RF.fit()
        RF.feature_importances_
        return
    @classmethod
    def backward_feature_extraction(cls):
        # NOTE(review): placeholder — RFE.fit_transform() is called without
        # X/y; non-functional as written.
        from sklearn.linear_model import LinearRegression
        from sklearn.feature_selection import RFE
        clf = LinearRegression()
        rfe = RFE(clf, 10)
        rfe = rfe.fit_transform()
        return
    @classmethod
    def forward_feature_extraction(cls):
        # NOTE(review): placeholder — f_regression() requires X and y;
        # non-functional as written.
        from sklearn.feature_selection import f_regression
        ffs = f_regression()
        return
class ProjectionBased(Enum):
    # NOTE(review): namespace of projection-based techniques; the Enum base has
    # no members — confirm this is intentional.
    @classmethod
    def isomap(cls):
        # NOTE(review): placeholder — fit_transform() is called without data.
        from sklearn.manifold import Isomap
        ISOMAP = Isomap(neighbors_algorithm=5, n_components=3, n_jobs=-1)
        ISOMAP.fit_transform()
        return
    @classmethod
    def tsne(cls):
        # NOTE(review): placeholder — fit_transform() is called without data.
        from sklearn.manifold import TSNE
        tsne = TSNE(n_components=3, n_iter=300)
        tsne.fit_transform()
        return
    @classmethod
    def umap(cls):
        # install umap
        # Stub: requires the third-party 'umap-learn' package; not implemented.
        return
class ComponentsFactorsBased(Enum):
    # NOTE(review): namespace of component/factor-based techniques; every
    # method below is a placeholder that calls fit_transform() without data.
    @classmethod
    def factor_analysis(cls):
        from sklearn.decomposition import FactorAnalysis
        FA = FactorAnalysis(n_components=3)
        FA.fit_transform()
        return
    @classmethod
    def pca(cls):
        from sklearn.decomposition import PCA
        pca = PCA(n_components=3)
        pca.fit_transform()
        return
    @classmethod
    def ica(cls):
        from sklearn.decomposition import FastICA
        ICA = FastICA(n_components=3)
        ICA.fit_transform()
        return
    @classmethod
    def lda(cls, solver='svd', n_components=3):
        # NOTE(review): LDA is supervised and needs y at fit time — not
        # provided here, so this is non-functional as written.
        from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
        LDA = LinearDiscriminantAnalysis(solver=solver, n_components=n_components)
        LDA.fit_transform()
        return
| 4.21875 | 4 |
sample_project/exam/exam.py | pcse/gitlab_tools | 0 | 4831 | <gh_stars>0
"""
These methods can be called inside WebCAT to determine which tests are loaded
for a given section/exam pair. This allows a common WebCAT submission site to
support different project tests
"""
def section():
    """Return the course-section number used by WebCAT to pick which tests to load."""
    # Instructor: set this to the section being distributed.
    # Known section numbers: 8527, 8528, 8529.
    active_section = 8529
    return active_section
def exam():
    """Return which exam variant ("A" or "B") this project distribution is for."""
    # Instructor: change to match the specific project distribution.
    variant = "A"  # alternative: "B"
    return variant
| 2.015625 | 2 |
scrapy/clarinetear/spiders/pagina12.py | ramiror/clarinete | 0 | 4832 | <reponame>ramiror/clarinete<gh_stars>0
from datetime import datetime
import scrapy
import lxml
from lxml.html.clean import Cleaner
import re
SOURCE = 'Página 12'
LANGUAGE = 'es'
cleaner = Cleaner(allow_tags=['p', 'br', 'b', 'a', 'strong', 'i', 'em'])
class Pagina12Spider(scrapy.Spider):
    """Scrapy spider for the Página 12 homepage and the articles it links to.

    Yields three kinds of items: one dict per homepage headline, one dict per
    parsed article body, and a single summary dict listing all homepage URLs.
    """
    name = 'pagina12'
    allowed_domains = ['www.pagina12.com.ar']
    start_urls = ['https://www.pagina12.com.ar/']

    def start_requests(self):
        # If an `article_url` argument was supplied (e.g. `scrapy crawl
        # pagina12 -a article_url=...`), scrape only that single article.
        # Otherwise fall through to the default behavior over start_urls.
        url = getattr(self, 'article_url', None)
        if url is not None:
            yield scrapy.Request(url, callback=self.parse_article, cb_kwargs=dict(url=url))

    def parse(self, response):
        """Parse the homepage: emit one item per headline, schedule each
        article for full parsing, and finish with a homepage summary item."""
        urls = []
        for article in response.css('article'):
            link = article.css('a')
            url = link.attrib['href']
            if not url:
                continue
            # Homepage links are often relative; absolutize them.
            if not url.startswith('http'):
                url = 'https://www.pagina12.com.ar' + url
            urls.append(url)
            maybe_img = article.css('img.show-for-large-only')
            obj = {
                'title': article.css('.article-title a::text, a .title::text').get(),
                # "volanta" is the kicker line shown above the headline.
                'volanta': (article.css('.article-title a .title-prefix::text').get() or '').strip(),
                'url': url,
                'image': maybe_img.attrib['src'] if maybe_img else None,
                'source': SOURCE,
                'source_language': LANGUAGE,
            }
            yield obj
            request = scrapy.Request(url, callback=self.parse_article, cb_kwargs=dict(url=url))
            yield request
        yield {'homepage': urls, 'source': SOURCE}

    def parse_article(self, response, url):
        """Extract the sanitized body HTML and publication date of one article."""
        html = ''.join(response.xpath('//div[@class="article-main-content article-text "]/p').extract())
        if not html:
            return
        # Strip everything except the small whitelist of inline tags in `cleaner`.
        content = lxml.html.tostring(cleaner.clean_html(lxml.html.fromstring(html))).decode('utf-8')
        date = response.css('div.date span::text').get().strip()
        # Dates look like "12 de mayo de 2021".
        # NOTE(review): if the site changes this format, `date_fragments` is
        # None and the .group() calls below raise AttributeError -- confirm
        # that crashing the parse is the intended failure mode.
        date_fragments = re.match(r'^([0-9]{1,2}) de ([a-z]+) de ([0-9]{4})$', date)
        months = {
            'enero': 1,
            'febrero': 2,
            'marzo': 3,
            'abril': 4,
            'mayo': 5,
            'junio': 6,
            'julio': 7,
            'agosto': 8,
            'septiembre': 9,
            'octubre': 10,
            'noviembre': 11,
            'diciembre': 12,
        }
        day = int(date_fragments.group(1))
        month = months[date_fragments.group(2)]
        year = int(date_fragments.group(3))
        # The page shows no time of day; midnight is used as a placeholder.
        hour = 0
        minute = 0
        date = datetime(year, month, day, hour, minute)
        obj = {
            'url': url,
            'content': content,
            'date': date.isoformat()
        }
        yield obj
| 2.546875 | 3 |
svd.py | christyc14/fyp | 0 | 4833 | from calendar import c
from typing import Dict, List, Union
from zlib import DEF_BUF_SIZE
import json_lines
import numpy as np
import re
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
import pandas as pd
import json
from scipy.sparse.linalg import svds
from scipy.spatial import distance
import os
import streamlit as st
def preprocess_ingredients(ingredients):
    """Normalize a list of raw ingredient strings.

    Strips parenthesized/bracketed qualifiers, percentage/mg/unit amounts,
    <i>/</i> tags and anything after a slash or backslash, then canonicalizes
    the three common names for water ("water", "aqua", "eau") to "Water".

    :param ingredients: iterable of raw ingredient strings
    :return: list of cleaned ingredient names, in the original order
    """
    # Compile the noise pattern once per call instead of once per ingredient.
    noise = re.compile(
        r"\(([^)]*)\)|(([0-9]\d{0,2}(\.\d{1,3})*(,\d+)?)(%|mg|units))|(<\/?i>)|(\/.+)|(\\.+)|\[([^\]]*)\]"
    )
    processed_ingredients = []
    # Iterate directly instead of indexing with range(len(...)).
    for raw in ingredients:
        cleaned = noise.sub("", raw).strip()
        if cleaned.lower() in ("water", "aqua", "eau"):
            cleaned = "Water"
        processed_ingredients.append(cleaned)
    return processed_ingredients
@st.experimental_memo
def content_recommender(opt, _item1, _item2, _item3, df) -> pd.DataFrame:
    """Rank products in category `opt` by ingredient-space proximity to three
    favorite products, using a 2-D t-SNE embedding of binarized ingredients.

    :param opt: product category to restrict the search to
    :param _item1/_item2/_item3: product_name values of the user's favorites
    :param df: full product DataFrame (category, product_name, ingredients, ...)
    :return: category DataFrame sorted by Chebyshev distance to the favorites,
             with the three favorites themselves removed
    NOTE(review): callers later in this file pass six positional arguments
    (including num_recs); this signature accepts five -- confirm which arity
    is intended.
    """
    # .copy() so the column assignments below modify an independent frame,
    # not a view of the caller's df.
    content_df = df[df.category == opt].copy()
    content_df["ingredients"] = content_df["ingredients"].map(preprocess_ingredients)
    mlb = MultiLabelBinarizer()
    output = mlb.fit_transform(content_df.ingredients.values)
    content_df = content_df.drop(["ingredients"], axis=1)
    model = TSNE(n_components=2, learning_rate=200)
    tsne_features = model.fit_transform(output)
    content_df["X"] = tsne_features[:, 0]
    content_df["Y"] = tsne_features[:, 1]
    content_df["dist"] = 0.0
    item1 = content_df[content_df["product_name"] == _item1]
    item2 = content_df[content_df["product_name"] == _item2]
    item3 = content_df[content_df["product_name"] == _item3]
    p1 = np.array([item1["X"], item1["Y"]]).reshape(1, -1)
    p2 = np.array([item2["X"], item2["Y"]]).reshape(1, -1)
    p3 = np.array([item3["X"], item3["Y"]]).reshape(1, -1)
    for ind, item in content_df.iterrows():
        pn = np.array([item.X, item.Y]).reshape(-1, 1)
        # BUG FIX: distances were previously written to `df` (the full frame)
        # while `content_df` -- the frame actually sorted below -- kept
        # dist == 0.0, making the sort a no-op.  Write to content_df instead.
        content_df.at[ind, "dist"] = min(
            distance.chebyshev(p1, pn),
            distance.chebyshev(p2, pn),
            distance.chebyshev(p3, pn),
        )
    content_df = content_df[~content_df.product_name.isin([_item1, _item2, _item3])]
    content_df = content_df.sort_values("dist")
    return content_df
@st.experimental_memo
def collab_recommender(df_tmp, num_recs, username):
    """Collaborative-filtering recommendations via truncated SVD of the
    (user x product) rating matrix built from users with multiple reviews.

    :param df_tmp: product DataFrame with `url` and exploded-able `review_data`
    :param num_recs: number of recommendations to return
    :param username: Sephora nickname to predict ratings for (must be one of
                     the multi-review users present in df_tmp)
    :return: df_tmp rows for the top predictions, with a `pred_rating` column
    """
    reviews = df_tmp.explode("review_data")
    reviews["username"] = reviews["review_data"].apply(lambda x: x["UserNickname"])
    reviews["rating"] = reviews["review_data"].apply(lambda x: x["Rating"])
    grouped_reviews = reviews.groupby("username")["review_data"].apply(list)
    # Only users with more than one review carry collaborative signal.
    multiple_rating_users = set(grouped_reviews[grouped_reviews.map(len) > 1].index)
    multi_reviews = reviews[reviews.username.isin(multiple_rating_users)]
    products_reviewed_per_user = {u: set() for u in multiple_rating_users}
    product_index = dict(zip(df_tmp["url"].values, range(len(df_tmp["url"]))))
    username_index = dict(zip(multiple_rating_users, range(len(multiple_rating_users))))
    matrix = np.zeros((len(multiple_rating_users), len(df_tmp["url"])))
    for user, rating, url in zip(
        multi_reviews.username.values,
        multi_reviews.rating.values,
        multi_reviews.url.values,
    ):
        matrix[username_index[user]][product_index[url]] = rating
        products_reviewed_per_user[user].add(url)
    ss = StandardScaler()
    normatrix = ss.fit_transform(matrix)
    # (leftover debug print of the normalized matrix removed)
    U, S, V = svds(normatrix)
    all_user_predicted_rating = ss.inverse_transform(U @ np.diag(S) @ V)
    preds_df = pd.DataFrame(
        all_user_predicted_rating, columns=product_index, index=username_index
    )
    sorted_user_preds = preds_df.loc[username].sort_values(ascending=False)
    # We only want products the user hasn't already reviewed.
    sorted_user_preds = sorted_user_preds[
        ~sorted_user_preds.index.isin(products_reviewed_per_user[username])
    ]
    sorted_user_preds = sorted_user_preds.head(num_recs)
    collab_df = pd.merge(
        df_tmp,
        sorted_user_preds.to_frame(),
        left_on="url",
        right_index=True,
        how="right",
    )
    collab_df.rename(columns={username: "pred_rating"}, inplace=True)
    return collab_df
if __name__ == "__main__":
    # Run the recommender as an interactive console program.
    # Work relative to this script's directory so the data path resolves.
    file_path = os.path.dirname(__file__)
    if file_path != "":
        os.chdir(file_path)
    products: List[Dict[str, Union[str, List[str]]]] = []
    # input data into List
    with open("../cbscraper/product_urls_with_reviews.jsonlines", "rb") as f:
        unique = set()
        lines = f.read().splitlines()
        df_inter = pd.DataFrame(lines)
        df_inter.columns = ["json_element"]
        df_inter["json_element"].apply(json.loads)
        df = pd.json_normalize(df_inter["json_element"].apply(json.loads))
    # to save myself if i do something dumb and run the scraper without deleting the .jsonlines file
    df.drop_duplicates(subset=["url"], inplace=True)
    # option: category of product, eg cleanser
    categories = set(df.category.values)
    # filter data by given option
    print("Hello world!")
    print("Welcome!")
    print(categories)
    print("pls enter the category:")
    cat = str(input())
    display_product_names = df[df.category == cat]
    print(display_product_names[["brand", "product_name"]])
    print("pls enter your top 3 products indices, separated by a new line")
    item1 = int(input())
    item2 = int(input())
    item3 = int(input())
    print("pls enter # of recs:")
    num_recs = int(input())
    reviews = display_product_names.explode("review_data")
    reviews["username"] = reviews["review_data"].apply(lambda x: x["UserNickname"])
    grouped_reviews = reviews.groupby("username")["review_data"].apply(list)
    multiple_rating_users = set(grouped_reviews[grouped_reviews.map(len) > 1].index)
    print(multiple_rating_users)
    print("pls enter sephora userid, if you don't have one just enter 'none':")
    username = str(input())
    # NOTE(review): both calls below pass six positional arguments
    # (including num_recs) to content_recommender, whose definition above
    # takes five -- as written this raises TypeError; confirm the intended
    # signature.
    if username == "none":
        # Anonymous user: content-based recommendations only.
        print("your ingredients based recommendations are:")
        cbf = content_recommender(
            cat,
            df.product_name.values[item1],
            df.product_name.values[item2],
            df.product_name.values[item3],
            num_recs,
            df,
        )
        print(cbf[["brand", "product_name", "url", "avg_rating"]])
    else:
        # Known user: over-fetch content candidates, then re-rank collaboratively.
        cbf = content_recommender(
            cat,
            df.product_name.values[item1],
            df.product_name.values[item2],
            df.product_name.values[item3],
            num_recs + 10,
            df,
        )
        cf = collab_recommender(cbf, num_recs, username)
        print("your hybrid recommendations are:")
        print(cf[["brand", "product_name", "url", "pred_rating"]])
    print("thank u for using this service :)")
esp32/tools/flasher.py | rodgergr/pycom-micropython-sigfox | 0 | 4834 | #!/usr/bin/env python
#
# Copyright (c) 2018, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
"""
Flash the ESP32 (bootloader, partitions table and factory app).
How to call esptool:
python esptool.py '--chip', 'esp32', '--port', /dev/ttyUSB0, '--baud', '921600', 'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', bootloader.bin, '0x8000', partitions.bin, '0x10000', application.bin, '0x3FF000', 'config_no_wifi.bin'
"""
from esptool import ESP32ROM
import os
import sys
import struct
import sqlite3
import argparse
import subprocess
import threading
import time
import fw_version
import csv
working_threads = {}
macs_db = None
wmacs = {}
DB_MAC_UNUSED = 0
DB_MAC_ERROR = -1
DB_MAC_LOCK = -2
DB_MAC_OK = 1
def open_macs_db(db_filename):
    """Open the MAC-address SQLite database into the global `macs_db`.

    Aborts the whole program (exit code 1) if the file does not exist.
    """
    global macs_db
    if os.path.exists(db_filename):
        macs_db = sqlite3.connect(db_filename)
    else:
        print("MAC addresses database not found")
        sys.exit(1)
def fetch_MACs(number):
    """Return up to `number` unused MAC addresses (status == 0), oldest first,
    as ASCII byte strings from the global `macs_db` connection."""
    rows = macs_db.execute(
        "select mac from macs where status = 0 order by rowid asc limit ?",
        (number,),
    ).fetchall()
    return [row[0].encode('ascii', 'ignore') for row in rows]
def set_mac_status(mac, wmac, status):
    """Persist the assignment state of `mac`.

    :param mac: LPWAN MAC address being tracked
    :param wmac: the board's WLAN MAC, recorded alongside it
    :param status: one of the DB_MAC_* constants (unused/error/lock/ok)
    Also stamps `last_touch` with the current Unix time.
    """
    macs_db.execute("update macs set status = ?, last_touch = strftime('%s','now'), wmac = ? where mac = ?", (status, wmac, mac))
    macs_db.commit()
def print_exception(e):
    """Print an exception together with the line number of the traceback
    currently being handled (must be called from inside an except block)."""
    traceback_line = sys.exc_info()[-1].tb_lineno
    message = 'Exception: {}, on line {}'.format(e, traceback_line)
    print(message)
def erase_flash(port, command):
    """Run an esptool erase_flash `command` and verify exactly one chip erase.

    Runs in a worker thread.  On failure (non-zero exit or the success banner
    not seen exactly once) it signals the main thread by setting
    working_threads[port] = None -- the main thread treats a None entry as
    "this board failed".
    NOTE(review): comparing readline() output to '' assumes text-mode/Python 2
    pipes (this script uses raw_input elsewhere); under Python 3 stdout would
    yield bytes -- confirm the target interpreter.
    """
    global working_threads
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    num_erases = 0
    # poll the process for new output until finished
    while True:
        nextline = process.stdout.readline()
        if nextline == '' and process.poll() != None:
            break
        if 'Chip erase completed successfully' in nextline:
            sys.stdout.write('Board erased OK on port %s\n' % port)
            num_erases += 1
        sys.stdout.flush()
    # hack to give feedback to the main thread
    if process.returncode != 0 or num_erases != 1:
        working_threads[port] = None
def read_wlan_mac(port, command):
    """Run an esptool read_mac `command` and record the board's WLAN MAC.

    The parsed MAC (colon-separated in esptool output, stored dash-separated
    and uppercased) is saved in the global `wmacs` dict keyed by port.
    On failure, signals the main thread by setting working_threads[port] = None.
    """
    global working_threads
    global wmacs
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    mac_read = False
    # poll the process for new output until finished
    while True:
        nextline = process.stdout.readline()
        if nextline == '' and process.poll() != None:
            break
        if 'MAC: ' in nextline:
            # Strip the "MAC: " prefix and trailing newline, then normalize.
            wmacs[port] = nextline[5:-1].replace(":", "-").upper()
            sys.stdout.write('MAC address %s read OK on port %s\n' % (nextline[5:-1], port))
            mac_read = True
        sys.stdout.flush()
    # hack to give feedback to the main thread
    if process.returncode != 0 or not mac_read:
        working_threads[port] = None
def set_vdd_sdio_voltage(port, command):
    """Run an espefuse `command` setting VDD_SDIO to 1.8V and check the banner.

    On failure (non-zero exit code), signals the main thread by setting
    working_threads[port] = None.
    """
    global working_threads
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # poll the process for new output until finished
    while True:
        nextline = process.stdout.readline()
        if nextline == '' and process.poll() != None:
            break
        if 'VDD_SDIO setting complete' in nextline:
            sys.stdout.write('Board VDD_SDIO Voltage configured OK on port %s\n' % port)
        sys.stdout.flush()
    # hack to give feedback to the main thread
    if process.returncode != 0:
        working_threads[port] = None
def flash_firmware(port, command):
    """Run an esptool write_flash `command` programming bootloader (0x1000),
    partition table (0x8000) and application (0x10000).

    Success requires a zero exit code and exactly three "Hash of data
    verified" lines (one per image).  On failure, signals the main thread by
    setting working_threads[port] = None.
    """
    global working_threads
    process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    num_hashes = 0
    # poll the process for new output until finished
    while True:
        nextline = process.stdout.readline()
        if nextline == '' and process.poll() != None:
            break
        if 'at 0x00001000' in nextline:
            sys.stdout.write('Bootloader programmed OK on port %s\n' % port)
        elif 'at 0x00008000' in nextline:
            sys.stdout.write('Partition table programmed OK on port %s\n' % port)
        elif 'at 0x00010000' in nextline:
            sys.stdout.write('Application programmed OK on port %s\n' % port)
        elif 'Hash of data verified' in nextline:
            num_hashes += 1
        sys.stdout.flush()
    # hack to give feedback to the main thread
    if process.returncode != 0 or num_hashes != 3:
        working_threads[port] = None
def run_initial_test(port, board):
    """Import the board-specific initial-test module and run it against `port`.

    Marks failure (test returned False or raised) by setting
    working_threads[port] = None.
    """
    global working_threads
    if board == 'LoPy':
        import run_initial_lopy_test as run_test
    elif board == 'LoPy4':
        import run_initial_lopy4_test as run_test
    elif board == 'SiPy':
        import run_initial_sipy_test as run_test
    else:
        # Any other board name falls back to the WiPy test module.
        import run_initial_wipy_test as run_test
    try:
        if not run_test.test_board(port):
            # same trick to give feedback to the main thread
            working_threads[port] = None
    except Exception:
        working_threads[port] = None
def flash_lpwan_mac(port, mac):
    """Program the LPWAN MAC address `mac` onto the board at `port`.

    NOTE: the imported module deliberately shares this function's name; the
    local binding shadows the function inside this scope only.
    Marks failure by setting working_threads[port] = None.
    """
    import flash_lpwan_mac
    global working_threads
    try:
        if not flash_lpwan_mac.program_board(port, mac):
            # same trick to give feedback to the main thread
            working_threads[port] = None
    except Exception:
        working_threads[port] = None
def run_final_test(port, board, mac):
    """Run the board-specific final test verifying `mac` and firmware version.

    Marks failure by setting working_threads[port] = None.
    NOTE(review): unlike its sibling helpers this function has no
    `global working_threads` declaration; mutating the dict still works
    without one, so behavior is unchanged.
    """
    if board == 'LoPy':
        import run_final_lopy_test as run_test
    elif board == 'LoPy4':
        import run_final_lopy4_test as run_test
    else:
        # Final tests exist only for LoPy/LoPy4/SiPy (see caller's guard).
        import run_final_sipy_test as run_test
    try:
        if not run_test.test_board(port, mac, fw_version.number):
            # same trick to give feedback to the main thread
            working_threads[port] = None
    except Exception:
        working_threads[port] = None
def run_qa_test(port, board):
    """Run the board-specific QA test (firmware version check) against `port`.

    Marks failure (test returned False or raised) by setting
    working_threads[port] = None.
    """
    global working_threads
    if board == 'LoPy':
        import run_qa_lopy_test as run_test
    elif board == 'LoPy4':
        import run_qa_lopy4_test as run_test
    elif board == 'SiPy':
        import run_qa_sipy_test as run_test
    else:
        # Any other board name falls back to the WiPy QA test module.
        import run_qa_wipy_test as run_test
    try:
        if not run_test.test_board(port, fw_version.number):
            # same trick to give feedback to the main thread
            working_threads[port] = None
    except Exception:
        working_threads[port] = None
def main():
    """Command-line entry point.

    Either runs the QA-only flow (--qa) or the full production flow:
    read WLAN MACs, optionally set the VDD_SDIO efuse (revision > 1),
    optionally erase flash, program the firmware, run the initial test, and
    (LoPy/SiPy/LoPy4 only) program and verify the LPWAN MAC address,
    recording results in the MAC database and a per-board CSV file.
    Each stage fans out one worker thread per serial port; a worker signals
    failure by setting working_threads[port] = None.  Exits non-zero if any
    stage failed on any board.
    """
    cmd_parser = argparse.ArgumentParser(description='Flash the ESP32 and optionally run a small test on it.')
    cmd_parser.add_argument('--esptool', default=None, help='the path to the esptool')
    cmd_parser.add_argument('--espefuse', default=None, help='the path to the espefuse')
    cmd_parser.add_argument('--boot', default=None, help='the path to the bootloader binary')
    cmd_parser.add_argument('--table', default=None, help='the path to the partitions table')
    cmd_parser.add_argument('--app', default=None, help='the path to the application binary')
    cmd_parser.add_argument('--macs', default="macs.db", help='the path to the MAC addresses database')
    cmd_parser.add_argument('--ports', default=['/dev/ttyUSB0'], nargs='+', help="the serial ports of the ESP32's to program")
    cmd_parser.add_argument('--erase', default=None, help='set to True to erase the boards first')
    cmd_parser.add_argument('--qa', action='store_true', help='just do some quality asurance test')
    cmd_parser.add_argument('--board', default='LoPy', help='identifies the board to be flashed and tested')
    cmd_parser.add_argument('--revision', default='1', help='identifies the hardware revision')
    cmd_args = cmd_parser.parse_args()
    global working_threads
    global wmacs
    output = ""
    # `ret` tracks the current stage's result; `global_ret` the overall result.
    ret = 0
    global_ret = 0
    if cmd_args.qa:
        # ---- QA-only flow: just run the QA test on every port. ----
        raw_input("Please reset all the boards, wait until the LED starts blinking and then press enter...")
        time.sleep(2.5)   # wait for the board to reset
        try:
            for port in cmd_args.ports:
                working_threads[port] = threading.Thread(target=run_qa_test, args=(port, cmd_args.board))
                working_threads[port].start()
            for port in cmd_args.ports:
                if working_threads[port]:
                    working_threads[port].join()
            for port in cmd_args.ports:
                if working_threads[port] == None:
                    print("Failed QA test on board connected to %s" % port)
                    ret = 1
        except Exception as e:
            ret = 1
            print_exception(e)
        if ret == 0:
            print("=============================================================")
            print("QA test succeeded on all boards:-)")
            print("=============================================================")
        else:
            print("=============================================================")
            print("ERROR: Some boards failed the QA test!")
            print("=============================================================")
            global_ret = 1
    else:
        # ---- Full production flow. ----
        # Stage 1: read each board's WLAN MAC (also stored next to the LPWAN
        # MAC in the database later).  Boards that fail a stage are removed
        # from cmd_args.ports so later stages skip them.
        print("Reading the WLAN MAC address...")
        try:
            for port in cmd_args.ports:
                cmd = ['python', 'esptool.py', '--port', port, 'read_mac']
                working_threads[port] = threading.Thread(target=read_wlan_mac, args=(port, cmd))
                working_threads[port].start()
            for port in cmd_args.ports:
                if working_threads[port]:
                    working_threads[port].join()
            _ports = list(cmd_args.ports)
            for port in _ports:
                if working_threads[port] == None:
                    print("Error reading the WLAN MAC on the board on port %s" % port)
                    cmd_args.ports.remove(port)
                    ret = 1
        except Exception as e:
            ret = 1
            print_exception(e)
        if ret == 0:
            print("=============================================================")
            print("WLAN MAC address reading succeeded :-)")
            print("=============================================================")
        else:
            print("=============================================================")
            print("ERROR: WLAN MAC address reading failed in some boards!")
            print("=============================================================")
            global_ret = 1
        raw_input("Please reset all the boards and press enter to continue with the flashing process...")
        if int(cmd_args.revision) > 1:
            # program the efuse bits to set the VDD_SDIO voltage to 1.8V
            try:
                print('Configuring the VDD_SDIO voltage...')
                for port in cmd_args.ports:
                    cmd = ['python', cmd_args.espefuse, '--port', port, '--do-not-confirm', 'set_flash_voltage', '1.8V']
                    working_threads[port] = threading.Thread(target=set_vdd_sdio_voltage, args=(port, cmd))
                    working_threads[port].start()
                for port in cmd_args.ports:
                    if working_threads[port]:
                        working_threads[port].join()
                _ports = list(cmd_args.ports)
                for port in _ports:
                    if working_threads[port] == None:
                        print("Error setting the VDD_SDIO voltage on the board on port %s" % port)
                        cmd_args.ports.remove(port)
                        ret = 1
            except Exception as e:
                ret = 1
                print_exception(e)
            if ret == 0:
                print("=============================================================")
                print("VDD_SDIO voltage setting succeeded :-)")
                print("=============================================================")
            else:
                print("=============================================================")
                print("ERROR: VDD_SDIO voltage setting failed in some boards!")
                print("=============================================================")
                global_ret = 1
            raw_input("Please reset all the boards and press enter to continue with the flashing process...")
            time.sleep(1.0)   # wait for the board to reset
        # Reset the per-stage thread table before the next stage.
        working_threads = {}
        if cmd_args.erase:
            try:
                print('Erasing flash memory... (will take a few seconds)')
                for port in cmd_args.ports:
                    cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
                           'erase_flash']
                    working_threads[port] = threading.Thread(target=erase_flash, args=(port, cmd))
                    working_threads[port].start()
                for port in cmd_args.ports:
                    if working_threads[port]:
                        working_threads[port].join()
                _ports = list(cmd_args.ports)
                for port in _ports:
                    if working_threads[port] == None:
                        print("Error erasing board on port %s" % port)
                        cmd_args.ports.remove(port)
                        ret = 1
            except Exception as e:
                ret = 1
                print_exception(e)
            if ret == 0:
                print("=============================================================")
                print("Batch erasing succeeded :-)")
                print("=============================================================")
            else:
                print("=============================================================")
                print("ERROR: Batch erasing failed in some boards!")
                print("=============================================================")
                global_ret = 1
            raw_input("Please reset all the boards and press enter to continue with the flashing process...")
            time.sleep(1.0)   # wait for the board to reset
        working_threads = {}
        try:
            if cmd_args.board == 'LoPy' or cmd_args.board == 'SiPy' or cmd_args.board == 'LoPy4':
                # LPWAN-capable boards need a MAC address reserved from the DB.
                open_macs_db(cmd_args.macs)
                macs_list = fetch_MACs(len(cmd_args.ports))
                if len(macs_list) < len(cmd_args.ports):
                    print("No enough remaining MAC addresses to use")
                    sys.exit(1)
                mac_per_port = {}
                i = 0
                for port in cmd_args.ports:
                    mac_per_port[port] = macs_list[i]
                    i += 1
            for port in cmd_args.ports:
                cmd = ['python', cmd_args.esptool, '--chip', 'esp32', '--port', port, '--baud', '921600',
                       'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', cmd_args.boot,
                       '0x8000', cmd_args.table, '0x10000', cmd_args.app]
                working_threads[port] = threading.Thread(target=flash_firmware, args=(port, cmd))
                working_threads[port].start()
            for port in cmd_args.ports:
                if working_threads[port]:
                    working_threads[port].join()
            _ports = list(cmd_args.ports)
            for port in _ports:
                if working_threads[port] == None:
                    print("Error programming board on port %s" % port)
                    cmd_args.ports.remove(port)
                    ret = 1
                else:
                    print("Board on port %s programmed OK" % port)
        except Exception as e:
            ret = 1
            print_exception(e)
        if ret == 0:
            print("=============================================================")
            print("Batch programming succeeded :-)")
            print("=============================================================")
        else:
            print("=============================================================")
            print("ERROR: Batch firmware programming failed on some boards!")
            print("=============================================================")
            global_ret = 1
        raw_input("Please place all boards into run mode, RESET them and then \n press enter to continue with the testing process...")
        time.sleep(5.0)   # wait for the board to reset
        working_threads = {}
        try:
            for port in cmd_args.ports:
                working_threads[port] = threading.Thread(target=run_initial_test, args=(port, cmd_args.board))
                working_threads[port].start()
            for port in cmd_args.ports:
                if working_threads[port]:
                    working_threads[port].join()
            _ports = list(cmd_args.ports)
            for port in _ports:
                if working_threads[port] == None:
                    print("Error testing board on port %s" % port)
                    cmd_args.ports.remove(port)
                    ret = 1
                elif cmd_args.board == 'WiPy':
                    # WiPy has no MAC-programming stage, so record success now.
                    print("Batch test OK on port %s, firmware version %s" % (port, fw_version.number))
                    with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
                        csv_writer = csv.writer(csv_file, delimiter=',')
                        csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), ' ', 'OK'])
        except Exception as e:
            ret = 1
            print_exception(e)
        if ret == 0:
            print("=============================================================")
            print("Batch testing succeeded :-)")
            print("=============================================================")
        else:
            print("=============================================================")
            print("ERROR: Batch testing failed in some boards!")
            print("=============================================================")
            global_ret = 1
        # only do the MAC programming and MAC verificacion for the LoPy, SiPy and LoPy4
        if cmd_args.board == 'LoPy' or cmd_args.board == 'SiPy' or cmd_args.board == 'LoPy4':
            print("Waiting before programming the LPWAN MAC address...")
            time.sleep(3.5)   # wait for the board to reset
            working_threads = {}
            try:
                for port in cmd_args.ports:
                    set_mac_status(mac_per_port[port], "", DB_MAC_LOCK) # mark them as locked, so if the script fails and doesn't get to save, they wont be accidentally reused
                    working_threads[port] = threading.Thread(target=flash_lpwan_mac, args=(port, mac_per_port[port]))
                    working_threads[port].start()
                for port in cmd_args.ports:
                    if working_threads[port]:
                        working_threads[port].join()
                _ports = list(cmd_args.ports)
                for port in _ports:
                    if working_threads[port] == None:
                        print("Error programing MAC address on port %s" % port)
                        cmd_args.ports.remove(port)
                        ret = 1
                        set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
            except Exception as e:
                ret = 1
                print_exception(e)
            if ret == 0:
                print("=============================================================")
                print("Batch MAC programming succeeded :-)")
                print("=============================================================")
            else:
                print("=============================================================")
                print("ERROR: Batch MAC programming failed in some boards!")
                print("=============================================================")
                global_ret = 1
            print("Waiting for the board(s) to reboot...")
            time.sleep(4.5)   # wait for the board to reset
            working_threads = {}
            try:
                for port in cmd_args.ports:
                    working_threads[port] = threading.Thread(target=run_final_test, args=(port, cmd_args.board, mac_per_port[port]))
                    working_threads[port].start()
                for port in cmd_args.ports:
                    if working_threads[port]:
                        working_threads[port].join()
                for port in cmd_args.ports:
                    if working_threads[port] == None:
                        ret = 1
                        set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_ERROR)
                        print("Error performing MAC address test on port %s" % port)
                    else:
                        # Board passed: release the MAC as used-OK and log to CSV.
                        set_mac_status(mac_per_port[port], wmacs[port], DB_MAC_OK)
                        print("Final test OK on port %s, firmware version %s, MAC address %s" % (port, fw_version.number, mac_per_port[port]))
                        with open('%s_Flasher_Results.csv' % (cmd_args.board), 'ab') as csv_file:
                            csv_writer = csv.writer(csv_file, delimiter=',')
                            csv_writer.writerow(['%s' % (cmd_args.board), '%s' % (fw_version.number), '%s' % (mac_per_port[port]), 'OK'])
            except Exception as e:
                ret = 1
                print_exception(e)
            if ret == 0:
                print("=============================================================")
                print("Final test succeeded on all boards :-)")
                print("=============================================================")
            else:
                print("=============================================================")
                print("ERROR: Some boards failed the final test!")
                print("=============================================================")
                global_ret = 1
            macs_db.close()
    sys.exit(global_ret)


if __name__ == "__main__":
    main()
| 2.4375 | 2 |
hknweb/events/tests/models/utils.py | jyxzhang/hknweb | 0 | 4835 | import datetime
from django.utils import timezone
from django.contrib.auth.models import User
from hknweb.events.models import Event, EventType, Rsvp
class ModelFactory:
    """Convenience builders for User/EventType/Event/Rsvp test fixtures.

    Each builder merges sensible defaults with any caller-supplied overrides
    before delegating to the Django ORM `create` call.
    """

    @staticmethod
    def create_user(**kwargs):
        """Create a User; username defaults to "default username"."""
        fields = {"username": "default username"}
        fields.update(kwargs)
        return User.objects.create(**fields)

    @staticmethod
    def create_event_type(**kwargs):
        """Create an EventType; type defaults to "default event type"."""
        fields = {"type": "default event type"}
        fields.update(kwargs)
        return EventType.objects.create(**fields)

    @staticmethod
    def create_event(name, event_type, created_by, **kwargs):
        """Create an Event with a two-hour default window starting now."""
        fields = {
            "start_time": timezone.now(),
            "end_time": timezone.now() + datetime.timedelta(hours=2),
            "location": "default location",
            "description": "default description",
        }
        fields.update(kwargs)  # caller overrides beat the defaults
        fields.update(
            {"name": name, "event_type": event_type, "created_by": created_by}
        )
        return Event.objects.create(**fields)

    @staticmethod
    def create_rsvp(user, event, **kwargs):
        """Create an Rsvp linking `user` to `event`."""
        fields = {"user": user, "event": event}
        fields.update(kwargs)
        return Rsvp.objects.create(**fields)

    @staticmethod
    def create_event_with_rsvps():
        """Build a full fixture: an event whose rsvp_limit is one less than
        the number of RSVPing users, plus the users and RSVPs themselves."""
        creator = ModelFactory.create_user(username="event create user")
        num_rsvps = 3
        rsvp_users = [
            ModelFactory.create_user(username="rsvp_user_{}".format(i))
            for i in range(1, 1 + num_rsvps)
        ]
        event_type = ModelFactory.create_event_type()
        event_name = "custom event name"
        event = ModelFactory.create_event(
            name=event_name,
            event_type=event_type,
            created_by=creator,
            rsvp_limit=num_rsvps - 1,
        )
        rsvps = [ModelFactory.create_rsvp(u, event) for u in rsvp_users]
        return (
            creator,
            rsvp_users,
            event_type,
            event_name,
            event,
            rsvps,
        )
| 2.34375 | 2 |
HealthNet/prescriptions/views.py | jimga150/HealthNet | 0 | 4836 | <filename>HealthNet/prescriptions/views.py
from django.shortcuts import redirect
from .forms import PrescriptionForm
from core.views import is_doctor, is_nurse, is_admin, is_patient
from core.models import *
from .models import Prescription
from django.contrib.auth.decorators import login_required, user_passes_test
from django.utils import timezone
from django.shortcuts import render
from django.core.urlresolvers import reverse
def not_admin(user):
    """
    :param user: The User in question
    :return: True if the user is anything but an Admin
    Used as a `user_passes_test` predicate on views below.
    """
    return not is_admin(user)
def is_doctor_or_nurse(user):
    """
    :param user: The User in question
    :return: True if the user is a Doctor or a Nurse
    Used as a `user_passes_test` predicate on views below.
    """
    return is_doctor(user) or is_nurse(user)
@login_required
@user_passes_test(is_doctor)
def new_prescription(request):
    """
    Page for the form a doctor fills out to prescribe a drug.

    On a valid POST, saves the Prescription (stamped with the current time
    and the logged-in Doctor) and writes an audit Log entry.  The
    'submit_singular' button then redirects to the listing page, while
    'submit_another' re-renders a fresh, empty form.
    :param request: the request with possible form submission
    :return: Prescription form or redirect to listing page (below)
    """
    if request.method == 'POST':
        prescription_form = PrescriptionForm(data=request.POST)
        validity = prescription_form.is_valid()
        if validity:
            # commit=False so we can fill in the server-side fields first.
            prescription = prescription_form.save(commit=False)
            prescription.date_prescribed = timezone.now()
            prescription.doctor = Doctor.objects.all().get(user=request.user)
            prescription.save()
            log = Log.objects.create_Log(request.user, request.user.username, timezone.now(),
                                         "Prescription filled out")
            log.save()
        else:
            # NOTE(review): debug prints left in; consider logging instead.
            print("Error")
            print(prescription_form.errors)
        if 'submit_singular' in request.POST and validity:
            return redirect('prescriptions')
        elif 'submit_another' in request.POST:
            prescription_form = PrescriptionForm()
    else:
        # Plain GET: show an empty form.
        prescription_form = PrescriptionForm()
    context = {"prescription_form": prescription_form}
    return render(request, 'prescriptions/makenew.html', context)
def get_prescription_list_for(cpatient):
    """
    Generic getter for a specific patient's prescription list.

    :param cpatient: Patient to fetch the list for
    :return: template context dict with column labels, the patient's name,
             and an iterable of (Prescription, human-readable rate unit) pairs
    """
    prescriptions_qs = Prescription.objects.filter(patient=cpatient)
    # Human-readable time-unit label for each prescription, built with a
    # comprehension instead of the previous manual append loop.
    rate_units = [str(dict(p.TIME_CHOICES)[p.Time_units]) for p in prescriptions_qs]
    p_list = zip(prescriptions_qs, rate_units)
    return {"Labels": ["Doctor", "Drug", "Dosage", "Rate"], "Name": str(cpatient), "Prescriptions": p_list}
@login_required
@user_passes_test(not_admin)
def prescriptions(request):
    """
    Lists either all patients in the hospital with links to their prescription
    lists (for doctors/nurses), or the prescriptions applied to the single
    logged-in patient.
    :param request: The request; used only to identify the logged-in user
    :return: List page rendering
    """
    context = {}
    if is_doctor(request.user) or is_nurse(request.user):
        # Staff view: every patient plus how many prescriptions each one has.
        context["Labels"] = ["Name", "Prescriptions"]
        patients = Patient.objects.all()
        prescription_nums = [
            Prescription.objects.filter(patient=pat).count() for pat in patients
        ]
        context["Patients"] = zip(patients, prescription_nums)
    elif is_patient(request.user):
        cpatient = Patient.objects.get(user=request.user)
        context = get_prescription_list_for(cpatient)
    # Fix: this flag was previously assigned twice in a row.
    context["is_doctor"] = is_doctor(request.user)
    return render(request, 'prescriptions/list.html', context)
@login_required
@user_passes_test(is_doctor_or_nurse)
def prescriptions_list(request, patient_id):
    """
    Page that doctors and nurses are sent to when accessing a single patient's prescription list.

    :param request: The request sent in, used to check the viewer's role
    :param patient_id: ID of the patient who's being listed
    :return: List page rendering
    """
    target = Patient.objects.get(pk=patient_id)
    context = dict(get_prescription_list_for(target), is_doctor=is_doctor(request.user))
    return render(request, 'prescriptions/list.html', context)
@login_required
@user_passes_test(is_doctor)
def delete_prescription(request, prescription_id):
    """
    Page for confirming/deleting a single prescription.

    :param request: The request sent in; a POST confirms the deletion
    :param prescription_id: ID number of the prescription in question
    :return: Redirect (after deletion) or confirmation page
    """
    prescription = Prescription.objects.get(pk=prescription_id)
    patient_id = prescription.patient.id
    if request.method != 'POST':
        # First visit: render the confirmation page.
        context = {"Prescription": prescription, 'patient_id': patient_id}
        return render(request, 'prescriptions/delete.html', context)
    # Confirmed: remove the prescription and return to the patient's list.
    prescription.delete()
    return redirect(reverse('list prescriptions for patient', kwargs={'patient_id': patient_id}))
| 2.390625 | 2 |
algorithms/329. Longest Increasing Path in a Matrix.py | woozway/py3-leetcode | 1 | 4837 | """
1. Clarification
2. Possible solutions
- dfs + memoization
- Topological sort
3. Coding
4. Tests
"""
# T=O(m*n), S=O(m*n)
from functools import lru_cache
class Solution:
    """Longest strictly increasing path in a matrix, via memoized DFS."""

    DIRS = [(-1, 0), (1, 0), (0, -1), (0, 1)]

    def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
        if not matrix:
            return 0

        height, width = len(matrix), len(matrix[0])

        @lru_cache(None)
        def longest_from(r: int, c: int) -> int:
            # Length of the best path starting at (r, c); every step must
            # move to a strictly larger neighbour.
            result = 1
            for dr, dc in Solution.DIRS:
                nr, nc = r + dr, c + dc
                if 0 <= nr < height and 0 <= nc < width and matrix[nr][nc] > matrix[r][c]:
                    result = max(result, 1 + longest_from(nr, nc))
            return result

        return max(longest_from(r, c) for r in range(height) for c in range(width))
# T=O(m*n), S=O(m*n)
class Solution:
    """Longest strictly increasing path, via layer-by-layer topological peeling."""

    DIRS = [(-1, 0), (1, 0), (0, -1), (0, 1)]

    def longestIncreasingPath(self, matrix: List[List[int]]) -> int:
        if not matrix:
            return 0

        height, width = len(matrix), len(matrix[0])

        def in_bounds(r: int, c: int) -> bool:
            return 0 <= r < height and 0 <= c < width

        # outdeg[r][c] = number of strictly greater 4-neighbours of (r, c).
        outdeg = [[0] * width for _ in range(height)]
        frontier = collections.deque()
        for r in range(height):
            for c in range(width):
                outdeg[r][c] = sum(
                    1
                    for dr, dc in Solution.DIRS
                    if in_bounds(r + dr, c + dc) and matrix[r + dr][c + dc] > matrix[r][c]
                )
                if outdeg[r][c] == 0:
                    # Local maxima: every increasing path ends on one of these.
                    frontier.append((r, c))

        layers = 0
        while frontier:
            # Each peeled layer extends the longest path by one cell.
            layers += 1
            for _ in range(len(frontier)):
                r, c = frontier.popleft()
                for dr, dc in Solution.DIRS:
                    nr, nc = r + dr, c + dc
                    if in_bounds(nr, nc) and matrix[nr][nc] < matrix[r][c]:
                        outdeg[nr][nc] -= 1
                        if outdeg[nr][nc] == 0:
                            frontier.append((nr, nc))
        return layers
| 3.28125 | 3 |
cocos2d/tools/coding-style/tailing-spaces.py | NIKEA-SOFT/TestGame | 898 | 4838 | #!/usr/bin/env python
#coding=utf-8
'''
Remove tailing whitespaces and ensures one and only one empty ending line.
'''
import os, re
def scan(*dirs, **kwargs):
    """Recursively collect files under the given directories.

    Keyword arguments:
        extensions: iterable of lower-case extensions (e.g. ['.c']) to keep,
                    or None (default) to keep every file.
        excludes:   directory or file names to skip entirely.

    Returns a list of matching file paths.
    """
    files = []
    # dict.get works on both Python 2 and 3 (dict.has_key was removed in 3).
    extensions = kwargs.get('extensions')
    excludes = kwargs.get('excludes', [])
    for top in dirs:
        for root, dirnames, filenames in os.walk(top):
            # Prune excluded directories *in place*: rebinding the local name
            # (as the old code did) never stops os.walk from descending, and
            # the filter must keep names that are NOT in excludes.
            dirnames[:] = [i for i in dirnames if i not in excludes]
            for f in filenames:
                if f in excludes:
                    continue
                ext = os.path.splitext(f)[1].lower()
                if extensions is None or ext in extensions:
                    files.append(os.path.join(root, f))
    return files
def fixone(src):
    """Rewrite *src* in place: strip trailing whitespace from every line and
    leave exactly one empty line at the end of the file."""
    # Context managers close the handles deterministically (the old code
    # leaked the read handle); raw string avoids the invalid '\s' escape.
    with open(src, 'r') as f:
        lines = f.readlines()
    trimed = [re.sub(r'\s+$', '', line) for line in lines]
    # Drop every trailing blank line, then add back the single one we want.
    while len(trimed) > 1 and not trimed[-1]:
        trimed.pop()
    trimed.append('')
    with open(src, 'w') as f:
        for line in trimed:
            f.write('%s\n' % line)
def lint(root):
    """Strip trailing whitespace from every known source file under *root*."""
    print('Checking tailing whitespaces in: %s' % root)
    dirs = [os.path.join(root, sub) for sub in ('cocos', 'extensions', 'templates', 'tests')]
    dirs.append(os.path.join(root, 'tools', 'simulator'))
    source_extensions = ['.c', '.cpp', '.h', '.hpp', '.m', '.mm', '.java']
    for path in scan(*dirs, extensions=source_extensions):
        print(path)
        fixone(path)
def main():
    """Entry point: lint the repository root two levels above this script."""
    default_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
    lint(default_root)


if __name__ == '__main__':
    # Guard the call so importing this module no longer rewrites files
    # as an import-time side effect.
    main()
| 2.796875 | 3 |
three.py/TestPostprocessing-8Bit.py | Michael-Pascale/three.py | 0 | 4839 | from core import *
from cameras import *
from geometry import *
from material import *
from lights import *
class TestPostprocessing2(Base):
    """Demo application: renders a small lit scene into an offscreen texture,
    then post-processes it with a pixellation + reduced-colour-palette shader."""

    def initialize(self):
        """Set up window, renderer, main scene, lights and the post-processing pass."""
        self.setWindowTitle('Pixellation and Reduced Color Palette')
        self.setWindowSize(1024,768)
        self.renderer = Renderer()
        self.renderer.setViewportSize(1024,768)
        self.renderer.setClearColor(0.5,0.5,0.5)
        self.scene = Scene()
        self.camera = PerspectiveCamera()
        self.camera.setAspectRatio(1024/768)
        self.camera.transform.setPosition(0, 0, 6)
        self.cameraControls = FirstPersonController(self.input, self.camera)
        # Offscreen target the main scene is rendered into; its texture is
        # sampled by the post-processing fragment shader below.
        self.renderTarget = RenderTarget.RenderTarget(1024,768)
        crateTexture = OpenGLUtils.initializeTexture("images/crate.jpg")
        ballTexture = OpenGLUtils.initializeTexture("images/basketball.png")
        self.cube = Mesh( BoxGeometry(), SurfaceLambertMaterial(objTexture=crateTexture) )
        self.cube.transform.translate(1.5, 0, 0, Matrix.LOCAL)
        self.scene.add(self.cube)
        self.sphere = Mesh( SphereGeometry(), SurfaceLambertMaterial(objTexture=ballTexture) )
        self.sphere.transform.translate(-1.5, 0, 0, Matrix.LOCAL)
        self.scene.add(self.sphere)
        ambientLight = AmbientLight(color=[0.1,0.1,0.2])
        self.scene.add( ambientLight )
        directionalLight = DirectionalLight(color=[1,1,1], position=[4,4,-2], direction=[-1,-1,-1])
        self.scene.add( directionalLight )
        # add postprocessing content
        # Full-screen quad: two triangles in normalized device coordinates.
        self.postScene = Scene()
        postGeo = Geometry()
        vertexPositionData = [[-1,-1],[1,-1],[1,1], [-1,-1],[1,1],[-1,1]]
        postGeo.setAttribute("vec2", "vertexPosition", vertexPositionData)
        postGeo.vertexCount = 6
        vsCode = """
        in vec2 vertexPosition;
        void main()
        {
            gl_Position = vec4(vertexPosition, 0, 1);
        }
        """
        fsCode = """
        uniform sampler2D image;
        uniform vec2 textureSize;
        // round x to the nearest 1/denominator
        float roundFrac(float x, float denominator)
        {
            return round(x*denominator) / denominator;
        }
        void main()
        {
            // pixellate original image
            int k = 8;
            vec2 rounded = k * floor(gl_FragCoord.xy / k);
            vec2 UV = rounded / textureSize;
            vec4 color = vec4(0,0,0,0);
            for (int x = 0; x < k; x++)
            {
                for (int y = 0; y < k; y++)
                {
                    color += texture(image, UV + vec2(x,y)/textureSize);
                }
            }
            color /= (k*k);
            // reduce color to a smaller palette
            color.r = roundFrac(color.r, 8);
            color.g = roundFrac(color.g, 8);
            color.b = roundFrac(color.b, 8);
            // combine sepia tones with vignette
            gl_FragColor = color;
        }
        """
        uniforms = [
            ["vec2", "textureSize", [1024,768]],
            ["sampler2D", "image", self.renderTarget.textureID] ]
        postMat = Material(vsCode, fsCode, uniforms)
        postMesh = Mesh(postGeo, postMat)
        self.postScene.add(postMesh)

    def update(self):
        """Per-frame step: advance camera controls, spin the meshes, render the
        scene to the offscreen target, then run the post-processing pass."""
        self.cameraControls.update()
        # rotate main scene objects
        self.cube.transform.rotateX(0.005, Matrix.LOCAL)
        self.cube.transform.rotateY(0.008, Matrix.LOCAL)
        self.sphere.transform.rotateX(0.005, Matrix.LOCAL)
        self.sphere.transform.rotateY(0.008, Matrix.LOCAL)
        # first, render scene into target (texture)
        self.renderer.render(self.scene, self.camera, self.renderTarget)
        # second, render post-processed scene to window.
        # (note: camera irrelevant since projection/view matrices are not used in shader.)
        self.renderer.render(self.postScene, self.camera)
# instantiate and run the program, but only when executed as a script so
# importing this module for its class does not open a window
if __name__ == "__main__":
    TestPostprocessing2().run()
| 2.234375 | 2 |
scripts/test_cache_size_vs_code_balance.py | tareqmalas/girih | 7 | 4840 | <filename>scripts/test_cache_size_vs_code_balance.py
#!/usr/bin/env python
def igs_test(target_dir, exp_name, th, group='', dry_run=0):
    """Launch the cache-size vs. code-balance kernel experiments.

    :param target_dir: directory the result files are written to
    :param exp_name: experiment name (its tail is embedded in output file names)
    :param th: number of threads
    :param group: likwid performance group name (embedded in file names)
    :param dry_run: forwarded to run_test; nonzero means only print commands
    :return: number of test cases launched
    """
    from scripts.conf.conf import machine_conf, machine_info
    from scripts.utils import run_test

    cs = 8192
    # Choose the iteration count so each run takes roughly `desired_time`
    # seconds:  T = scale * size / perf  =>  scale = T * perf / size
    desired_time = 20
    if machine_info['hostname'] == 'Haswell_18core':
        k_perf_order = {0: 150, 1: 500, 4: 40, 5: 200, 6: 20}
    elif machine_info['hostname'] == 'IVB_10core':
        k_perf_order = {0: 120, 1: 300, 4: 35, 5: 150, 6: 20}
    k_time_scale = {n: desired_time * k_perf_order[n] for n in k_perf_order.keys()}

    # Each experiment tuple is (is_dp, ts, kernel, N, bs_z, tb_l).
    # spatial blocking
    exp_l = [
        (0, 0, 0, 960, 0, [-1]),
        (1, 0, 0, 960, 0, [-1]),
        (1, 0, 1, 960, 0, [-1]),
        (1, 0, 4, 480, 0, [-1]),
        (1, 0, 5, 680, 0, [-1]),
    ]
    # 1WD
    exp_l = exp_l + [
        (0, 2, 0, 960, 1, [1, 3, 5]),
        (1, 2, 0, 960, 1, [1, 3, 5]),
        (1, 2, 1, 960, 1, [1, 3, 5, 7, 9, 11, 15, 19, 23, 29]),
        (1, 2, 4, 480, 1, [1, 3, 5]),
        (1, 2, 5, 680, 1, [1, 3, 9, 19]),
    ]
    # Solar kernel
    exp_l = exp_l + [
        (1, 2, 6, 480, 1, [1, 3, 5, 7]),
        (1, 2, 6, 480, 2, [1, 3, 5, 7]),
        (1, 2, 6, 480, 3, [1, 3, 5, 7]),
        (1, 2, 6, 480, 6, [1, 3, 5, 7]),
        (1, 2, 6, 480, 9, [1, 3, 5, 7]),
    ]

    mwdt = 1
    tgs, thx, thy, thz = (1, 1, 1, 1)
    count = 0
    for is_dp, ts, kernel, N, bs_z, tb_l in exp_l:
        for tb in tb_l:
            # BUG FIX: the format string used 'bsz$d' (a literal, not a
            # conversion), leaving 7 specifiers for 8 arguments and raising
            # TypeError at runtime; it must be 'bsz%d'.
            outfile = ('kernel%d_isdp%d_ts%d_bsz%d_tb%d_N%d_%s_%s.txt'
                       % (kernel, is_dp, ts, bs_z, tb, N, group, exp_name[-13:]))
            nt = max(int(k_time_scale[kernel] / (N**3 / 1e6)), 30)
            run_test(ntests=1, dry_run=dry_run, is_dp=is_dp, th=th, tgs=tgs,
                     thx=thx, thy=thy, thz=thz, kernel=kernel, ts=ts,
                     nx=N, ny=N, nz=N, nt=nt, outfile=outfile,
                     target_dir=target_dir, cs=cs, mwdt=mwdt, tb=tb, nwf=bs_z)
            count = count + 1
    return count
def main():
    """Set up the likwid measurement configuration and run the experiment suite.

    Note: this file is Python 2 (see the print statement below).
    """
    from scripts.utils import create_project_tarball, get_stencil_num, parse_results
    from scripts.conf.conf import machine_conf, machine_info
    import os, sys
    import time,datetime
    # user params
    dry_run = 1 if len(sys.argv)<2 else int(sys.argv[1]) # dry run
    # Time-stamped experiment name so repeated runs never collide.
    time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H_%M')
    exp_name = "cache_size_vs_code_balance_at_%s_%s" % (machine_info['hostname'], time_stamp)
    tarball_dir='results/'+exp_name
    # Snapshot the project sources alongside the results (real runs only).
    if(dry_run==0): create_project_tarball(tarball_dir, "project_"+exp_name)
    target_dir='results/' + exp_name
    th = 1
    # Pin the threads to the first socket, cores 0..th-1.
    pin_str = "S0:0-%d "%(th-1)
    count=0
    group = 'MEM'
    # IVB's likwid names the TLB_DATA group just 'TLB'.
    if( (machine_info['hostname']=='IVB_10core') and (group=='TLB_DATA') ): group='TLB'
    machine_conf['pinning_args'] = "-m -g " + group + " -C " + pin_str + ' -s 0x03 --'
    count= count + igs_test(target_dir, exp_name, th=th, group=group, dry_run=dry_run)
    print "experiments count =" + str(count)

if __name__ == "__main__":
    main()
| 1.734375 | 2 |
generate/lib/run-firefox/firefox_runner.py | flamencist/browser-extensions | 102 | 4841 | <reponame>flamencist/browser-extensions
import os
import shutil
import codecs
import json
from cuddlefish.runner import run_app
from cuddlefish.rdf import RDFManifest
def run():
    """Move the generated harness options aside, load them, and launch the
    add-on in Firefox via cuddlefish's run_app."""
    firefox_dir = os.path.join('development', 'firefox')
    original = os.path.join(firefox_dir, 'harness-options.json')
    backup = os.path.join(firefox_dir, 'harness-options-bak.json')
    # Keep a backup copy of the options file and read the config from it.
    shutil.move(original, backup)
    with codecs.open(backup, encoding='utf8') as harness_file:
        harness_config = json.load(harness_file)
    run_app(harness_root_dir=firefox_dir,
            harness_options=harness_config,
            app_type="firefox",
            verbose=True)
| 1.703125 | 2 |
pyripple/protocol/orderbook.py | gip/pyripple | 0 | 4842 | # PyRipple
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import pandas as pd
import mpmath as mp
from mpmath import mpf
import matplotlib
import matplotlib.pyplot as plt
import json
def _weigtedAverage(book, target):
    """Average rate paid to fill *target* units against *book* (a list of
    {'rate': ..., 'limit': ...} orders, best first).

    Returns None when the book is too shallow to fill the target.
    """
    remaining = target
    filled = 0
    cost = 0
    for order in book:
        available = order['limit']
        if remaining <= available:
            # This order completes the fill; average over exactly `target` units.
            filled += remaining
            cost += remaining * order['rate']
            return cost / filled
        filled += available
        cost += available * order['rate']
        remaining -= available
# Render a (currency, issuer) pair as text; XRP is the native asset and
# carries no issuer.  (Python 2 tuple-parameter syntax.)
def _currencyStr((c, i)):
    return 'XRP' if c=='XRP' else '%s@%s' % (c, i)
def _foldBook(accumulator, orderbook):
    """Fold one orderbook snapshot into *accumulator*, which tracks, for each
    offer (keyed by (account, sequence)), the first and last ledger in which
    it was observed.  Pass None to start a fresh accumulator."""
    if accumulator is None:
        accumulator = {'bids': {}, 'asks': {}, 'ledgers': []}
    ledger_index = orderbook.ledger.index
    accumulator['ledgers'].append(ledger_index)
    for side, offers in (('asks', orderbook.offersA), ('bids', orderbook.offersB)):
        seen = accumulator[side]
        for offer in offers:
            uid = (offer['account'], offer['sequence'])
            if uid in seen:
                # Already known: just extend its lifetime to this ledger.
                seen[uid]['end'] = ledger_index
            else:
                seen[uid] = {'start': ledger_index, 'end': ledger_index, 'offer': offer}
    return accumulator
def _foldBooks(orderbooks):
    """Fold a sequence of orderbook snapshots into one offer-lifetime
    accumulator; returns None for an empty sequence."""
    folded = None
    for snapshot in orderbooks:
        folded = _foldBook(folded, snapshot)
    return folded
class Orderbook:
def __init__(self, (c0, i0), (c1, i1), ldgA, offersA, ldbB, offersB, through=[]):
self.c0= c0
self.i0= i0
self.c1= c1
self.i1= i1
self.offersA= offersA
self.offersB= offersB
self.spread= offersA[0]['rate']-offersB[0]['rate']
self.spread_pct= self.spread*100 / offersA[0]['rate']
self.ledger= ldgA
self.through= through
def weigtedAverageA(self, v):
return _weigtedAverage(self.offersA, v)
def weigtedAverageB(self, v):
return _weigtedAverage(self.offersB, v)
def info(self):
return {
'currency': _currencyStr((self.c0, self.i0)),
'counter_currency': _currencyStr((self.c1, self.i1)),
'spread': self.spread,
'spread': self.spread_pct,
'best_ask': self.offersA[0]['rate'],
'n_asks': len(self.offersA),
'n_bids': len(self.offersB),
'best_bid': self.offersB[0]['rate'],
'through': self.through
}
def showInfo(self):
print ('Orderbook %s%s in ledger %i' % (self.c0, self.c1, self.ledger.index))
print (' Close date: %s' % self.ledger.date_human)
print (' Currency: XRP' if self.c0=='XRP' else ' Currency: %s@%s' % (self.c0, self.i0))
print (' Counter currency: XRP' if self.c1=='XRP' else ' Counter currency: %s@%s' % (self.c1, self.i1))
print (' Spread: %f (%f %%)' % (self.spread, self.spread_pct))
print (' Best ask/bid: %f / %f' % (self.offersA[0]['rate'], self.offersB[0]['rate']))
print ' Through: ', self.through
def __mul__(self, other):
assert self.c1 == other.c0 and self.i1 == other.i0, "Invalide trade"
# Let's compute the new orderbook!
def prudctOffers(o0, o1):
offers = []
i0= 0
i1= 0
xlim= 0
o0limit= 0
o1limit= 0
while i1 < len(o1) and i0 < len(o0):
if o0limit==0:
o0rate= o0[i0]['rate']
o0limit= o0[i0]['limit']
i0+= 1
if o1limit==0:
o1rate= o1[i1]['rate']
o1limit= o1[i1]['limit']
i1+= 1
delta = o0limit*o0rate-o1limit
if delta<0:
amt= o0limit*o1rate
o0limit= 0
o1limit-= amt
xlim+= amt
offers.append({ 'rate': o0rate*o1rate, 'limit': amt, 'xlimit': xlim })
elif delta>0:
amt= o1limit
o1limit= 0
o0limit-= amt
xlim+= amt
offers.append({ 'rate': o0rate*o1rate, 'limit': amt, 'xlimit': xlim })
else:
o0limit= 0
o1limit= 0
xlim+= o1limit
offers.append({ 'rate': o0rate*o1rate, 'limit': o1limit, 'xlimit': xlim })
return offers
through = list(self.through)
through.append((self.c1, self.i1))
return Orderbook((self.c0, self.i0),
(other.c1, other.i1),
self.ledger, prudctOffers(self.offersA, other.offersA), other.ledger, prudctOffers(self.offersB, other.offersB), through)
def plot(self, *args, **kwargs):
fA = pd.DataFrame(self.offersA)
fB = pd.DataFrame(self.offersB)
newfig= kwargs.get('newfig', True)
if newfig:
plt.figure(num=None, figsize=(16, 12), dpi=80, facecolor='w', edgecolor='k');
axes = plt.gca();
plt.title('Order book for %s / %s at ledger %i' % (_currencyStr((self.c0, self.i0)), _currencyStr((self.c1, self.i1)), self.ledger.index));
plt.xlabel(_currencyStr((self.c1, self.i1)))
plt.ylabel('%s%s' % (self.c0, self.c1))
plt.gca().xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
if kwargs.get('orders', True):
plt.hlines(fA['rate'], 0, fA['limit'], color='b', label= 'Asks')
plt.plot(fA['limit'], fA['rate'], 'b^')
plt.hlines(fB['rate'], 0, fB['limit'], color='r', label= 'Bids')
plt.plot(fB['limit'], fB['rate'], 'r^')
def supplyDemand(xlimits):
x= []
y= []
limit= 0
for (r, l) in xlimits:
x.append(r)
x.append(r)
y.append(limit)
limit= l
y.append(limit)
return (x,y)
if kwargs.get('supplydemand', True):
(x, y)= supplyDemand(zip(fA['rate'], fA['xlimit']))
plt.plot(y, x, 'b--', label= 'Supply')
(x, y)= supplyDemand(zip(fB['rate'], fB['xlimit']))
plt.plot(y, x, 'r--', label= 'Demand')
if newfig:
plt.legend()
def plotWeighted(self, limit, *args, **kwargs):
newfig= kwargs.get('newfig', True)
if newfig:
plt.figure(num=None, figsize=(16, 12), dpi=80, facecolor='w', edgecolor='k');
plt.xlabel('%s@%s' % (self.c1, self.i1))
plt.title('Rate (weigthed average) for %s / %s ledger %i' % (_currencyStr((self.c0, self.i0)), _currencyStr((self.c1, self.i1)), self.ledger.index))
plt.gca().xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
x = np.arange(1, limit, limit / 1000 if limit > 1000 else 1)
cask = kwargs.get('styleask', 'b')
cbid = kwargs.get('stylebid', 'r')
label = kwargs.get('label', 'Weighted avg')
plt.plot(x, map(self.weigtedAverageA, x), cask, label= label + ' (ask)')
plt.plot(x, map(self.weigtedAverageB, x), cbid, label= label + ' (bid)')
if newfig:
plt.legend()
@staticmethod
def plotTimeResolvedBook(orderbooks):
ob0 = orderbooks[0]
fold = _foldBooks(orderbooks)
plt.figure(num=None, figsize=(16, 12), dpi=80, facecolor='w', edgecolor='k');
plt.hlines(map(lambda x: x['offer']['rate'], fold['asks'].values()),
map(lambda x: x['start'], fold['asks'].values()),
map(lambda x: x['end'], fold['asks'].values()), color ='b', label= 'asks' )
plt.hlines(map(lambda x: x['offer']['rate'], fold['bids'].values()),
map(lambda x: x['start'], fold['bids'].values()),
map(lambda x: x['end'], fold['bids'].values()), color ='r', label= 'bids' )
x = map(lambda ob: ob.ledger.index, orderbooks)
plt.plot(x, map(lambda x: x.offersA[0]['rate'], orderbooks), 'b--')
plt.plot(x, map(lambda x: x.offersB[0]['rate'], orderbooks), 'r--')
axes = plt.gca()
axes.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x))))
axes.set_xlabel('Ripple ledger #')
axes.set_ylabel('%s%s' % (ob0.c0, ob0.c1))
plt.title('Order books for %s / %s' % (_currencyStr((ob0.c0, ob0.i0)), _currencyStr((ob0.c1, ob0.i1))));
plt.legend()
| 2.234375 | 2 |
myvenv/lib/python3.6/site-packages/nltk/test/unit/test_senna.py | catb0y/twitter_feeling | 69 | 4843 | # -*- coding: utf-8 -*-
"""
Unit tests for Senna
"""
from __future__ import unicode_literals
from os import environ, path, sep
import logging
import unittest
from nltk.classify import Senna
from nltk.tag import SennaTagger, SennaChunkTagger, SennaNERTagger
# Set Senna executable path for tests if it is not specified as an environment variable
if 'SENNA' in environ:
    # NOTE(review): the env-var path gets a trailing separator appended while
    # the fallback below does not — presumably the consumers tolerate both;
    # confirm against the Senna wrapper classes.
    SENNA_EXECUTABLE_PATH = path.normpath(environ['SENNA']) + sep
else:
    SENNA_EXECUTABLE_PATH = '/usr/share/senna-v3.0'

# All tests below are skipped when the Senna installation directory is absent.
senna_is_installed = path.exists(SENNA_EXECUTABLE_PATH)
@unittest.skipUnless(senna_is_installed, "Requires Senna executable")
class TestSennaPipeline(unittest.TestCase):
    """Unittest for nltk.classify.senna"""

    def test_senna_pipeline(self):
        """Senna pipeline interface"""
        # Run POS tagging, chunking and NER in a single pass and check all
        # three annotation layers for every token.
        pipeline = Senna(SENNA_EXECUTABLE_PATH, ['pos', 'chk', 'ner'])
        sent = 'Dusseldorf is an international business center'.split()
        result = [(token['word'], token['chk'], token['ner'], token['pos']) for token in pipeline.tag(sent)]
        expected = [('Dusseldorf', 'B-NP', 'B-LOC', 'NNP'), ('is', 'B-VP',
        'O', 'VBZ'), ('an', 'B-NP', 'O', 'DT'), ('international', 'I-NP',
        'O', 'JJ'), ('business', 'I-NP', 'O', 'NN'), ('center', 'I-NP',
        'O', 'NN')]
        self.assertEqual(result, expected)
@unittest.skipUnless(senna_is_installed, "Requires Senna executable")
class TestSennaTagger(unittest.TestCase):
    """Unittest for nltk.tag.senna"""

    def test_senna_tagger(self):
        # Part-of-speech tags for a plain sentence.
        tagger = SennaTagger(SENNA_EXECUTABLE_PATH)
        result = tagger.tag('What is the airspeed of an unladen swallow ?'.split())
        expected = [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed',
        'NN'),('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow',
        'NN'), ('?', '.')]
        self.assertEqual(result, expected)

    def test_senna_chunk_tagger(self):
        # BIO chunk tags, plus the helper that groups BIO tags into NP chunks
        # paired with their token-index spans.
        chktagger = SennaChunkTagger(SENNA_EXECUTABLE_PATH)
        result_1 = chktagger.tag('What is the airspeed of an unladen swallow ?'.split())
        expected_1 = [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed',
        'I-NP'), ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow',
        'I-NP'), ('?', 'O')]
        result_2 = list(chktagger.bio_to_chunks(result_1, chunk_type='NP'))
        expected_2 = [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow',
        '5-6-7')]
        self.assertEqual(result_1, expected_1)
        self.assertEqual(result_2, expected_2)

    def test_senna_ner_tagger(self):
        # Named-entity BIO tags on two short sentences.
        nertagger = SennaNERTagger(SENNA_EXECUTABLE_PATH)
        result_1 = nertagger.tag('Shakespeare theatre was in London .'.split())
        expected_1 = [('Shakespeare', 'B-PER'), ('theatre', 'O'), ('was', 'O'),
        ('in', 'O'), ('London', 'B-LOC'), ('.', 'O')]
        result_2 = nertagger.tag('UN headquarters are in NY , USA .'.split())
        expected_2 = [('UN', 'B-ORG'), ('headquarters', 'O'), ('are', 'O'),
        ('in', 'O'), ('NY', 'B-LOC'), (',', 'O'), ('USA', 'B-LOC'), ('.', 'O')]
        self.assertEqual(result_1, expected_1)
        self.assertEqual(result_2, expected_2)
| 2.59375 | 3 |
eoxserver/services/opensearch/v11/description.py | kalxas/eoxserver | 25 | 4844 | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2015 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from itertools import chain
from lxml.builder import ElementMaker
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.shortcuts import get_object_or_404
from eoxserver.core.config import get_eoxserver_config
from eoxserver.core.util.xmltools import (
XMLEncoder, NameSpace, NameSpaceMap
)
from eoxserver.resources.coverages import models
from eoxserver.services.opensearch.formats import get_formats
from eoxserver.services.opensearch.extensions import get_extensions
from eoxserver.services.opensearch.config import OpenSearchConfigReader
class OpenSearch11DescriptionEncoder(XMLEncoder):
    """XML encoder producing an OpenSearch 1.1 description document,
    including the OpenSearch parameter-extension metadata and one search
    URL per result format and HTTP method."""

    content_type = "application/opensearchdescription+xml"

    def __init__(self, search_extensions):
        # The unprefixed default namespace is OpenSearch itself.
        ns_os = NameSpace("http://a9.com/-/spec/opensearch/1.1/", None)
        self.ns_param = ns_param = NameSpace(
            "http://a9.com/-/spec/opensearch/extensions/parameters/1.0/",
            "parameters"
        )
        ns_atom = NameSpace("http://www.w3.org/2005/Atom", "atom")
        nsmap = NameSpaceMap(ns_os, ns_param, ns_atom)
        # Each search extension contributes its own namespace.
        for search_extension in search_extensions:
            nsmap.add(search_extension.namespace)
        # Element factories, one per namespace.
        self.OS = ElementMaker(namespace=ns_os.uri, nsmap=nsmap)
        self.PARAM = ElementMaker(namespace=ns_param.uri, nsmap=nsmap)
        self.ATOM = ElementMaker(namespace=ns_atom.uri, nsmap=nsmap)
        self.search_extensions = search_extensions

    def encode_description(self, request, collection, result_formats):
        """ Encode an OpenSearch 1.1 description document.
        """
        OS = self.OS
        description = OS("OpenSearchDescription",
            OS("ShortName",
                collection.identifier if collection is not None else ""
            ),
            OS("Description")
        )
        # One <Url> per (HTTP method, result format) combination.
        for method in ("GET", "POST"):
            description.extend([
                self.encode_url(
                    request, collection, result_format, method
                )
                for result_format in result_formats
            ])
        description.extend([
            OS("Contact"),
            OS("Tags", "CEOS-OS-BP-V1.1/L1"),
            OS("LongName"),
            OS("Developer"),
            OS("Attribution"),
            OS("SyndicationRight", "open"),
            OS("AdultContent"),
            OS("Language"),
            OS("InputEncoding"),
            OS("OutputEncoding")
        ])
        return description

    def encode_url(self, request, collection, result_format, method):
        """ Encode a single opensearch URL, either for a specific collection, or
            the whole service.
        """
        if collection is not None:
            search_url = reverse("opensearch:collection:search",
                kwargs={
                    "collection_id": collection.identifier,
                    "format_name": result_format.name
                }
            )
        else:
            search_url = reverse("opensearch:search",
                kwargs={
                    "format_name": result_format.name
                }
            )
        conf = OpenSearchConfigReader(get_eoxserver_config())
        search_url = request.build_absolute_uri(search_url)
        # Parameters every search supports, independent of extensions.
        default_parameters = (
            dict(name="q", type="searchTerms", profiles=[
            ]),
            dict(name="count", type="count", min=0, max=conf.max_count),
            dict(name="startIndex", type="startIndex", min=0),
        )
        # Append each extension's parameters, tagged with its namespace.
        parameters = list(chain(default_parameters, *[
            [
                dict(parameter, **{"namespace": search_extension.namespace})
                for parameter in search_extension.get_schema(
                    collection,
                    models.Collection if collection is None else models.Product
                )
            ] for search_extension in self.search_extensions
        ]))
        # OpenSearch URL template, e.g. "q={searchTerms?}&count={count?}".
        query_template = "&".join(
            "%s={%s%s%s%s}" % (
                parameter["name"],
                parameter["namespace"].prefix
                if "namespace" in parameter else "",
                ":" if "namespace" in parameter else "",
                parameter["type"],
                "?" if parameter.get("optional", True) else ""
            )
            for parameter in parameters
        )
        url = self.OS("Url", *[
                self.encode_parameter(parameter, parameter.get("namespace"))
                for parameter in parameters
            ],
            type=result_format.mimetype,
            template="%s?%s" % (search_url, query_template)
            if method == "GET" else search_url,
            rel="results" if collection is not None else "collection", ** {
                self.ns_param("method"): method,
                self.ns_param("enctype"): "application/x-www-form-urlencoded",
                "indexOffset": "0"
            }
        )
        return url

    def encode_parameter(self, parameter, namespace):
        """Encode one <parameters:Parameter>, with its options, profile links
        and min/max/pattern constraints."""
        options = parameter.pop("options", [])
        profiles = parameter.pop("profiles", [])
        attributes = {"name": parameter["name"]}
        if namespace:
            attributes["value"] = "{%s:%s}" % (
                namespace.prefix, parameter.pop("type")
            )
        else:
            attributes["value"] = "{%s}" % parameter.pop("type")
        if 'min' in parameter:
            attributes['minInclusive'] = str(parameter['min'])
        if 'max' in parameter:
            attributes['maxInclusive'] = str(parameter['max'])
        pattern = parameter.get("pattern")
        if pattern:
            attributes["pattern"] = pattern
        return self.PARAM("Parameter", *[
            self.PARAM("Option", value=option, label=option)
            for option in options
        ] + [
            self.ATOM("link",
                rel="profile", href=profile["href"], title=profile["title"]
            )
            for profile in profiles
        ], minimum="0" if parameter.get("optional", True) else "1", maximum="1",
            **attributes
        )
class OpenSearch11DescriptionHandler(object):
    """Service handler returning the OpenSearch description document
    (optionally restricted to one collection) plus its content type."""

    def handle(self, request, collection_id=None):
        if collection_id:
            collection = get_object_or_404(
                models.Collection, identifier=collection_id
            )
        else:
            collection = None
        search_extensions = [extension() for extension in get_extensions()]
        result_formats = [format_() for format_ in get_formats()]
        encoder = OpenSearch11DescriptionEncoder(search_extensions)
        document = encoder.encode_description(request, collection, result_formats)
        return encoder.serialize(document), encoder.content_type
| 1.328125 | 1 |
exercises/level_0/stringing.py | eliranM98/python_course | 6 | 4845 | <reponame>eliranM98/python_course
# Teaching exercise: a tour of Python string methods applied to a book blurb.
text = '''
<NAME>'s ({}) tale of injustice, heroism and love follows the fortunes of <NAME>, an escaped convict determined to put his criminal past behind him. But his attempts to become a respected member of the community are constantly put under threat: by his own conscience, when, owing to a case of mistaken identity, another man is arrested in his place; and by the relentless investigations of the dogged Inspector Javert. It is not simply for himself that Valjean must stay free, however, for he has sworn to protect the baby daughter of Fantine, driven to prostitution by poverty.
Norm<NAME>'s ({}) lively English translation is accompanied by an introduction discussing Hugo's political and artistic aims in writing Les Miserables.
<NAME> (1802-85) wrote volumes of criticism, dramas, satirical verse and political journalism but is best remembered for his novels, especially Notre-Dame de Paris (also known as The Hunchback of Notre-Dame) and Les Miserables, which was adapted into one of the most successful musicals of all time.
'All human life is here'
<NAME>, producer of the musical Les Miserables
'One of the half-dozen greatest novels of the world'
Upton Sinclair
'A great writer - inventive, witty, sly, innovatory'
<NAME>, author of Possession
'''
name = 'Victor'
word1 = 'writer'
word2 = 'witty'
numbers = "0123456789"
small_letters = 'abcdefghijklmnopqrstuvwxyz'
big_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# --- searching and counting ---
name_index = text.find(name)
name_plus3 = text[name_index: name_index+len(name)+3]
word1_index = text.find(word1, 0, 100)
word2_index = text.find(word2, int(len(text)/2), len(text))
count_characters = text.count('of')
is_text_starts_with_name = text.startswith(name)
is_text_ends_with_name = text.endswith(name)
# --- formatting, splitting, joining and replacing ---
text = text.format('1822-95', '1807-63')
words = text.split(' ')
text1 = ''.join(words)
text2 = ','.join(words)
text3 = '_'.join(words)
text4 = ' '.join(words)
text5 = text.replace('of', '@🐔')
text6 = text.capitalize()
text7 = text.replace('a', '')
text8 = text.strip()
# --- case conversion and string predicates ---
upper_name = name.upper()
lower_name = name.lower()
is_name_upper = name.isupper()
is_name_lower = name.islower()
is_big_letters_upper = big_letters.isupper()
is_small_letters_lower = small_letters.islower()
stringed_integer = '90'.isnumeric()
stringed_float = '90.5'.isnumeric()
# --- conversions between strings and numbers ---
converted_int = int('90')
converted_float = float('90.5')
converted_string = str(183)
is_digit = converted_string[1].isdigit()
# --- indexing and slicing ---
edges = small_letters[0] + big_letters[-1]
body = numbers[1:-1]
evens = numbers[::2]
odds = numbers[1::2]
# --- show every result ---
print('name', name)
print('word1', word1)
print('word2', word2)
print('numbers', numbers)
print('small_letters', small_letters)
print('big_letters', big_letters)
print('name_index', name_index)
print('name_plus3', name_plus3)
print('word1_index', word1_index)
print('word2_index', word2_index)
print('count_characters -> \'of\' in the text', count_characters)
print('is_text_starts_with_name', is_text_starts_with_name)
print('is_text_ends_with_name', is_text_ends_with_name)
print('\n\n\n\n\n', 'text', text, '\n\n\n\n\n')
print('\n\n\n\n\n', 'words', words, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text1', text1, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text2', text2, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text3', text3, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text4', text4, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text5', text5, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text6', text6, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text7', text7, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text8', text8, '\n\n\n\n\n')
print('upper_name', upper_name)
print('lower_name', lower_name)
print('is_name_upper', is_name_upper)
print('is_name_lower', is_name_lower)
print('is_big_letters_upper', is_big_letters_upper)
print('is_small_letters_lower', is_small_letters_lower)
print('stringed_integer', stringed_integer)
print('stringed_float', stringed_float)
print('converted_int', converted_int)
print('converted_float', converted_float)
print('converted_string', converted_string)
print('is_digit', is_digit)
print('edges', edges)
print('body', body)
print('evens', evens)
print('odds', odds)
| 4.125 | 4 |
lab1/text_recognizer/models/mlp.py | Agyey/fsdl-text-recognizer-2021-labs | 0 | 4846 | <gh_stars>0
from typing import Any, Dict
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
FC1_DIM = 1024
FC2_DIM = 128


class MLP(nn.Module):
    """Simple MLP suitable for recognizing single characters.

    When ``--layers N`` is given, builds N hidden layers whose widths are
    linearly interpolated from ``FC1_DIM`` down to ``FC2_DIM``.  Otherwise
    builds a fixed two-hidden-layer network sized by ``--fc1``/``--fc2``.
    """

    def __init__(
        self,
        data_config: Dict[str, Any],
        args: argparse.Namespace = None,
    ) -> None:
        super().__init__()
        self.args = vars(args) if args is not None else {}

        # Flattened input size and number of output classes come from the
        # data config dict.
        input_dim = np.prod(data_config["input_dims"])
        num_classes = len(data_config["mapping"])

        self.dropout = nn.Dropout(0.5)
        # BUG FIX: default must be None (matching add_to_argparse's
        # default=None).  The previous default of FC1_DIM silently built a
        # 1024-layer network whenever no args were passed.
        layers = self.args.get("layers")
        self.layers = layers
        if layers:
            # Hidden widths interpolated from FC1_DIM to FC2_DIM.
            # NOTE: requires layers >= 2 (enforced by argparse choices);
            # layers == 1 would divide by zero below.
            fcn = (int(FC1_DIM - x * ((FC1_DIM - FC2_DIM) // (layers - 1)))
                   for x in range(layers))
            fcl = input_dim
            fcv = []
            for fci in fcn:
                fcv.append(nn.Linear(fcl, fci))
                fcl = fci
            fcv.append(nn.Linear(fcl, num_classes))
            self.fcv = nn.Sequential(*fcv)
        else:
            fc1_dim = self.args.get("fc1", FC1_DIM)
            fc2_dim = self.args.get("fc2", FC2_DIM)
            self.fc1 = nn.Linear(input_dim, fc1_dim)
            self.fc2 = nn.Linear(fc1_dim, fc2_dim)
            self.fc3 = nn.Linear(fc2_dim, num_classes)

    def forward(self, x):
        """Flatten *x* and return per-class logits (softmax not applied)."""
        x = torch.flatten(x, 1)
        if self.layers:
            # All but the last layer get ReLU + dropout; last is the head.
            for fci in self.fcv[:-1]:
                x = fci(x)
                x = F.relu(x)
                x = self.dropout(x)
            x = self.fcv[-1](x)
        else:
            x = self.fc1(x)
            x = F.relu(x)
            x = self.dropout(x)
            x = self.fc2(x)
            x = F.relu(x)
            x = self.dropout(x)
            x = self.fc3(x)
        return x

    @staticmethod
    def add_to_argparse(parser):
        """Register this model's CLI flags on *parser* and return it."""
        parser.add_argument("--layers", type=int, default=None, choices=range(2, 20))
        parser.add_argument("--fc1", type=int, default=1024)
        parser.add_argument("--fc2", type=int, default=128)
        return parser
| 2.578125 | 3 |
frontend/widgets/button.py | AzoeDesarrollos/PyMavisDatabase | 0 | 4847 | from pygame import Surface, font
from .basewidget import BaseWidget
from frontend import Renderer, WidgetHandler
class Button(BaseWidget):
    """Clickable text button.

    Renders its caption onto a grey surface and registers itself with the
    Renderer and WidgetHandler on construction.
    """

    action = None

    def __init__(self, x, y, texto, action=None):
        self.f = font.SysFont('Verdana', 16)
        surface = self.crear(texto)
        area = surface.get_rect(topleft=(x, y))
        super().__init__(surface, area)
        Renderer.add_widget(self, 1)
        WidgetHandler.add_widget(self, 1)
        self.action = action

    def crear(self, texto):
        """Render *texto* onto a new button surface and return it."""
        text_w, text_h = self.f.size(texto)
        canvas = Surface((text_w + 4, text_h + 2))
        canvas.fill((125, 125, 125), (1, 1, text_w + 2, text_h))
        label = self.f.render(texto, 1, (255, 255, 255), (125, 125, 125))
        canvas.blit(label, (2, 1))
        return canvas

    def on_mousebuttondown(self, button):
        """Invoke the configured action on a left click."""
        if button == 1 and self.action is not None:
            self.action()

    def on_mouseover(self):
        pass

    def update(self):
        self.dirty = 1
| 2.921875 | 3 |
tensorflow_rnn/mnist_lstm.py | naoki009/samples | 0 | 4848 | <filename>tensorflow_rnn/mnist_lstm.py<gh_stars>0
import numpy as np
import tensorflow as tf
"""
Do an MNIST classification line by line by LSTM
"""
(x_train, y_train), \
(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(128, input_shape=(None, 28)))
#model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(10))
model.add(tf.keras.layers.Activation("softmax"))
model.summary()
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
optimizer="sgd",
metrics=["accuracy"])
model.fit(x_train, y_train, validation_data=(x_test, y_test),
batch_size=100, epochs=100)
| 3.015625 | 3 |
scrap/CloudCoverUndersampling.py | cseale/kaggle-amazon-rainforests | 0 | 4849 | <filename>scrap/CloudCoverUndersampling.py<gh_stars>0
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import os
from random import shuffle
from tqdm import tqdm
# Paths into the Kaggle "Planet: Understanding the Amazon" dataset layout.
DATA_DIR = '../input/amazon/'
TRAIN_TIF_DIR = DATA_DIR + 'train-tif/'
TRAIN_CSV = DATA_DIR + 'train.csv'
TEST_TIF_DIR = DATA_DIR + 'test-tif/'

IMG_SIZE = 100
LR = 1e-3

MODEL_NAME = 'amazon=-{}-{}.model'.format(LR, '2conv-basic')

# The four mutually informative cloud-cover tags we binarize below.
CLOUD_COVER_LABELS = [
    'clear',
    'cloudy',
    'haze',
    'partly_cloudy']

# read our data and take a look at what we are dealing with
train_csv = pd.read_csv(TRAIN_CSV)
train_csv.head()

# One 0/1 indicator column per cloud-cover label, derived from the free-text
# `tags` column by substring match.
tags = pd.DataFrame()
for label in CLOUD_COVER_LABELS:
    tags[label] = train_csv.tags.apply(lambda x: np.where(label in x, 1, 0))

train_csv = pd.concat([train_csv, tags], axis=1)

# In[17]:

# Undersample the three larger classes to 7251 rows each (haze kept whole).
# NOTE(review): the concatenated result is not assigned to anything, so this
# statement has no lasting effect outside a notebook cell — confirm intent.
pd.concat([train_csv[train_csv.clear == 1].sample(n=7251),
           train_csv[train_csv.cloudy == 1].sample(n=7251),
           train_csv[train_csv.haze == 1],
           train_csv[train_csv.partly_cloudy == 1].sample(n=7251)], axis=0, ignore_index=True)
| 2.5625 | 3 |
rpython/translator/platform/posix.py | wdv4758h/mu-client-pypy | 0 | 4850 | """Base support for POSIX-like platforms."""
import py, os, sys
from rpython.translator.platform import Platform, log, _run_subprocess
import rpython
rpydir = str(py.path.local(rpython.__file__).join('..'))
class BasePosix(Platform):
    """Shared build-platform logic for POSIX-like systems (Python 2 code).

    Subclasses provide platform-specific flags (cflags, link_flags,
    shared_only, standalone_only, so_ext, ...); this base class knows how to
    compile, link and emit a GNU Makefile via an external C compiler.
    """
    exe_ext = ''
    make_cmd = 'make'
    relevant_environ = ('CPATH', 'LIBRARY_PATH', 'C_INCLUDE_PATH')

    DEFAULT_CC = 'gcc'
    rpath_flags = ['-Wl,-rpath=\'$$ORIGIN/\'']

    def __init__(self, cc=None):
        # Explicit argument wins, then $CC from the environment, then gcc.
        self.cc = cc or os.environ.get('CC', self.DEFAULT_CC)

    def _libs(self, libraries):
        # Library names -> -l<name> flags.
        return ['-l%s' % lib for lib in libraries]

    def _libdirs(self, library_dirs):
        assert '' not in library_dirs
        return ['-L%s' % ldir for ldir in library_dirs]

    def _includedirs(self, include_dirs):
        assert '' not in include_dirs
        return ['-I%s' % idir for idir in include_dirs]

    def _linkfiles(self, link_files):
        return list(link_files)

    def _compile_c_file(self, cc, cfile, compile_args):
        # Compile a single .c file to an object file; returns the .o path.
        oname = self._make_o_file(cfile, ext='o')
        args = ['-c'] + compile_args + [str(cfile), '-o', str(oname)]
        self._execute_c_compiler(cc, args, oname,
                                 cwd=str(cfile.dirpath()))
        return oname

    def _link_args_from_eci(self, eci, standalone):
        return Platform._link_args_from_eci(self, eci, standalone)

    def _exportsymbols_link_flags(self):
        # Windows-ish toolchains need --export-all-symbols instead of
        # --export-dynamic.
        if (self.cc == 'mingw32' or (self.cc== 'gcc' and os.name=='nt')
                or sys.platform == 'cygwin'):
            return ["-Wl,--export-all-symbols"]
        return ["-Wl,--export-dynamic"]

    def _link(self, cc, ofiles, link_args, standalone, exe_name):
        # Link object files into an executable (or shared library when not
        # standalone).
        args = [str(ofile) for ofile in ofiles] + link_args
        args += ['-o', str(exe_name)]
        if not standalone:
            args = self._args_for_shared(args)
        self._execute_c_compiler(cc, args, exe_name,
                                 cwd=str(exe_name.dirpath()))
        return exe_name

    def _pkg_config(self, lib, opt, default, check_result_dir=False):
        """Query pkg-config for *lib* with *opt*; fall back to *default*.

        Returns the option values with their two-character flag prefix
        (e.g. -I/-L) stripped.  Raises ValueError when check_result_dir is
        set and no result is a valid directory.
        """
        try:
            ret, out, err = _run_subprocess("pkg-config", [lib, opt])
        except OSError, e:
            err = str(e)
            ret = 1
        if ret:
            result = default
        else:
            # strip compiler flags
            result = [entry[2:] for entry in out.split()]
        #
        if not result:
            pass # if pkg-config explicitly returned nothing, then
                 # we assume it means no options are needed
        elif check_result_dir:
            # check that at least one of the results is a valid dir
            for check in result:
                if os.path.isdir(check):
                    break
            else:
                if ret:
                    msg = ("running 'pkg-config %s %s' failed:\n%s\n"
                           "and the default %r is not a valid directory" % (
                               lib, opt, err.rstrip(), default))
                else:
                    msg = ("'pkg-config %s %s' returned no valid directory:\n"
                           "%s\n%s" % (lib, opt, out.rstrip(), err.rstrip()))
                raise ValueError(msg)
        return result

    def get_rpath_flags(self, rel_libdirs):
        # needed for cross-compilation i.e. ARM
        return self.rpath_flags + ['-Wl,-rpath-link=\'%s\'' % ldir
                                   for ldir in rel_libdirs]

    def get_shared_only_compile_flags(self):
        return tuple(self.shared_only) + ('-fvisibility=hidden',)

    def gen_makefile(self, cfiles, eci, exe_name=None, path=None,
                     shared=False, headers_to_precompile=[],
                     no_precompile_cfiles = [], icon=None):
        """Build an in-memory GnuMakefile describing how to compile *cfiles*.

        The makefile paths are made relative to $(RPYDIR) where possible so
        generated makefiles are relocatable.
        """
        cfiles = self._all_cfiles(cfiles, eci)

        if path is None:
            path = cfiles[0].dirpath()

        rpypath = py.path.local(rpydir)

        if exe_name is None:
            exe_name = cfiles[0].new(ext=self.exe_ext)
        else:
            exe_name = exe_name.new(ext=self.exe_ext)

        linkflags = list(self.link_flags)
        if shared:
            linkflags = self._args_for_shared(linkflags)

        linkflags += self._exportsymbols_link_flags()

        if shared:
            # Shared builds produce lib<name>.<so_ext> as the real target.
            libname = exe_name.new(ext='').basename
            target_name = 'lib' + exe_name.new(ext=self.so_ext).basename
        else:
            target_name = exe_name.basename

        if shared:
            cflags = tuple(self.cflags) + self.get_shared_only_compile_flags()
        else:
            cflags = tuple(self.cflags) + tuple(self.standalone_only)

        m = GnuMakefile(path)
        m.exe_name = path.join(exe_name.basename)
        m.eci = eci

        def rpyrel(fpath):
            # Relativize *fpath* against $(RPYDIR), then $(RPYDIR)/.., then
            # the makefile directory; fall back to the absolute path.
            lpath = py.path.local(fpath)
            rel = lpath.relto(rpypath)
            if rel:
                return os.path.join('$(RPYDIR)', rel)
            # Hack: also relativize from the path '$RPYDIR/..'.
            # Otherwise, when translating pypy, we get the paths in
            # pypy/module/* that are kept as absolute, which makes the
            # whole purpose of $RPYDIR rather pointless.
            rel = lpath.relto(rpypath.join('..'))
            if rel:
                return os.path.join('$(RPYDIR)', '..', rel)
            m_dir = m.makefile_dir
            if m_dir == lpath:
                return '.'
            if m_dir.dirpath() == lpath:
                return '..'
            return fpath

        rel_cfiles = [m.pathrel(cfile) for cfile in cfiles]
        rel_ofiles = [rel_cfile[:rel_cfile.rfind('.')]+'.o' for rel_cfile in rel_cfiles]
        m.cfiles = rel_cfiles

        rel_includedirs = [rpyrel(incldir) for incldir in
                           self.preprocess_include_dirs(eci.include_dirs)]
        rel_libdirs = [rpyrel(libdir) for libdir in
                       self.preprocess_library_dirs(eci.library_dirs)]

        m.comment('automatically generated makefile')
        definitions = [
            ('RPYDIR', '"%s"' % rpydir),
            ('TARGET', target_name),
            ('DEFAULT_TARGET', exe_name.basename),
            ('SOURCES', rel_cfiles),
            ('OBJECTS', rel_ofiles),
            ('LIBS', self._libs(eci.libraries) + list(self.extra_libs)),
            ('LIBDIRS', self._libdirs(rel_libdirs)),
            ('INCLUDEDIRS', self._includedirs(rel_includedirs)),
            ('CFLAGS', cflags),
            ('CFLAGSEXTRA', list(eci.compile_extra)),
            ('LDFLAGS', linkflags),
            ('LDFLAGS_LINK', list(self.link_flags)),
            ('LDFLAGSEXTRA', list(eci.link_extra)),
            ('CC', self.cc),
            ('CC_LINK', eci.use_cpp_linker and 'g++' or '$(CC)'),
            ('LINKFILES', eci.link_files),
            ('RPATH_FLAGS', self.get_rpath_flags(rel_libdirs)),
            ]
        for args in definitions:
            m.definition(*args)

        rules = [
            ('all', '$(DEFAULT_TARGET)', []),
            ('$(TARGET)', '$(OBJECTS)', '$(CC_LINK) $(LDFLAGSEXTRA) -o $@ $(OBJECTS) $(LIBDIRS) $(LIBS) $(LINKFILES) $(LDFLAGS)'),
            ('%.o', '%.c', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'),
            ('%.o', '%.s', '$(CC) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'),
            ('%.o', '%.cxx', '$(CXX) $(CFLAGS) $(CFLAGSEXTRA) -o $@ -c $< $(INCLUDEDIRS)'),
            ]
        for rule in rules:
            m.rule(*rule)

        if shared:
            # NOTE(review): the trailing comma below makes this statement a
            # one-element tuple expression; harmless, but probably a typo.
            m.definition('SHARED_IMPORT_LIB', libname),
            m.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup")
            # Generate a stub main.c that forwards to the shared library's
            # entry point, then link it against the freshly built library.
            m.rule('main.c', '',
                   'echo "'
                   'int $(PYPY_MAIN_FUNCTION)(int, char*[]); '
                   'int main(int argc, char* argv[]) '
                   '{ return $(PYPY_MAIN_FUNCTION)(argc, argv); }" > $@')
            m.rule('$(DEFAULT_TARGET)', ['$(TARGET)', 'main.o'],
                   '$(CC_LINK) $(LDFLAGS_LINK) main.o -L. -l$(SHARED_IMPORT_LIB) -o $@ $(RPATH_FLAGS)')

        return m

    def execute_makefile(self, path_to_makefile, extra_opts=[]):
        """Run `make -C <dir> <extra_opts>` and raise on failure."""
        if isinstance(path_to_makefile, GnuMakefile):
            path = path_to_makefile.makefile_dir
        else:
            path = path_to_makefile
        log.execute('make %s in %s' % (" ".join(extra_opts), path))
        returncode, stdout, stderr = _run_subprocess(
            self.make_cmd, ['-C', str(path)] + extra_opts)
        self._handle_error(returncode, stdout, stderr, path.join('make'))
class Definition(object):
    # A "NAME = value" variable definition in the generated makefile.
    # NOTE: uses Python 2 print-statement syntax in write() below.

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def write(self, f):
        # Emit the definition to file *f*.  List values are written as a
        # backslash-continued multi-line assignment; backslashes in values
        # are escaped for make.
        def write_list(prefix, lst):
            lst = lst or ['']
            for i, fn in enumerate(lst):
                fn = fn.replace('\\', '\\\\')
                print >> f, prefix, fn,
                if i < len(lst)-1:
                    print >> f, '\\'
                else:
                    print >> f
                # Continuation lines are aligned under the "NAME =" prefix.
                prefix = ' ' * len(prefix)
        name, value = self.name, self.value
        if isinstance(value, str):
            f.write('%s = %s\n' % (name, value.replace('\\', '\\\\')))
        else:
            write_list('%s =' % (name,), value)
        f.write('\n')
class Rule(object):
    """A makefile rule: a target, its prerequisites, and a recipe."""

    def __init__(self, target, deps, body):
        self.target = target
        self.deps = deps
        self.body = body

    def write(self, f):
        """Write this rule to file *f* in GNU make syntax."""
        deps = self.deps
        if not isinstance(deps, str):
            deps = ' '.join(deps)
        f.write('%s: %s\n' % (self.target, deps))
        recipe = self.body
        if isinstance(recipe, str):
            f.write('\t%s\n' % recipe)
        elif recipe:
            # A list of commands: one tab-prefixed line per command.
            f.write('\t%s\n' % '\n\t'.join(recipe))
        f.write('\n')
class Comment(object):
    """A single '#'-prefixed comment line in the generated makefile."""

    def __init__(self, body):
        self.body = body

    def write(self, f):
        f.write('# {0}\n'.format(self.body))
class GnuMakefile(object):
    """In-memory GNU makefile: ordered definitions, rules and comments.

    ``definition()`` replaces an existing definition of the same name in
    place, preserving its original position in the file.
    """

    def __init__(self, path=None):
        self.defs = {}            # name -> index into self.lines
        self.lines = []           # ordered Definition/Rule/Comment entries
        self.makefile_dir = py.path.local(path)

    def pathrel(self, fpath):
        """Return *fpath* relative to the makefile directory when possible."""
        base = self.makefile_dir
        if fpath.dirpath() == base:
            return fpath.basename
        parent = base.dirpath()
        if fpath.dirpath().dirpath() == parent:
            assert fpath.relto(parent), (
                "%r should be relative to %r" % (fpath, parent))
            return ('../' + fpath.relto(parent)).replace('\\', '/')
        return str(fpath)

    def definition(self, name, value):
        """Add a variable definition, replacing any earlier one in place."""
        entry = Definition(name, value)
        pos = self.defs.get(name)
        if pos is None:
            self.defs[name] = len(self.lines)
            self.lines.append(entry)
        else:
            self.lines[pos] = entry

    def rule(self, target, deps, body):
        self.lines.append(Rule(target, deps, body))

    def comment(self, body):
        self.lines.append(Comment(body))

    def write(self, out=None):
        """Serialize to *out*, or to <makefile_dir>/Makefile when None."""
        own_file = out is None
        f = self.makefile_dir.join('Makefile').open('w') if own_file else out
        for entry in self.lines:
            entry.write(f)
        f.flush()
        if own_file:
            f.close()
| 2.140625 | 2 |
chintai-scrape/A001_parse_htmls.py | GINK03/itmedia-scraping | 16 | 4851 | import glob
import bs4
import gzip
import pickle
import re
import os
from concurrent.futures import ProcessPoolExecutor as PPE
import json
from pathlib import Path
from hashlib import sha256
import shutil
# Ensure the output directory for parsed JSON exists before workers start.
Path('json').mkdir(exist_ok=True)
def sanitize(text):
    """Strip control whitespace, NBSP, literal '\\r' sequences, and the
    boilerplate map-prompt phrase from scraped *text*."""
    for pattern in (r'(\t|\n|\r)', r'\xa0', r'\\r', '地図で物件の周辺環境をチェック!'):
        text = re.sub(pattern, '', text)
    return text
def is_train(x):
    """Return True when *x* does NOT contain '線' (the train-line marker)."""
    return '線' not in x
def pmap(arg):
    """Worker: parse a batch of gzipped HTML listings into JSON files.

    *arg* is a (worker_key, filenames) pair.  For each file: extract the
    canonical URL, the basic-info table and the spec table; write the result
    to json/<name> and move the HTML to parsed_htmls/.  Files that are not
    detail pages are deleted.
    """
    key, fns = arg
    SIZE = len(fns)
    for index, fn in enumerate(fns):
        try:
            print('now', key,index, 'size', SIZE, fn)
            html = gzip.decompress(open(fn, 'rb').read())
            soup = bs4.BeautifulSoup(html, 'lxml')
            # Pages without a canonical link are not usable listings.
            if soup.find('link', {'rel':'canonical'}) is None:
                Path(fn).unlink()
                continue
            canonical = soup.find('link', {'rel':'canonical'})['href']
            # Keep only property-detail pages.
            if '/detail/' not in canonical:
                Path(fn).unlink()
                continue
            basic_table = soup.find('div', {'class':'detail_basicInfo'})
            if basic_table is None:
                Path(fn).unlink()
                continue
            basic_table = basic_table.find('table')
            # handle th/td column misalignment
            tds = list(basic_table.find_all('td'))
            tds.pop(0)
            #print(tds.pop(0).text)
            tds = [td for td in tds if is_train(td)]
            print(len(basic_table.find_all('th')), len(tds))
            # One extra td slips through for this specific layout; drop it.
            if len(basic_table.find_all('th')) == 13 and len(tds) == 14:
                tds.pop(4)
                ...
            basic_obj = {sanitize(th.text):sanitize(td.text) for th, td in zip(basic_table.find_all('th'),tds)}
            detail_obj = {}
            for table in soup.find('div', {'class':'detail_specTable'}).find_all('table'):
                #print(table)
                for th, td in zip(table.find_all('th'), table.find_all('td')):
                    detail_obj[sanitize(th.text)] = sanitize(td.text)
            obj = {'basic':basic_obj, 'detail':detail_obj, 'canonical':canonical, 'title':soup.title.text}
            last_fn = fn.split('/')[-1]
            # Archive the processed HTML and emit the parsed JSON.
            shutil.move(fn, f'parsed_htmls/{last_fn}' )
            with open(f'json/{last_fn}', 'w') as fp:
                fp.write(json.dumps(obj, indent=2, ensure_ascii=False))
        except Exception as ex:
            # Best-effort: log and keep going with the next file.
            #Path(fn).unlink()
            print(ex)
#detail_table = soup.find('table', {'class':'bukken_detail_table'})
#detail_obj = {re.sub(r'\t', '', th.text):re.sub(r'(\t|\n)', '', td.text) for th, td in zip(detail_table.find_all('th'), detail_table.find_all('td'))}
#print(detail_obj)

#urls = [sha256(bytes(v, 'utf8')).hexdigest() for v in json.load(fp=open('./hash_url.json')).values()]
#fns = [f'./htmls/{url}' for url in urls]
import random
# Shuffle so each worker gets a roughly even mix of file sizes.
files = glob.glob('./htmls/*')
random.shuffle(files)

# Round-robin the files into 8 buckets, one per worker process.
args = {}
for index, fn in enumerate(files):
    key = index%8
    if args.get(key) is None:
        args[key] = []
    args[key].append(fn)
args = [(key,fns) for key,fns in args.items()]
#[pmap(arg) for arg in args]
# Fan the buckets out over 8 processes.
with PPE(max_workers=8) as exe:
    exe.map(pmap, args)
| 2.390625 | 2 |
google-datacatalog-apache-atlas-connector/src/google/datacatalog_connectors/apache_atlas/scrape/metadata_scraper.py | ricardolsmendes/datacatalog-connectors-hive | 19 | 4852 | <filename>google-datacatalog-apache-atlas-connector/src/google/datacatalog_connectors/apache_atlas/scrape/metadata_scraper.py
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from google.datacatalog_connectors.apache_atlas import scrape
class MetadataScraper:
    """Scrapes classifications, enum types and entity types (with their
    entities) from an Apache Atlas instance via the facade in `scrape`."""

    def __init__(self, connection_args):
        self._apache_atlas_facade = scrape.apache_atlas_facade.\
            ApacheAtlasFacade(connection_args)

        self.__metadata_enricher = scrape.metadata_enricher.\
            MetadataEnricher(self._apache_atlas_facade)

    def get_metadata(self, **kwargs):
        """Scrape all metadata and return ({classifications, enum_types,
        entity_types}, None)."""
        self._log_scrape_start('Scraping all Metadata...')
        classifications_dict = {}
        entity_types_dict = {}
        enum_types_dict = {}

        self._log_scrape_start('Scraping admin metrics...')
        admin_metrics = self._apache_atlas_facade.get_admin_metrics()
        logging.info(admin_metrics)
        self._log_single_object_scrape_result(admin_metrics)

        # Each typedef bundle contributes to all three dictionaries.
        self._log_scrape_start('Scraping typedefs...')
        for typedef in self._apache_atlas_facade.get_typedefs():
            self._scrape_classification_types(classifications_dict, typedef)
            self._scrape_enum_types(enum_types_dict, typedef)

            self._scrape_entity_types(entity_types_dict, typedef)

        self.__metadata_enricher.enrich_entity_relationships(entity_types_dict)

        # Second tuple slot is reserved (always None here).
        return {
            'classifications': classifications_dict,
            'enum_types': enum_types_dict,
            'entity_types': entity_types_dict
        }, None

    def _scrape_entity_types(self, entity_types_dict, typedef):
        """Populate *entity_types_dict* with each entityDef and its entities."""
        self._log_scrape_start('Scraping EntityTypes...')
        for entity_type in typedef.entityDefs:
            entity_type_name = entity_type.name
            entity_type_dict = {
                'name': entity_type_name,
                'data': entity_type._data,
                'superTypes': entity_type.superTypes,
                'entities': {}
            }

            entities = self.__scrape_entity_type(entity_type)
            entity_type_dict['entities'] = entities
            entity_types_dict[entity_type_name] = entity_type_dict

    def _scrape_classification_types(self, classifications_dict, typedef):
        """Populate *classifications_dict* from the typedef's classificationDefs."""
        self._log_scrape_start('Scraping Classifications/Templates...')
        for classification_type in typedef.classificationDefs:
            classification_data = classification_type._data
            logging.info('Classification: %s', classification_type.name)
            logging.debug(classification_data)
            classifications_dict[classification_type.name] = {
                'name': classification_type.name,
                'guid': classification_type.guid,
                'data': classification_data
            }

    def _scrape_enum_types(self, enum_types_dict, typedef):
        """Populate *enum_types_dict* from the typedef's enumDefs."""
        self._log_scrape_start('Scraping Enum types...')
        for enum_type in typedef.enumDefs:
            enum_data = enum_type._data
            logging.info('Enum type: %s', enum_type.name)
            logging.debug(enum_data)
            enum_types_dict[enum_type.name] = {
                'name': enum_type.name,
                'guid': enum_type.guid,
                'data': enum_data
            }

    def __scrape_entity_type(self, entity_type):
        """Search, fetch and classification-enrich all entities of one type."""
        searched_entries = {}
        entity_type_name = entity_type.name
        logging.info('=> Entity Type: %s', entity_type_name)
        logging.debug(entity_type._data)
        search_results = self._apache_atlas_facade.\
            search_entities_from_entity_type(entity_type_name)

        guids = []
        for entity in search_results:
            # Collecting guids and storing entity to enricher data later on.
            guid = entity.guid
            guids.append(guid)
            searched_entries[guid] = {'guid': guid, 'data': entity._data}

        fetched_entities_dict = {}
        if guids:
            fetched_entities_dict = self._apache_atlas_facade.fetch_entities(
                guids)
            self.__metadata_enricher.enrich_entity_classifications(
                fetched_entities_dict, searched_entries)

        logging.info('Entity Type: %s scrapped!', entity_type_name)
        logging.info('')
        return fetched_entities_dict

    @classmethod
    def _log_scrape_start(cls, message, *args):
        # Uniform section header in the scrape log.
        logging.info('')
        logging.info(message, *args)
        logging.info('-------------------------------------------------')

    @classmethod
    def _log_single_object_scrape_result(cls, the_object):
        logging.info('Found!' if the_object else 'NOT found!')
| 1.929688 | 2 |
aviary/roost/data.py | sxie22/aviary | 0 | 4853 | import functools
import json
from os.path import abspath, dirname, exists, join
from typing import Dict, Sequence
import numpy as np
import pandas as pd
import torch
from pymatgen.core import Composition
from torch.utils.data import Dataset
class CompositionData(Dataset):
    """PyTorch Dataset mapping material compositions to element-graph
    inputs for Roost models."""

    def __init__(
        self,
        df: pd.DataFrame,
        task_dict: Dict[str, str],
        elem_emb: str = "matscholar200",
        inputs: Sequence[str] = ["composition"],
        identifiers: Sequence[str] = ["material_id", "composition"],
    ):
        """Data class for Roost models.

        Args:
            df (pd.DataFrame): Pandas dataframe holding input and target values.
            task_dict (dict[str, "regression" | "classification"]): Map from target names to task
                type.
            elem_emb (str, optional): One of "matscholar200", "cgcnn92", "megnet16", "onehot112" or
                path to a file with custom embeddings. Defaults to "matscholar200".
            inputs (list[str], optional): df column name holding material compositions.
                Defaults to ["composition"].
            identifiers (list, optional): df columns for distinguishing data points. Will be
                copied over into the model's output CSV. Defaults to ["material_id", "composition"].
        """
        assert len(identifiers) == 2, "Two identifiers are required"
        assert len(inputs) == 1, "One input column required are required"

        self.inputs = inputs
        self.task_dict = task_dict
        self.identifiers = identifiers
        self.df = df

        # Named embeddings resolve to files bundled with the package;
        # anything else is treated as a path to a custom embedding file.
        if elem_emb in ["matscholar200", "cgcnn92", "megnet16", "onehot112"]:
            elem_emb = join(
                dirname(abspath(__file__)), f"../embeddings/element/{elem_emb}.json"
            )
        else:
            assert exists(elem_emb), f"{elem_emb} does not exist!"

        with open(elem_emb) as f:
            self.elem_features = json.load(f)

        self.elem_emb_len = len(list(self.elem_features.values())[0])

        # Number of model outputs per target: 1 for regression, n_classes
        # for classification (classes assumed to be 0..max label).
        self.n_targets = []
        for target, task in self.task_dict.items():
            if task == "regression":
                self.n_targets.append(1)
            elif task == "classification":
                n_classes = np.max(self.df[target].values) + 1
                self.n_targets.append(n_classes)

    def __len__(self):
        """Number of materials in the dataset."""
        return len(self.df)

    # NOTE(review): lru_cache on an instance method keys on `self` and keeps
    # the dataset instance alive for the cache's lifetime (ruff B019); kept
    # as-is since the per-item caching during training appears intentional.
    @functools.lru_cache(maxsize=None)  # Cache data for faster training
    def __getitem__(self, idx):
        """Build the element-graph inputs for the material at *idx*.

        Args:
            idx (int): dataset index

        Raises:
            AssertionError: if the composition contains an element with no
                embedding entry.
            ValueError: if the composition string cannot be parsed.

        Returns:
            atom_weights: torch.Tensor shape (M, 1)
                weights of atoms in the material
            atom_fea: torch.Tensor shape (M, n_fea)
                features of atoms in the material
            self_fea_idx: torch.Tensor shape (M*M, 1)
                list of self indices
            nbr_fea_idx: torch.Tensor shape (M*M, 1)
                list of neighbor indices
            target: torch.Tensor shape (1,)
                target value for material
            cry_id: torch.Tensor shape (1,)
                input id for the material
        """
        df_idx = self.df.iloc[idx]
        composition = df_idx[self.inputs][0]
        cry_ids = df_idx[self.identifiers].values

        # Fractional element amounts, normalized to sum to 1.
        comp_dict = Composition(composition).get_el_amt_dict()
        elements = list(comp_dict.keys())

        weights = list(comp_dict.values())
        weights = np.atleast_2d(weights).T / np.sum(weights)

        try:
            atom_fea = np.vstack([self.elem_features[element] for element in elements])
        except AssertionError:
            raise AssertionError(
                f"cry-id {cry_ids[0]} [{composition}] contains element types not in embedding"
            )
        except ValueError:
            raise ValueError(
                f"cry-id {cry_ids[0]} [{composition}] composition cannot be parsed into elements"
            )

        # Fully-connected element graph: every element attends to every
        # element (including itself).
        nele = len(elements)
        self_fea_idx = []
        nbr_fea_idx = []
        for i, _ in enumerate(elements):
            self_fea_idx += [i] * nele
            nbr_fea_idx += list(range(nele))

        # convert all data to tensors
        atom_weights = torch.Tensor(weights)
        atom_fea = torch.Tensor(atom_fea)
        self_fea_idx = torch.LongTensor(self_fea_idx)
        nbr_fea_idx = torch.LongTensor(nbr_fea_idx)

        targets = []
        for target in self.task_dict:
            if self.task_dict[target] == "regression":
                targets.append(torch.Tensor([df_idx[target]]))
            elif self.task_dict[target] == "classification":
                targets.append(torch.LongTensor([df_idx[target]]))

        return (
            (atom_weights, atom_fea, self_fea_idx, nbr_fea_idx),
            targets,
            *cry_ids,
        )
)
def collate_batch(dataset_list):
    """Collate per-crystal samples into one batch for property prediction.

    Each element of *dataset_list* is
    ``((atom_weights, atom_fea, self_fea_idx, nbr_fea_idx), targets, *cry_ids)``
    where the index tensors are local to that crystal.

    Returns a tuple of:
      * the batched inputs ``(atom_weights, atom_fea, self_fea_idx,
        nbr_fea_idx, crystal_atom_idx)`` with all atom indices shifted into
        batch-wide numbering and ``crystal_atom_idx`` mapping each atom back
        to its crystal,
      * a tuple with one stacked target tensor per task,
      * one tuple per identifier column (ids grouped across the batch).
    """
    all_weights = []
    all_features = []
    all_self_idx = []
    all_nbr_idx = []
    all_crystal_idx = []
    all_targets = []
    all_ids = []

    atom_offset = 0
    for batch_pos, (features, targets, *ids) in enumerate(dataset_list):
        weights, atom_fea, self_idx, nbr_idx = features
        n_atoms = atom_fea.shape[0]

        all_weights.append(weights)
        all_features.append(atom_fea)

        # Shift the per-crystal atom indices into batch-wide numbering.
        all_self_idx.append(self_idx + atom_offset)
        all_nbr_idx.append(nbr_idx + atom_offset)

        # Map every atom of this crystal back to its batch position.
        all_crystal_idx.append(torch.tensor([batch_pos] * n_atoms))

        all_targets.append(targets)
        all_ids.append(ids)

        atom_offset += n_atoms

    # One stacked tensor per task, across the whole batch.
    stacked_targets = tuple(
        torch.stack(per_task, dim=0) for per_task in zip(*all_targets)
    )

    return (
        (
            torch.cat(all_weights, dim=0),
            torch.cat(all_features, dim=0),
            torch.cat(all_self_idx, dim=0),
            torch.cat(all_nbr_idx, dim=0),
            torch.cat(all_crystal_idx),
        ),
        stacked_targets,
        *zip(*all_ids),
    )
| 2.6875 | 3 |
tests/test_util.py | danqing/dqpy | 0 | 4854 | import unittest
from dq import util
class TestUtil(unittest.TestCase):
    """Unit tests for dq.util helpers."""

    def test_safe_cast(self):
        # A castable string converts; an uncastable one yields the default.
        self.assertEqual(util.safe_cast('1', int), 1)
        self.assertEqual(util.safe_cast('meow', int, 2), 2)
| 3.046875 | 3 |
check_perm.py | codecakes/random_games | 0 | 4855 | <reponame>codecakes/random_games
"""
PermCheck
Check whether array A is a permutation.
https://codility.com/demo/results/demoANZ7M2-GFU/
Task description
A non-empty zero-indexed array A consisting of N integers is given.
A permutation is a sequence containing each element from 1 to N once, and only once.
For example, array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
A[3] = 2
is a permutation, but array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
is not a permutation, because value 2 is missing.
The goal is to check whether array A is a permutation.
Write a function:
def solution(A)
that, given a zero-indexed array A, returns 1 if array A is a permutation and 0 if it is not.
For example, given array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
A[3] = 2
the function should return 1.
Given array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
the function should return 0.
Assume that:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [1..1,000,000,000].
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments).
Elements of input arrays can be modified.
"""
def solution(A):
    """Return 1 if A is a permutation of 1..len(A), otherwise 0.

    N distinct positive integers reach the minimum possible sum
    N*(N+1)/2 only when they are exactly {1, ..., N}, so a distinctness
    check plus a sum check suffices.  O(N) time, O(N) space.
    """
    n = len(A)
    if len(set(A)) != n:   # duplicates -> not a permutation
        return 0
    return 1 if sum(A) == n * (n + 1) / 2 else 0
src/oci/log_analytics/models/log_analytics_association.py | Manny27nyc/oci-python-sdk | 249 | 4856 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LogAnalyticsAssociation(object):
"""
LogAnalyticsAssociation
"""
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "ACCEPTED"
LIFE_CYCLE_STATE_ACCEPTED = "ACCEPTED"
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "IN_PROGRESS"
LIFE_CYCLE_STATE_IN_PROGRESS = "IN_PROGRESS"
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "SUCCEEDED"
LIFE_CYCLE_STATE_SUCCEEDED = "SUCCEEDED"
#: A constant which can be used with the life_cycle_state property of a LogAnalyticsAssociation.
#: This constant has a value of "FAILED"
LIFE_CYCLE_STATE_FAILED = "FAILED"
    def __init__(self, **kwargs):
        """
        Initializes a new LogAnalyticsAssociation object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param failure_message:
            The value to assign to the failure_message property of this LogAnalyticsAssociation.
        :type failure_message: str

        :param agent_id:
            The value to assign to the agent_id property of this LogAnalyticsAssociation.
        :type agent_id: str

        :param time_last_attempted:
            The value to assign to the time_last_attempted property of this LogAnalyticsAssociation.
        :type time_last_attempted: datetime

        :param retry_count:
            The value to assign to the retry_count property of this LogAnalyticsAssociation.
        :type retry_count: int

        :param source_name:
            The value to assign to the source_name property of this LogAnalyticsAssociation.
        :type source_name: str

        :param source_display_name:
            The value to assign to the source_display_name property of this LogAnalyticsAssociation.
        :type source_display_name: str

        :param source_type_name:
            The value to assign to the source_type_name property of this LogAnalyticsAssociation.
        :type source_type_name: str

        :param life_cycle_state:
            The value to assign to the life_cycle_state property of this LogAnalyticsAssociation.
            Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type life_cycle_state: str

        :param entity_id:
            The value to assign to the entity_id property of this LogAnalyticsAssociation.
        :type entity_id: str

        :param entity_name:
            The value to assign to the entity_name property of this LogAnalyticsAssociation.
        :type entity_name: str

        :param entity_type_name:
            The value to assign to the entity_type_name property of this LogAnalyticsAssociation.
        :type entity_type_name: str

        :param host:
            The value to assign to the host property of this LogAnalyticsAssociation.
        :type host: str

        :param agent_entity_name:
            The value to assign to the agent_entity_name property of this LogAnalyticsAssociation.
        :type agent_entity_name: str

        :param entity_type_display_name:
            The value to assign to the entity_type_display_name property of this LogAnalyticsAssociation.
        :type entity_type_display_name: str

        :param log_group_id:
            The value to assign to the log_group_id property of this LogAnalyticsAssociation.
        :type log_group_id: str

        :param log_group_name:
            The value to assign to the log_group_name property of this LogAnalyticsAssociation.
        :type log_group_name: str

        :param log_group_compartment:
            The value to assign to the log_group_compartment property of this LogAnalyticsAssociation.
        :type log_group_compartment: str

        """
        # Maps each Python attribute to its swagger type (used by the SDK's
        # generic (de)serialization machinery).
        self.swagger_types = {
            'failure_message': 'str',
            'agent_id': 'str',
            'time_last_attempted': 'datetime',
            'retry_count': 'int',
            'source_name': 'str',
            'source_display_name': 'str',
            'source_type_name': 'str',
            'life_cycle_state': 'str',
            'entity_id': 'str',
            'entity_name': 'str',
            'entity_type_name': 'str',
            'host': 'str',
            'agent_entity_name': 'str',
            'entity_type_display_name': 'str',
            'log_group_id': 'str',
            'log_group_name': 'str',
            'log_group_compartment': 'str'
        }

        # Maps each Python attribute to its JSON field name in the API.
        self.attribute_map = {
            'failure_message': 'failureMessage',
            'agent_id': 'agentId',
            'time_last_attempted': 'timeLastAttempted',
            'retry_count': 'retryCount',
            'source_name': 'sourceName',
            'source_display_name': 'sourceDisplayName',
            'source_type_name': 'sourceTypeName',
            'life_cycle_state': 'lifeCycleState',
            'entity_id': 'entityId',
            'entity_name': 'entityName',
            'entity_type_name': 'entityTypeName',
            'host': 'host',
            'agent_entity_name': 'agentEntityName',
            'entity_type_display_name': 'entityTypeDisplayName',
            'log_group_id': 'logGroupId',
            'log_group_name': 'logGroupName',
            'log_group_compartment': 'logGroupCompartment'
        }

        # Backing fields for the properties; populated from **kwargs by the
        # @init_model_state_from_kwargs class decorator.
        self._failure_message = None
        self._agent_id = None
        self._time_last_attempted = None
        self._retry_count = None
        self._source_name = None
        self._source_display_name = None
        self._source_type_name = None
        self._life_cycle_state = None
        self._entity_id = None
        self._entity_name = None
        self._entity_type_name = None
        self._host = None
        self._agent_entity_name = None
        self._entity_type_display_name = None
        self._log_group_id = None
        self._log_group_name = None
        self._log_group_compartment = None
@property
def failure_message(self):
    """str: the failure message of this LogAnalyticsAssociation."""
    return self._failure_message

@failure_message.setter
def failure_message(self, failure_message):
    """Store the failure message of this LogAnalyticsAssociation."""
    self._failure_message = failure_message
@property
def agent_id(self):
    """str: the agent unique identifier."""
    return self._agent_id

@agent_id.setter
def agent_id(self, agent_id):
    """Store the agent unique identifier."""
    self._agent_id = agent_id
@property
def time_last_attempted(self):
    """datetime: the date of the last association attempt."""
    return self._time_last_attempted

@time_last_attempted.setter
def time_last_attempted(self, time_last_attempted):
    """Store the date of the last association attempt."""
    self._time_last_attempted = time_last_attempted
@property
def retry_count(self):
    """int: how many times the association will be attempted before failing."""
    return self._retry_count

@retry_count.setter
def retry_count(self, retry_count):
    """Store the number of association attempts allowed before failing."""
    self._retry_count = retry_count
@property
def source_name(self):
    """str: the source name."""
    return self._source_name

@source_name.setter
def source_name(self, source_name):
    """Store the source name."""
    self._source_name = source_name
@property
def source_display_name(self):
    """str: the source display name."""
    return self._source_display_name

@source_display_name.setter
def source_display_name(self, source_display_name):
    """Store the source display name."""
    self._source_display_name = source_display_name
@property
def source_type_name(self):
    """str: the source type internal name."""
    return self._source_type_name

@source_type_name.setter
def source_type_name(self, source_type_name):
    """Store the source type internal name."""
    self._source_type_name = source_type_name
@property
def life_cycle_state(self):
    """str: the lifecycle status (ACCEPTED, IN_PROGRESS, SUCCEEDED or FAILED).

    Unrecognized values returned by the service are mapped to
    'UNKNOWN_ENUM_VALUE'.
    """
    return self._life_cycle_state

@life_cycle_state.setter
def life_cycle_state(self, life_cycle_state):
    """Store the lifecycle status, normalizing unrecognized values.

    Anything outside ACCEPTED / IN_PROGRESS / SUCCEEDED / FAILED is
    replaced with 'UNKNOWN_ENUM_VALUE'.
    """
    allowed_values = ["ACCEPTED", "IN_PROGRESS", "SUCCEEDED", "FAILED"]
    if not value_allowed_none_or_none_sentinel(life_cycle_state, allowed_values):
        life_cycle_state = 'UNKNOWN_ENUM_VALUE'
    self._life_cycle_state = life_cycle_state
@property
def entity_id(self):
    """str: the entity unique identifier."""
    return self._entity_id

@entity_id.setter
def entity_id(self, entity_id):
    """Store the entity unique identifier."""
    self._entity_id = entity_id
@property
def entity_name(self):
    """str: the entity name."""
    return self._entity_name

@entity_name.setter
def entity_name(self, entity_name):
    """Store the entity name."""
    self._entity_name = entity_name
@property
def entity_type_name(self):
    """str: the entity type internal name."""
    return self._entity_type_name

@entity_type_name.setter
def entity_type_name(self, entity_type_name):
    """Store the entity type internal name."""
    self._entity_type_name = entity_type_name
@property
def host(self):
    """str: the host name."""
    return self._host

@host.setter
def host(self, host):
    """Store the host name."""
    self._host = host
@property
def agent_entity_name(self):
    """str: the name of the entity which contains the agent."""
    return self._agent_entity_name

@agent_entity_name.setter
def agent_entity_name(self, agent_entity_name):
    """Store the name of the entity which contains the agent."""
    self._agent_entity_name = agent_entity_name
@property
def entity_type_display_name(self):
    """str: the entity type display name."""
    return self._entity_type_display_name

@entity_type_display_name.setter
def entity_type_display_name(self, entity_type_display_name):
    """Store the entity type display name."""
    self._entity_type_display_name = entity_type_display_name
@property
def log_group_id(self):
    """str: the log group unique identifier."""
    return self._log_group_id

@log_group_id.setter
def log_group_id(self, log_group_id):
    """Store the log group unique identifier."""
    self._log_group_id = log_group_id
@property
def log_group_name(self):
    """str: the log group name."""
    return self._log_group_name

@log_group_name.setter
def log_group_name(self, log_group_name):
    """Store the log group name."""
    self._log_group_name = log_group_name
@property
def log_group_compartment(self):
    """str: the log group compartment."""
    return self._log_group_compartment

@log_group_compartment.setter
def log_group_compartment(self, log_group_compartment):
    """Store the log group compartment."""
    self._log_group_compartment = log_group_compartment
def __repr__(self):
    """Return a formatted flat-dict rendering of this model."""
    return formatted_flat_dict(self)
def __eq__(self, other):
    """Two instances are equal when every attribute matches; None never matches."""
    return other is not None and self.__dict__ == other.__dict__
def __ne__(self, other):
    """Logical negation of ``__eq__``."""
    return not self == other
| 2.09375 | 2 |
symblic_game/NEW_GAME.py | zishanqin/Symbolic-transfer | 3 | 4857 | <reponame>zishanqin/Symbolic-transfer
'Author: <NAME>'
'''email: <EMAIL>'''
# -------------------------------------------------------------------------------------------------- #
# This code can run 4 different models of Reinforcement Learning:
# Q-Learning (QL), DQN, SRL (DSRL), SRL+CS(DSRL_object_near) and some other variations of SRL
# The setting for each run can be set at the end of the code
# It can load and save the models in Excel form
# There are some pre-defined environments, but you can create your own
# Press G to get intermediate Graphs and P to stop
# -------------------------------------------------------------------------------------------------- #
import Class
import pprint
import random
import sys
import numpy as np
import pygame
# from pyglet import clock
import pandas as pd
import time
import json
from time import sleep
import math
import matplotlib.pyplot as plt
import os
import glob
## Comment this part if not using DQN model:
# import keras
# from keras.models import Sequential
# from keras.layers import Dense, Activation, Flatten
# from keras.models import model_from_json
# from keras.optimizers import sgd
# from keras.utils import plot_model
# import tensorflow as tf
# from keras.backend.tensorflow_backend import set_session
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
# set_session(tf.Session(config=config))
# ------ environments ------
# region COLOR DEFINITION
explore_set = set()    # visited-cell bookkeeping; its only visible update here is commented out in choose_action
explore_dict = dict()  # visit counts per (x, y) cell; used as an exploration penalty in choose_action
white = (255, 255, 255)
black = (0, 0, 0)
grey = (80, 80, 80)
red = (255, 0, 0)
blue = (0, 0, 255)
green = (0, 255, 0)
yellow = (250, 250, 0)
pink = (250, 105, 180)
# endregion
# region PANDAS DEFINITION
pd.set_option('display.max_columns', None)   # print all model columns
pd.set_option('display.large_repr', 'info')
desired_width = 180
pd.set_option('display.width', desired_width)
pd.set_option('precision', 4)
np.random.seed(123)  # For reproducibility of random environments and replay sampling
pygame.init()  # Pygame initialization
pp = pprint.PrettyPrinter(indent=4)
actions = ['up', 'down', 'right', 'left']  # action space; index order must match actions_dict
actions_dict = {'up': 0, 'down': 1, 'right': 2, 'left': 3}
p_keys = [pygame.K_w, pygame.K_a, pygame.K_s, pygame.K_d]  # manual-control keys (w/a/s/d)
# clock.tick(20)
def pop(self):
    '''Removes a layer instance on top of the layer stack.

    NOTE(review): despite the ``self`` parameter name this is a free
    function, called as ``pop(model)`` on a Keras ``Sequential`` (see
    ``create_model``).  It drops layers while ``self.outputs`` is truthy and
    rewires the model's node bookkeeping by hand -- presumably to fully
    reset a ``Sequential`` before rebuilding it.  It relies on private Keras
    internals; confirm against the installed Keras version.
    '''
    while self.outputs:
        self.layers.pop()
        if not self.layers:
            # Stack exhausted: clear all graph bookkeeping.
            self.outputs = []
            self.inbound_nodes = []
            self.outbound_nodes = []
        else:
            # Detach the new top layer and re-expose its output tensor.
            self.layers[-1].outbound_nodes = []
            self.outputs = [self.layers[-1].output]
    self.built = False  # force Keras to rebuild on the next compile/fit
# region REWARDS
negative_reward = 5  # penalty magnitude (how its sign is applied happens outside this chunk -- TODO confirm)
positive_reward = 1  # reward for a positive outcome
step_reward = 0      # per-step reward; 0 disables any step cost
# endregion
# ------ environments configuration (till line 640) ------
# region TEXT FONTS DEFINITION
smallfont = pygame.font.SysFont('comicsansms', 13)            # HUD labels (alg, score, steps, ...)
smallfont_act = pygame.font.SysFont('arial', 13)              # action / act-list read-outs
mediumfont_act = pygame.font.SysFont('arial', 18, bold=True)  # environment banner
pygame.font.init()
# endregion
# region DISPLAY FUNCTIONS
def show_Alg(alg, screen):
    """Blit the current algorithm name at the HUD's top-left."""
    label = smallfont.render("Alg: " + alg, True, black)
    screen.blit(label, [5 + 90 * 0, 0])
def show_Samples(sample, screen):
    """Blit the current sample counter on the HUD row."""
    label = smallfont.render("Sample: " + str(sample), True, black)
    screen.blit(label, [60 + 100 * 1, 0])
def show_Level(level, screen):
    """Blit the current episode number on the HUD row."""
    label = smallfont.render("Episode: " + str(level), True, black)
    screen.blit(label, [50 + 100 * 2, 0])
def show_Score(score, screen):
    """Blit the current score on the HUD row."""
    label = smallfont.render("Score: " + str(score), True, black)
    screen.blit(label, [50 + 100 * 3, 0])
def show_Steps(steps, screen):
    """Blit the current step counter on the HUD row."""
    label = smallfont.render("Steps: " + str(steps), True, black)
    screen.blit(label, [50 + 100 * 4, 0])
def show_Percent(percent, screen):
    """Blit the success-percentage list, each entry formatted to 2 decimals."""
    label = smallfont.render("Percent: " + str(['%.2f' % elem for elem in percent]), True, black)
    screen.blit(label, [5, 30 * 4])
def show_Steps_list(steps_list, screen):
    """Blit the per-episode step history."""
    label = smallfont.render("Steps_list: " + str(steps_list), True, black)
    screen.blit(label, [5, 30 * 1])
def show_Act_List(act_list, screen):
    """Blit the list of actions taken so far."""
    label = smallfont_act.render("act_list: " + str(act_list), True, black)
    screen.blit(label, [5, 30 * 2])
def show_Action(act, screen):
    """Blit the most recently chosen action."""
    label = smallfont_act.render("Chosen Action: " + act, True, black)
    screen.blit(label, [5, 30 * 3])
def show_Env(env, screen):
    """Blit the environment banner above the playing field."""
    label = mediumfont_act.render("Environment: " + str(env), True, black)
    screen.blit(label, [50, 30 * 5])
# endregion
# region CREATE OBJ_LIST FROM STATE AND RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS
''' CREATE obj_list - FROM env '''
def create_obj_list(env):
    """Wrap every non-empty interior cell of a state grid in a ``Class.Obj``.

    The grid is transposed first so coordinates come out as (x, y); the
    outer border (the walls) is skipped.
    """
    grid = env.transpose()
    width = grid.shape[0]
    height = grid.shape[1]
    objects = []
    for x in range(1, width - 1):
        for y in range(1, height - 1):
            cell = grid[x][y]
            if cell != 0:
                objects.append(Class.Obj(cell, (x, y)))
    return objects
''' CREATE A RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS - FROM obj_list '''
def relation_obj_list(obj_list, agent_pos):
    """Re-express each object's location relative to the agent.

    Returns new ``Class.Obj`` instances whose ``loc`` is
    (agent_x - obj_x, agent_y - obj_y); types are preserved.
    """
    agent_x = agent_pos[0]
    agent_y = agent_pos[1]
    relative = []
    for obj in obj_list:
        offset = (agent_x - obj.loc[0], agent_y - obj.loc[1])
        relative.append(Class.Obj(obj.tp, offset))
    return relative
# endregion
# region DRAW OBJECTS
x_zero_screen = 50   # left edge of the playing field, in pixels
y_zero_screen = 180  # top edge of the playing field, in pixels
size_obj = 37        # tile size in pixels (grid cell -> pixel scale)
def draw_objects(agent, positivo_list, negativo_list, wall_list, screen):
    """Blit every sprite (positives, negatives, agent, walls) onto *screen*."""
    # Class.Grid.draw_grid(screen)  # uncomment to overlay a grid

    def to_px(pos):
        # Convert a grid cell to its top-left pixel coordinate.
        return (pos[0] * size_obj + x_zero_screen, y_zero_screen + pos[1] * size_obj)

    for item in positivo_list:
        screen.blit(item.icon, to_px(item.pos))
    for item in negativo_list:
        screen.blit(item.icon, to_px(item.pos))
    screen.blit(agent.icon, to_px(agent.pos))
    for item in wall_list:
        screen.blit(item.icon, to_px(item.pos))
# endregion
# region CREATE THE STATE FROM THE ENVIRONMENT
def update_state(h_max, v_max, agent, positivo_list, negativo_list, wall_list):
    """Build the symbolic state grid (``v_max`` rows x ``h_max`` cols, int16).

    Cell codes: 60 = positive object, 180 = negative object, 255 = wall.
    The agent is intentionally NOT written into the grid (the stamping line
    is disabled), matching how the callers encode states.
    """
    grid = np.zeros((v_max, h_max)).astype(np.int16)
    for obj in positivo_list:
        grid[obj.pos[1]][obj.pos[0]] = 60   # SYMBOL 60 POSITIVE
    for obj in negativo_list:
        grid[obj.pos[1]][obj.pos[0]] = 180  # SYMBOL 180 NEGATIVE
    for obj in wall_list:
        grid[obj.pos[1]][obj.pos[0]] = 255  # SYMBOL 255 WALL
    # grid[agent.pos[1]][agent.pos[0]] = 120  # agent stamping intentionally disabled
    return grid
# TODO I have to check if this v_max and h_max have to be declared eveytime
# endregion
# region ENVIRONMENT CONFIGURATION
def environment_conf(s_env):
    """Instantiate one of the pre-defined grid environments.

    Each layout fixes the grid size, the agent's start cell and two interior
    masks: ``m_posi`` marks cells holding positive (reward) objects and
    ``m_nega`` marks negative (penalty) objects.  The outer border is always
    walled.  Layouts 5, 6, 7 and 11 place objects at random positions
    (drawn from the module-seeded ``random``).

    :param s_env: layout id -- 1..22 or 31..33.
    :return: (negativo_list, positivo_list, agent, wall_list, h_max, v_max)
    :raises ValueError: for an id with no layout.  (The original two
        disjoint if-chains fell through silently on unknown ids and crashed
        later with a NameError on ``v_max``.)
    """
    if s_env == 1:
        v_max = 4
        h_max = 5
        x_agent = 1
        y_agent = 2
        m_nega = np.matrix([[0, 0, 0],
                            [0, 1, 0]])
        m_posi = np.matrix([[0, 1, 0],
                            [0, 0, 0]])
    elif s_env == 2:
        v_max = 4
        h_max = 5
        x_agent = 1
        y_agent = 2
        m_nega = np.matrix([[0, 0, 0],
                            [0, 0, 1]])
        m_posi = np.matrix([[0, 0, 1],
                            [0, 0, 0]])
    elif s_env == 3:
        v_max = 4
        h_max = 5
        x_agent = 1
        y_agent = 2
        m_nega = np.matrix([[1, 0, 0],
                            [0, 0, 0]])
        m_posi = np.matrix([[0, 1, 0],
                            [0, 0, 0]])
    elif s_env == 4:
        v_max = 4
        h_max = 4
        x_agent = 1
        y_agent = 1
        m_nega = np.matrix([[0, 0],
                            [0, 0]])
        m_posi = np.matrix([[0, 0],
                            [0, 1]])
    elif s_env == 5:
        v_max = 5
        h_max = 5
        x_agent = 2
        y_agent = 2
        m_nega = np.zeros(shape=(v_max - 2, h_max - 2))
        m_posi = np.zeros(shape=(v_max - 2, h_max - 2))
        # Draw random cells until one differs from the agent's start cell.
        while (True):
            x = random.randrange(0, h_max - 2)
            y = random.randrange(0, v_max - 2)
            if x != x_agent - 1 or y != y_agent - 1:
                element = (x, y)
                break
        m_posi[element] = 1
    elif s_env == 6:
        v_max = 7
        h_max = 7
        x_agent = 3
        y_agent = 3
        m_nega = np.zeros(shape=(v_max - 2, h_max - 2))
        m_posi = np.zeros(shape=(v_max - 2, h_max - 2))
        while (True):
            x = random.randrange(0, h_max - 2)
            y = random.randrange(0, v_max - 2)
            if x != x_agent - 1 or y != y_agent - 1:
                element = (x, y)
                break
        m_posi[element] = 1
    elif s_env == 7:
        v_max = 9
        h_max = 9
        x_agent = 4
        y_agent = 4
        m_nega = np.zeros(shape=(v_max - 2, h_max - 2))
        m_posi = np.zeros(shape=(v_max - 2, h_max - 2))
        while (True):
            x = random.randrange(0, h_max - 2)
            y = random.randrange(0, v_max - 2)
            if x != x_agent - 1 or y != y_agent - 1:
                element = (x, y)
                break
        m_posi[element] = 1
    elif s_env == 8:
        v_max = 5
        h_max = 5
        x_agent = 2
        y_agent = 2
        m_nega = np.matrix([[0, 0, 0],
                            [0, 0, 0],
                            [1, 0, 1]])
        m_posi = np.matrix([[1, 0, 1],
                            [0, 0, 0],
                            [0, 0, 0]])
    elif s_env == 9:
        v_max = 5
        h_max = 5
        x_agent = 2
        y_agent = 2
        m_nega = np.matrix([[1, 0, 0],
                            [0, 0, 0],
                            [0, 0, 1]])
        m_posi = np.matrix([[0, 0, 1],
                            [0, 0, 0],
                            [1, 0, 0]])
    elif s_env == 10:
        v_max = 9
        h_max = 9
        x_agent = 4
        y_agent = 4
        m_nega = np.matrix([[1, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0],
                            [1, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 1]])
        m_posi = np.matrix([[0, 0, 1, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0],
                            [1, 0, 0, 0, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 1, 0, 0, 0, 1],
                            [0, 0, 0, 0, 0, 0, 0],
                            [1, 0, 0, 0, 1, 0, 0]])
    elif s_env == 11:
        v_max = 9
        h_max = 9
        x_agent = 4
        y_agent = 4
        # Draw 14 distinct random interior cells (never on row/col 3);
        # first half become negative objects, second half positive.
        element_list = []
        for n in range(14):
            while (True):
                x = random.randrange(0, 7)
                y = random.randrange(0, 7)
                if x != 3 and y != 3 and (x, y) not in element_list:
                    element = (x, y)
                    break
            element_list.append(element)
        m_nega = np.zeros(shape=(v_max - 2, h_max - 2))
        m_posi = np.zeros(shape=(v_max - 2, h_max - 2))
        half = len(element_list) / 2
        nega_list = element_list[:int(half)]
        posi_list = element_list[int(half):]
        for ele in nega_list:
            m_nega[ele] = 1
        for ele in posi_list:
            m_posi[ele] = 1
    elif s_env == 12:
        v_max = 3
        h_max = 5
        x_agent = 2
        y_agent = 1
        m_nega = np.matrix([1, 0, 0])
        m_posi = np.matrix([0, 0, 1])
    elif s_env == 13:
        v_max = 3
        h_max = 5
        x_agent = 2
        y_agent = 1
        m_nega = np.matrix([0, 0, 0])
        m_posi = np.matrix([1, 0, 1])
    elif s_env == 14:
        v_max = 3
        h_max = 6
        x_agent = 2
        y_agent = 1
        m_nega = np.matrix([1, 0, 0, 0])
        m_posi = np.matrix([0, 0, 0, 1])
    elif s_env == 15:
        v_max = 3
        h_max = 6
        x_agent = 2
        y_agent = 1
        m_nega = np.matrix([0, 0, 0, 0])
        m_posi = np.matrix([1, 0, 0, 1])
    elif s_env == 16:
        v_max = 3
        h_max = 7
        x_agent = 3
        y_agent = 1
        m_nega = np.matrix([1, 0, 0, 0, 0])
        m_posi = np.matrix([0, 0, 0, 0, 1])
    elif s_env == 17:
        v_max = 3
        h_max = 7
        x_agent = 3
        y_agent = 1
        m_nega = np.matrix([0, 0, 0, 0, 0])
        m_posi = np.matrix([1, 0, 0, 0, 1])
    elif s_env == 18:
        v_max = 3
        h_max = 9
        x_agent = 4
        y_agent = 1
        m_nega = np.matrix([1, 0, 0, 0, 0, 0, 0])
        m_posi = np.matrix([0, 0, 0, 0, 0, 0, 1])
    elif s_env == 19:
        v_max = 3
        h_max = 9
        x_agent = 4
        y_agent = 1
        m_nega = np.matrix([0, 0, 0, 0, 0, 0, 0])
        m_posi = np.matrix([1, 0, 0, 0, 0, 0, 1])
    elif s_env == 20:
        v_max = 5
        h_max = 5
        x_agent = 2
        y_agent = 2
        m_nega = np.matrix([[0, 0, 0],
                            [0, 0, 0],
                            [0, 0, 0]])
        m_posi = np.matrix([[1, 0, 1],
                            [0, 0, 0],
                            [0, 1, 0]])
    elif s_env == 21:
        v_max = 5
        h_max = 5
        x_agent = 2
        y_agent = 2
        m_nega = np.matrix([[0, 1, 0],
                            [0, 0, 0],
                            [1, 0, 1]])
        m_posi = np.matrix([[1, 0, 1],
                            [0, 0, 0],
                            [0, 1, 0]])
    elif s_env == 22:
        v_max = 5
        h_max = 5
        x_agent = 2
        y_agent = 2
        m_nega = np.matrix([[0, 0, 0],
                            [0, 0, 0],
                            [0, 0, 0]])
        m_posi = np.matrix([[1, 0, 1],
                            [0, 0, 0],
                            [1, 0, 1]])
    elif s_env == 31:
        v_max = 5
        h_max = 5
        x_agent = 1
        y_agent = 2
        m_nega = np.matrix([[0, 0, 0],
                            [0, 1, 0],
                            [0, 0, 0]])
        m_posi = np.matrix([[0, 1, 0],
                            [0, 0, 0],
                            [0, 0, 0]])
    elif s_env == 32:
        v_max = 5
        h_max = 5
        x_agent = 1
        y_agent = 2
        m_nega = np.matrix([[0, 0, 0],
                            [0, 0, 1],
                            [0, 0, 0]])
        m_posi = np.matrix([[0, 0, 1],
                            [0, 0, 0],
                            [0, 0, 0]])
    elif s_env == 33:
        v_max = 5
        h_max = 5
        x_agent = 1
        y_agent = 2
        m_nega = np.matrix([[1, 0, 0],
                            [0, 0, 0],
                            [0, 0, 0]])
        m_posi = np.matrix([[0, 1, 0],
                            [0, 0, 0],
                            [0, 0, 0]])
    else:
        raise ValueError("unknown environment id: %s" % s_env)
    # Border walls around the whole grid.
    wall_list = []
    for y in range(v_max):
        for x in range(h_max):
            if y == v_max - 1 or y == 0 or x == h_max - 1 or x == 0:
                wall_list.append(Class.Wall('wall', x, y))
    # The agent.
    agent = Class.Agent('agent', x_agent, y_agent)
    # Positive objects: mask indices are shifted by +1 for the wall border.
    positivo_list = []
    for x in range(m_posi.shape[0]):
        for y in range(m_posi.shape[1]):
            if m_posi[x, y] == 1:
                positivo_list.append(Class.Positivo('positivo', y + 1, x + 1))
    # Negative objects, same shift.
    negativo_list = []
    for x in range(m_nega.shape[0]):
        for y in range(m_nega.shape[1]):
            if m_nega[x, y] == 1:
                negativo_list.append(Class.Negativo('negativo', y + 1, x + 1))
    return negativo_list, positivo_list, agent, wall_list, h_max, v_max
# endregion
# region SAVE - LOAD - CREATE
def save_model(model, path):
    """Persist a Keras model: weights to ``<path>.h5``, architecture JSON to ``<path>.json``."""
    model.save_weights(path + ".h5", overwrite=True)
    with open(path + ".json", "w") as arch_file:
        json.dump(model.to_json(), arch_file)
def load_model(s_alg, path):
    """Load a previously saved model for algorithm *s_alg* from *path* (no extension).

    QL models are a plain Q-table sheet; all DSRL variants share the same
    two-level (state, action) indexed sheet (the original code repeated the
    identical branch seven times); DQN restores a Keras network (json + h5)
    and re-compiles it with the optimizer named in the run-configuration
    sheet.

    :return: (model, optimizer_config) -- optimizer_config is non-empty only
        for DQN.
    :raises ValueError: for an unknown algorithm name (previously this fell
        through and crashed with a NameError at the return).
    """
    dsrl_variants = ("DSRL", "DSRL_dist", "DSRL_dist_type", "DSRL_dist_type_near",
                     "DSRL_dist_type_near_propNeg", "DSRL_object_near", "DSRL_object")
    optimizer_config = []
    print(path)
    # NOTE: `sheetname` is the legacy pandas keyword (renamed `sheet_name`
    # in newer pandas); kept for the pandas version this project targets.
    if s_alg == "QL":
        model = pd.read_excel(path + ".xlsx", sheetname="model")
    elif s_alg in dsrl_variants:
        model = pd.read_excel(path + ".xlsx", sheetname="model",
                              header=[0], index_col=[0, 1])
    elif s_alg == "DQN":
        with open(path + ".json", "r") as jfile:
            model = model_from_json(json.load(jfile))
        model.load_weights(path + ".h5")
        conf = pd.read_excel(path + ".xlsx", sheetname="Run_Conf", header=[0])
        optimizer = conf.loc[19, "A"]  # optimizer name recorded in the run sheet
        print("op_conf ", optimizer)
        use_optimizer, optimizer_config = define_optimizer(optimizer)
        model.compile(loss='mse', optimizer=use_optimizer)
        model.summary()
    else:
        raise ValueError("unknown algorithm: %s" % s_alg)
    return model, optimizer_config
def create_model(s_alg, state_shape, net_conf):
    """Create a fresh, untrained model for the selected algorithm.

    QL gets an empty Q-table DataFrame; every DSRL variant gets an empty
    (state, actions) multi-indexed DataFrame; DQN gets a dense network with
    two hidden layers sized by *net_conf*.

    :return: (model, optimizer_config) -- optimizer_config is non-empty only
        for DQN.
    """
    dsrl_variants = ("DSRL", "DSRL_dist", "DSRL_dist_type", "DSRL_dist_type_near",
                     "DSRL_dist_type_near_propNeg", "DSRL_object_near", "DSRL_object")
    optimizer_config = []
    if s_alg == "QL":
        model = pd.DataFrame()
        # NOTE(review): assigning a list to ``index.name`` relies on legacy
        # pandas behaviour (newer pandas requires a hashable name).
        model.index.name = ["States", "Action"]
    elif s_alg in dsrl_variants:
        # NOTE(review): ``labels=`` was renamed ``codes=`` in pandas >= 0.25.
        m_index = pd.MultiIndex(levels=[[''], [""]],
                                labels=[[], []],
                                names=['state', 'actions'])
        model = pd.DataFrame(index=m_index)
    elif s_alg == "DQN":
        model = Sequential()
        pop(model)  # fully reset the layer stack before rebuilding
        model = Sequential()
        model.add(Dense(net_conf["Hidden_size"],
                        input_dim=state_shape[0] * state_shape[1],
                        activation="relu",
                        name="DENSE_1"))
        model.add(Dense(net_conf["Hidden_size"],
                        activation='relu',
                        name="DENSE_2"))
        model.add(Dense(net_conf["N_actions"],
                        name="DENSE_3"))
        use_optimizer, optimizer_config = define_optimizer(net_conf["Optimizer"])
        model.compile(loss='mse', optimizer=use_optimizer)
        print(model.summary())
    return model, optimizer_config
# endregion
# ------ RL algorithms (till line 1030) ------
# region DQN - CONFIGURATIONS
class ExperienceReplay(object):
    """Replay buffer for DQN training.

    Stores transitions <s, a, r, s'> together with a game-over flag and
    produces randomly drawn (input, target) batches where the target is
    ``r + discount * max_a' Q(s', a')`` -- or just ``r`` on terminal states.
    """

    def __init__(self, max_memory=100, discount=.9):
        """*max_memory* caps the buffer size; *discount* is the future-reward factor."""
        self.max_memory = max_memory
        self.memory = list()
        self.discount = discount

    def remember(self, states, game_over):
        """Append one ``[experience, game_over]`` record, evicting the oldest when full."""
        self.memory.append([states, game_over])
        if len(self.memory) > self.max_memory:
            del self.memory[0]

    def get_batch(self, model, batch_size=10):
        """Sample up to *batch_size* stored transitions and build training targets."""
        stored = len(self.memory)
        num_actions = model.output_shape[-1]
        env_dim = self.memory[0][0][0].shape[1]
        n_rows = min(stored, batch_size)
        inputs = np.zeros((n_rows, env_dim))
        targets = np.zeros((n_rows, num_actions))
        for i, idx in enumerate(np.random.randint(0, stored, size=n_rows)):
            state_t, action_t, reward_t, state_tp1 = self.memory[idx][0]
            game_over = self.memory[idx][1]
            inputs[i:i + 1] = state_t
            # Start from the model's own predictions so every action we did
            # not take contributes zero training loss.
            targets[i] = model.predict(state_t)[0]
            q_next = np.max(model.predict(state_tp1)[0])
            if game_over:
                # Terminal transition: the target is the final reward.
                targets[i, action_t] = reward_t
            else:
                targets[i, action_t] = reward_t + self.discount * q_next
        return inputs, targets
def define_optimizer(s_optimizer):
    """Instantiate the Keras optimizer named by *s_optimizer*.

    Supports ``"adam"`` and ``"rms_opt"`` (RMSprop) with the hard-coded
    hyper-parameters below.

    :return: (optimizer_instance, [lr, beta_1, beta_2, epsilon, decay, rho])
        -- config fields not used by the chosen optimizer stay 0.
    :raises ValueError: for an unsupported optimizer name (the original fell
        through and crashed with a NameError on ``optimizer_selected``).
    """
    lr = 0
    beta_1 = 0
    beta_2 = 0
    epsilon = 0
    decay = 0
    rho = 0
    if s_optimizer == "adam":
        lr = 0.001
        beta_1 = 0.9
        beta_2 = 0.999
        epsilon = 1e-08
        decay = 0.0
        optimizer_selected = keras.optimizers.Adam(lr=lr, beta_1=beta_1, beta_2=beta_2,
                                                   epsilon=epsilon, decay=decay)
    elif s_optimizer == "rms_opt":
        lr = 0.001
        rho = 0.9
        epsilon = 1e-08
        decay = 0.0
        optimizer_selected = keras.optimizers.RMSprop(lr=lr, rho=rho,
                                                      epsilon=epsilon, decay=decay)
    else:
        raise ValueError("unsupported optimizer: %s" % s_optimizer)
    optimizer_config = [lr, beta_1, beta_2, epsilon, decay, rho]
    return optimizer_selected, optimizer_config
#
def choose_action(s_alg, state, agent_pos, model, s_prob, step):
    """Select the next action for the current state under algorithm *s_alg*.

    The model may be grown on the fly (unseen states / object types get
    zero-initialised entries), which is why the possibly-extended model is
    returned together with the chosen action.

    :param s_alg: "QL", "DQN" or one of the DSRL variant names.
    :param state: symbolic grid built by update_state; for QL/DQN the agent
        cell is stamped with code 120 (this mutates the caller's array).
    :param agent_pos: (x, y) position of the agent.
    :param model: pandas Q-table (QL / DSRL*) or Keras network (DQN).
    :param s_prob: epsilon for the epsilon-greedy exploration step.
    :param step: episode step counter; the first 5 steps always explore.
    :return: (action_name, model, label describing how the action was picked)
    """
    # print("\nPREVIOUS MODEL - CHOOSE ACTION\n", model)
    zero = False  # becomes True when the best known Q-value is still 0
    if s_alg == "QL":
        state[agent_pos[1]][agent_pos[0]] = 120  # stamp the agent so the state string is unique
        s = str(state)  # the full grid string is the Q-table key
        if s not in model.index:
            # Unseen state: append four zero-valued rows, one per action.
            indices = [np.array([s, s, s, s]), np.array(['up', 'down', 'right', 'left'])]
            df_zero = pd.DataFrame(np.zeros([4, 1]), index=indices)
            model = model.append(df_zero)
            model = model.fillna(0)
        n_action = np.argmax(model.loc[s][0])  # Choose the max argument
        if max(model.loc[s][0]) == 0: zero = True
    elif s_alg == "DSRL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object":
        a_v_list = []  # per-object lists of (action, value)
        d = {}         # action -> value summed over all visible objects
        obj_list = create_obj_list(state)
        rel_list = relation_obj_list(obj_list, agent_pos)
        new_state = rel_list  # objects expressed relative to the agent
        for obj in new_state:  # FOR ALL OBJECTS SEEN
            tp_n_c = str(obj.tp)   # GET THE TYPE FROM THE NEW STATE
            s_n_c = str(obj.loc)   # GET THE LOCATION FROM THE NEW STATE
            if tp_n_c not in model.columns:
                # First time this object type is seen: add a zero column.
                model[tp_n_c] = 0
            if s_n_c not in model.index:
                # First time this relative position is seen: add zero rows.
                m_index = pd.MultiIndex(levels=[[s_n_c], actions],
                                        labels=[[0, 0, 0, 0], [0, 1, 2, 3]],
                                        names=['state', 'actions'])
                df_zero = pd.DataFrame(index=m_index)
                model = model.append(df_zero)
                model = model.fillna(0)
            Qts_a = model[tp_n_c].loc[s_n_c]  # Q-values for this (type, rel-pos)
            global explore_dict
            if s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near":  # Calculate the distance
                # Weight Q-values by 1/d^2 so nearer objects dominate the vote.
                s_n_c_abs = [int(s) for s in s_n_c if s.isdigit()]  # digits of the rel-pos string -> |dx|, |dy|
                distance = np.sqrt(s_n_c_abs[0]**2 + s_n_c_abs[1]**2)
                Qts_a = Qts_a.divide(distance*distance, axis=0)
            a_v = []
            for action, value in Qts_a.items():
                # Penalise actions that lead to frequently visited cells.
                pos_x = agent_pos[0]
                pos_y = agent_pos[1]
                if action == 'up':
                    pos_y-=1
                elif action =="down":
                    pos_y+=1
                elif action =="right":
                    pos_x +=1
                else:
                    pos_x -=1
                if (pos_x, pos_y) in explore_dict:
                    a_v.append((action, value-0.1*explore_dict[(pos_x, pos_y)]))
                else:
                    a_v.append((action, value))
            a_v_list.append(a_v)  # Append Q-value
        # Sum the values of all Qs into a single Q
        for element in a_v_list:
            for a in element:
                act = a[0]  # Action
                val = a[1]  # Value
                d[act] = d.get(act, 0) + val  # Sum values for each Q
        if d != {}:  # BE CAREFUL THIS IS A DICT (argmax does not work as usual)
            inverse = [(value, key) for key, value in d.items()]  # CALCULATE ALL KEYS
            n_action = max(inverse)[1]  # Choose the max argument
            if max(d.values()) == 0: zero = True
        else:
            # No objects visible: nothing to rank, act randomly.
            n_action = random.choice(actions)
    elif s_alg == "DQN":
        state[agent_pos[1]][agent_pos[0]] = 120
        state = state.reshape((1, -1))  # flatten the grid into the network input
        q = model.predict(state)
        n_act = np.argmax(q[0])
        n_action = actions[n_act]
        if max(q[0]) == 0: zero = True
    x = random.random()  # E greedy exploration
    if step < 5 or x < s_prob:
        n_action = random.choice(actions)
        print_action = 'Random Act (Prob):'
    elif zero == True:
        # All Q-values are zero: keep the (arbitrary) greedy pick, label it.
        print_action = 'NOT Random Act (Zero):'
        pass
    else:
        print_action = 'Chosen Act:'
    # print("\nNEW MODEL - CHOOSE ACTION\n", model)
    # explore_set.add(tuple(agent_pos))
    return n_action, model, print_action
alfa = 1  # Learning rate (1 = new estimate fully replaces the old Q-value)
gamma = 0.9  # Temporal discount factor for future rewards
def learn(s_alg, model, state_t, state_t1, agent_t_pos, agent_t1_pos, reward, action_t, end_game, net_conf, exp_replay):
    """Apply one learning update for the transition (state_t, action_t) -> (state_t1, reward).

    Dispatches on s_alg: "QL" does tabular Q-learning keyed on the whole
    stringified grid, the "DSRL*" variants do object-relative Q-learning on a
    (object type, relative position) table, and "DQN" trains a network from an
    experience-replay batch. Terminal steps (end_game == True) store the raw
    reward instead of a bootstrapped target.

    Args:
        s_alg: algorithm name selecting the update rule.
        model: pandas DataFrame Q-table (QL / DSRL*) or a network (DQN).
        state_t, state_t1: grid observations before/after the move; mutated in
            place (the agent's cell is stamped with value 120).
        agent_t_pos, agent_t1_pos: agent (x, y) position before/after the move.
        reward: scalar reward obtained for the transition.
        action_t: action taken at state_t ("up"/"down"/"right"/"left").
        end_game: True when this was the terminal step of the episode.
        net_conf: DQN-only dict of network hyper-parameters.
        exp_replay: DQN-only experience-replay buffer.

    Returns:
        (model, batch_loss, exp_replay): the (possibly grown) model, the DQN
        batch loss (0 for tabular algorithms), and the replay buffer.
    """
    # print("\nPREVIOUS MODEL - LEARN\n", model)
    batch_loss = 0
    if s_alg == "QL":
        # Stamp the agent's cell so the stringified grid encodes its position.
        state_t[agent_t_pos[1]][agent_t_pos[0]] = 120
        state_t1[agent_t1_pos[1]][agent_t1_pos[0]] = 120
        s_t = str(state_t)
        s_t1 = str(state_t1)
        # Lazily grow the Q-table: add a zero-initialised (state, action) block
        # for any state string not seen before.
        if s_t1 not in model.index:
            indices = [np.array([s_t1, s_t1, s_t1, s_t1]), np.array(['up', 'down', 'right', 'left'])]
            df_zero = pd.DataFrame(np.zeros([4, 1]), index=indices)
            model = model.append(df_zero)
        if s_t not in model.index:
            indices = [np.array([s_t, s_t, s_t, s_t]), np.array(['up', 'down', 'right', 'left'])]
            df_zero = pd.DataFrame(np.zeros([4, 1]), index=indices)
            model = model.append(df_zero)
        model = model.fillna(0)
        if end_game == False:
            # Standard Q-learning target: r + gamma * max_a Q(s', a).
            max_value = max(model.loc[s_t1][0]) # max(df.loc[new_state][0])
            Q_value = model.loc[s_t, action_t][0]
            updated_model = Q_value + alfa * (reward + (gamma * (max_value)) - Q_value)
        else:
            updated_model = reward
        model.loc[s_t, action_t] = updated_model
    elif s_alg == "DSRL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object":
        max_value = 0
        # Build object-relative states: each entry pairs an object type with
        # its position relative to the agent, before and after the move.
        obj_list = create_obj_list(state_t)
        rel_list = relation_obj_list(obj_list, agent_t_pos)
        old_state = rel_list
        obj_list = create_obj_list(state_t1)
        rel_list = relation_obj_list(obj_list, agent_t1_pos)
        new_state = rel_list
        # NOTE(review): assumes old_state and new_state enumerate the same
        # objects in the same order -- confirm against create_obj_list.
        for i in range(len(old_state)):
            # Check all items in old state
            obj_prev = old_state[i]
            tp_prev = str(obj_prev.tp)
            s_prev = str(obj_prev.loc)
            # Check all items in new state
            obj_new = new_state[i]
            tp_new = str(obj_new.tp)
            s_new = str(obj_new.loc)
            if tp_new not in model.columns: # If type is new, then add type
                model[tp_new] = 0
            if s_new not in model.index: # If state is new, then add state
                m_index = pd.MultiIndex(levels=[[s_new], actions],
                                        labels=[[0, 0, 0, 0], [0, 1, 2, 3]],
                                        names=['state', 'actions'])
                df_zero = pd.DataFrame(index=m_index)
                model = model.append(df_zero)
                model = model.fillna(0)
            max_value = max(model[tp_new].loc[s_new])
            if s_alg == "DSRL": # THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL
                # Plain per-object Q update, applied to every object each step.
                if end_game == False:
                    Q_v = model[tp_prev].loc[s_prev, action_t]
                    model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value) - Q_v)
                else:
                    model[tp_prev].loc[s_prev, action_t] = reward
            elif s_alg == "DSRL_dist": # THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL
                # On rewarded steps only objects that were adjacent (both
                # relative coordinates < 2) are updated; zero-reward steps
                # update every object.
                if reward != 0:
                    s_p_c = [int(s) for s in s_prev if s.isdigit()]
                    if s_p_c[0] < 2 and s_p_c[1] < 2:
                        # EDITIONG DELETE
                        if end_game == False:
                            Q_v = model[tp_prev].loc[s_prev, action_t]
                            model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value) - Q_v)
                        else:
                            model[tp_prev].loc[s_prev, action_t] = reward
                else:
                    if end_game == False:
                        Q_v = model[tp_prev].loc[s_prev, action_t]
                        model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value) - Q_v)
                    else:
                        model[tp_prev].loc[s_prev, action_t] = reward
            elif s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near": # THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL
                # Like DSRL_dist, but rewarded updates are also gated on the
                # object's type matching the reward sign ("180" = negative
                # object, "60" = positive object).
                max_value_positive = max(model[tp_new].loc[s_new])
                if reward != 0:
                    s_p_c = [int(s) for s in s_prev if s.isdigit()] # s_p_c = state_previous_absolute_distance
                    if s_p_c[0] < 2 and s_p_c[1] < 2: # IF IT IS CLOSE BY, THEN UPDATE ONLY THE CLOSE ONE:
                        if reward < 0 and tp_new == "180": # IF REWARD IS NEGATIVE and NEW OBJECT IS NEGATIVE UPDATE ONLY NEGATIVE TYPE:
                            if end_game == False:
                                Q_v = model[tp_prev].loc[s_prev, action_t]
                                model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
                            else:
                                model[tp_prev].loc[s_prev, action_t] = reward
                        elif reward > 0 and tp_new == "60": # IF REWARD IS POSITIVE and NEW OBJECT IS POSITIVE UPDATE ONLY POSITIVE TYPE:
                            if end_game == False:
                                Q_v = model[tp_prev].loc[s_prev, action_t]
                                model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
                            else:
                                model[tp_prev].loc[s_prev, action_t] = reward
                # IF reward is zero
                else:
                    if end_game == False:
                        Q_v = model[tp_prev].loc[s_prev, action_t]
                        if tp_prev == "180": # IF THE PREVIOUS OBJECT WAS NEGATIVE
                            model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
                        elif tp_prev == "60": # IF THE PREVIOUS OBJECT WAS POSITIVE
                            model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
                    else:
                        model[tp_prev].loc[s_prev, action_t] = reward
            elif s_alg == "DSRL_dist_type_near_propNeg": # I try to solve this with max and min, but it did not work very well(THEY STILL HAVE THE PROBLEM OF NOT PROPAGATING THE NEGATIVE SIGNAL)
                # As above, but negative-type objects bootstrap from the
                # minimum next-state value instead of the maximum.
                max_value_positive = max(model[tp_new].loc[s_new])
                min_value_negative = min(model[tp_new].loc[s_new])
                if reward != 0:
                    s_p_c = [int(s) for s in s_prev if s.isdigit()] # s_p_c = state_previous_absolute_distance
                    if s_p_c[0] < 2 and s_p_c[1] < 2: # IF IT IS CLOSE BY, THEN UPDATE ONLY THE CLOSE ONE:
                        if reward < 0 and tp_new == "180": # IF REWARD IS NEGATIVE and NEW OBJECT IS NEGATIVE UPDATE ONLY NEGATIVE TYPE:
                            if end_game == False:
                                Q_v = model[tp_prev].loc[s_prev, action_t]
                                model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * min_value_negative) - Q_v)
                            else:
                                model[tp_prev].loc[s_prev, action_t] = reward
                        elif reward > 0 and tp_new == "60": # IF REWARD IS POSITIVE and NEW OBJECT IS POSITIVE UPDATE ONLY POSITIVE TYPE:
                            if end_game == False:
                                Q_v = model[tp_prev].loc[s_prev, action_t]
                                model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
                            else:
                                model[tp_prev].loc[s_prev, action_t] = reward
                # IF reward is zero
                else:
                    if end_game == False:
                        Q_v = model[tp_prev].loc[s_prev, action_t]
                        if tp_prev == "180": # IF THE PREVIOUS OBJECT WAS NEGATIVE
                            model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * min_value_negative) - Q_v)
                        elif tp_prev == "60": # IF THE PREVIOUS OBJECT WAS POSITIVE
                            model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
                    else:
                        model[tp_prev].loc[s_prev, action_t] = reward
            elif s_alg == "DSRL_object_near" or s_alg == "DSRL_object":
                max_value_positive = max(model[tp_new].loc[s_new])
                # Find the object that the agent interacted with:
                # This means that the agents has to know that the object which interacted with
                # After finding it, he has to assign the value to that object.
                # This means that I have to find the type and the state of this object that has now x=zero y=zero
                # print("obj_new.loc[0]\n", obj_new.loc[0])
                # print("obj_new.loc[1]\n", obj_new.loc[1])
                # print("action_t\n", action_t)
                # print("s_prev\n", s_prev)
                if obj_new.loc[0] == 0 and obj_new.loc[1] == 0:
                    # The object now coincides with the agent: reconstruct its
                    # pre-move position relative to the agent from the action.
                    tp_to_update = tp_new
                    # print("tp_new\n", tp_new)
                    if action_t == "up":
                        s_prev_to_update = str((0,1))
                    elif action_t == "down":
                        s_prev_to_update = str((0,-1))
                    elif action_t == "right":
                        s_prev_to_update = str((-1,0))
                    elif action_t == "left":
                        s_prev_to_update = str((1,0))
                    # print("s_prev_to_update\n", s_prev_to_update)
                    if end_game == False:
                        Q_v = model[tp_to_update].loc[s_prev_to_update, action_t]
                        model[tp_to_update].loc[s_prev_to_update, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
                    else:
                        model[tp_to_update].loc[s_prev_to_update, action_t] = reward
                if reward == 0:
                    if end_game == False:
                        Q_v = model[tp_prev].loc[s_prev, action_t]
                        model[tp_prev].loc[s_prev, action_t] = Q_v + alfa * (reward + (gamma * max_value_positive) - Q_v)
                    else:
                        model[tp_prev].loc[s_prev, action_t] = reward
    elif s_alg == "DQN":
        # Stamp the agent position, flatten both grids to 1-D network inputs,
        # store the transition, and train on a replay-sampled batch.
        state_t[agent_t_pos[1]][agent_t_pos[0]] = 120
        state_t1[agent_t1_pos[1]][agent_t1_pos[0]] = 120
        state_t = state_t.reshape((1, -1))
        state_t1 = state_t1.reshape((1, -1))
        action_t = actions_dict[action_t]
        exp_replay.remember([state_t, action_t, reward, state_t1], end_game) # [old_state, old_action, reward, new_state]
        inputs, targets = exp_replay.get_batch(model, batch_size=net_conf["Batch_size"])
        batch_loss = model.train_on_batch(inputs, targets)
    # print("\nNEW MODEL - LEARN\n", model)
    return model, batch_loss, exp_replay
''' PROGRAM START '''
# Absolute directory containing this script; prepended to all load/save paths.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def run(s_env, s_alg, s_learn, s_load, s_print, s_auto, s_episode, s_cond_to_end, s_server, s_net_comb_param, s_load_path, s_prob, s_sample, s_save):
    """Run the full experiment: `s_sample` independent runs of `s_episode` episodes each.

    Each sample builds a fresh environment, creates or loads a model, plays
    episodes with choose_action(), optionally updates the model with learn(),
    tracks score/percent/loss statistics, and (when s_save is True) writes the
    best model plus all statistics to an Excel workbook under ./Results.

    The parameters mirror the module-level configuration constants defined in
    the parameter block at the bottom of this file (Env, Alg, Learn, ...).
    """
    # DQN network hyper-parameters selected by s_net_comb_param (unused by the
    # tabular algorithms, but always built).
    net_conf = {"N_actions": n_actions,
                "Max_memory": max_memory_list[s_net_comb_param],
                "Hidden_size": hidden_size_list[s_net_comb_param],
                "Batch_size": batch_size_list[s_net_comb_param],
                "Optimizer": optimizer_list[0]}
    exp_replay = ExperienceReplay(max_memory=net_conf["Max_memory"])
    begin = time.time()
    begin_time = time.strftime('%X %x')
    print("\n\n --- BEGINING --- s_sample: %s \n begin_time: %s \n" % (s_sample, begin_time))
    # Per-sample statistics, one column appended per sample.
    df_score = pd.DataFrame()
    df_percent_list = pd.DataFrame()
    df_loss_list = pd.DataFrame()
    df_time_sample = pd.DataFrame()
    avg_last_score_list = []
    if s_server == False: screen = pygame.display.set_mode((400 + 37 * 5, 330 + 37 * 5))
    score_list_best = [0]
    for sample in list(range(1, s_sample+1)):
        experiment_configurations = (sample, s_env, s_alg, s_episode, s_learn, s_load, s_print, s_auto, s_cond_to_end, s_server, s_net_comb_param, s_prob)
        print("\n - START - "
              "\n sample: %s"
              "\n s_env: %s"
              "\n s_alg: %s"
              "\n s_episode: %s"
              "\n s_learn: %s"
              "\n s_load: %s"
              "\n s_print: %s"
              "\n s_auto: %s"
              "\n s_cond_to_end: %s"
              "\n s_server: %s"
              "\n s_net_comb_param: %s"
              "\n s_prob: %s" % experiment_configurations)
        start = time.time()
        start_time = time.strftime('%X %x')
        print("\nStart time: ", start_time)
        negativo_list, positivo_list, agent, wall_list, h_max, v_max = environment_conf(s_env)
        env_dim = [h_max, v_max]
        # load file for transfer learning
        if s_load == True:
            try:
                model, op_conf = load_model(s_alg, __location__ + s_load_path)
            except Exception as e:
                # NOTE(review): on a failed load `model` stays undefined and the
                # run crashes later -- confirm a fallback to create_model is not wanted.
                print("DID NOT FIND THE FILE", __location__ + s_load_path, str(e))
        else:
            model, op_conf = create_model(s_alg, env_dim, net_conf)
        # region INITIALIZE VARIABLES 1
        percent_list = []
        score = 0
        score_list = []
        episodes = 0
        episodes_list = []
        steps = 0
        steps_list = []
        batch_loss = 0
        loss_list = []
        # endregion
        # main component to an episode
        while (episodes < s_episode): # max_episodes
            negativo_list, positivo_list, agent, wall_list, h_max, v_max = environment_conf(s_env)
            # region INITIALIZE VARIABLES 2
            episodes += 1
            episodes_list.append(episodes)
            max_steps = 100
            steps_list.append(steps)
            steps = 0
            act_list = []
            last_move = False
            action_chosen = ""
            encountered = 0
            pos_collected = 0
            prob = s_prob
            # endregion
            if s_server == False:
                # region DRAW SCREEN
                screen.fill(white)
                show_Alg(s_alg, screen)
                show_Samples(sample, screen)
                show_Level(episodes, screen)
                show_Score(score, screen)
                show_Steps(steps, screen)
                show_Percent(percent_list[-10:], screen)
                show_Steps_list(steps_list[-30:], screen)
                show_Act_List(act_list[-20:], screen)
                show_Action(action_chosen, screen)
                show_Env(s_env, screen)
                draw_objects(agent, positivo_list, negativo_list, wall_list, screen)
                pygame.display.flip()
                # endregion
            # main reinforcement learning part
            while (True): # max_steps or condition to finish
                sleep(speed)
                ''' EVENT HANDLE '''
                key_pressed = False
                set_action = False
                # Interactive event loop; skipped entirely in headless (server) mode.
                while (s_server == False):
                    for event in pygame.event.get():
                        # QUIT GAME
                        if event.type == pygame.QUIT:
                            pygame.quit()
                            sys.exit()
                        # ADD OR DELETE WALL
                        if event.type == pygame.MOUSEBUTTONDOWN:
                            pass
                            # if (pygame.mouse.get_pressed() == (1, 0, 0)): # LEFT BUTTON (add wall)
                            #     pos = pygame.mouse.get_pos()
                            #     x = (pos[0] - x_g) / (m + w)
                            #     y = (pos[1] - y_g) / (m + h)
                            #     x = math.trunc(x)
                            #     y = math.trunc(y)
                            #     w_has = False
                            #     for item in wall_list:
                            #         if math.trunc((item[0] - x_g) / (m + w)) == x and math.trunc(
                            #                 (item[1] - y_g) / (m + h)) == y:
                            #             w_has = True
                            #     if w_has == False:
                            #         wall = Class.Wall('wall', x, y)
                            #         print('wall ', wall, 'added')
                            #         wall_list.append(wall)
                            # if (pygame.mouse.get_pressed() == (0, 0, 1)): # RIGHTBUTTON (delete wall)
                            #     pos = pygame.mouse.get_pos()
                            #     x = (pos[0] - x_g) / (m + w)
                            #     y = (pos[1] - y_g) / (m + h)
                            #     x = math.trunc(x)
                            #     y = math.trunc(y)
                            #     wall = Class.Wall('wall', x, y)
                            #     for i in wall_list:
                            #         if i == wall:
                            #             wall_list.remove(wall)
                            #             print('wall ', wall, 'removed')
                        # EVENT - ANY PRESSED KEY
                        # PRESS A KEY
                        if event.type == pygame.KEYDOWN:
                            # SAVE AND QUIT - KEY P
                            if event.key == pygame.K_p:
                                pygame.quit()
                                sys.exit()
                            # PLOT AGENT`S PERFORMENCE - KEY G
                            if event.key == pygame.K_g:
                                plt.plot(score_list)
                                plt.ylabel('Score')
                                plt.xlabel('Total Steps')
                                plt.title('Performance of the Agent')
                                plt.show()
                                plt.plot(percent_list)
                                plt.ylabel('Percentage of objects +')
                                plt.xlabel('Total Steps')
                                plt.title('Episode over 100 times step each')
                                plt.show()
                                if s_alg == "DQN":
                                    plt.plot(loss_list)
                                    plt.ylabel('loss')
                                    plt.xlabel('Total Steps')
                                    plt.title('batch_loss')
                                    plt.show()
                            # MOVE - SPACE BAR
                            if event.key == pygame.K_SPACE:
                                key_pressed = True
                                break
                            # MOVE - ARROW KEYS
                            if event.key in p_keys:
                                key_pressed = True
                                set_action = True
                                if event.key == pygame.K_w: # North # add_act('↑') ⇦ ⇨ ⇧ ⇩
                                    key_action = "up"
                                if event.key == pygame.K_s: # South # add_act('↓') ⬅ ➡ ⬆ ⬇
                                    key_action = "down"
                                if event.key == pygame.K_d: # West # add_act('→')
                                    key_action = "right"
                                if event.key == pygame.K_a: # East # add_act('←')
                                    key_action = "left"
                                break
                    # Run game if key is preseed or automatic is selected
                    if key_pressed or s_auto:
                        break
                # BREAK IF IT WAS THE LAST MOVE
                if last_move == True:
                    break
                # RUN_GAME
                steps += 1
                ''' OLD STATE - S 1 - 1'''
                state_t = update_state(h_max, v_max, agent, positivo_list, negativo_list, wall_list)
                agent_t = agent.pos
                ''' CHOOSE ACTION - AGENT ACT - 2'''
                action_chosen, model, print_action = choose_action(s_alg, state_t, agent_t, model, prob,steps)
                if set_action: action_chosen = key_action
                ''' CHANGE THE WORLD - UP_ENV - 3'''
                agent.try_move(action_chosen, wall_list)
                act_list.append(action_chosen)
                # if s_print: print(print_action, action_chosen)
                ''' NEW STATE - S2 - 4'''
                state_t1 = update_state(h_max, v_max, agent, positivo_list, negativo_list, wall_list)
                agent_t1 = agent.pos
                # Exploration bookkeeping shared with choose_action().
                global explore_set
                global explore_dict
                if s_print:
                    # print('\n>>>> Level: ' + str(episodes) + ' | Step: ' + str(
                    #     steps) + ' | New_agent_pos: ' + str(agent.pos) + ' <<<<')
                    pos_tuple = tuple(agent.pos)
                    explore_set.add(pos_tuple)
                    if pos_tuple not in explore_dict:
                        explore_dict[pos_tuple] = 1
                    else:
                        explore_dict[pos_tuple] += 1
                    if steps==max_steps:
                        print("Number of explore node: "+str(len(explore_set)))
                        print("Explored Node postion: "+str(explore_dict))
                        explore_set = set()
                        explore_dict = dict()
                ''' GET REWARD - 5 '''
                # region GET REWARD AND DELETE COLLECTED OBJECT
                prev_score = score
                score += step_reward
                for positivo in positivo_list:
                    if agent.pos == positivo.pos:
                        encountered += 1
                        pos_collected += 1
                        score += positive_reward
                        positivo = Class.Positivo('positivo', agent.pos[0], agent.pos[1])
                        positivo_list.remove(positivo)
                        # if s_print == True and s_server == False:
                        #     print(' Hit the Positivo')
                for negativo in negativo_list:
                    if agent.pos == negativo.pos:
                        encountered += 1
                        score -= negative_reward
                        negativo = Class.Negativo('negativo', agent.pos[0], agent.pos[1])
                        negativo_list.remove(negativo)
                        # if s_print == True and s_server == False:
                        #     print(' Hit the Negativo')
                new_score = score
                score_list.append(score)
                reward = new_score - prev_score
                # endregion
                ''' LEARN - 6 '''
                # CONDITION TO FINISH THE Episode
                # NOTE(review): for the non-'max_steps' conditions the extra
                # `or steps > max_steps` in the elif guards makes the first
                # matching elif fire on timeout regardless of s_cond_to_end;
                # the inner test still sets last_move, but confirm intended.
                if s_cond_to_end == 'max_steps':
                    if steps == max_steps:
                        last_move = True
                elif s_cond_to_end == 'coll_all' or steps > max_steps:
                    if len(positivo_list) == 0 and len(negativo_list) == 0 or steps > max_steps:
                        last_move = True
                elif s_cond_to_end == 'only_positive' or steps > max_steps:
                    if len(positivo_list) == 0 or steps > max_steps:
                        last_move = True
                elif s_cond_to_end == 'only_negative' or steps > max_steps:
                    if len(negativo_list) == 0 or steps > max_steps:
                        last_move = True
                # LEARN
                if s_learn == True:
                    action_t = action_chosen
                    if last_move == False:
                        ''' LEARN '''
                        model, batch_loss, exp_replay = learn(s_alg, model, state_t, state_t1, agent_t, agent_t1, reward, action_t, False, net_conf, exp_replay)
                    else:
                        ''' LEARN FINAL '''
                        model, batch_loss, exp_replay = learn(s_alg, model, state_t, state_t1, agent_t, agent_t1, reward, action_t, True, net_conf, exp_replay)
                if s_server == False:
                    # region DRAW SCREEN
                    screen.fill(white)
                    show_Alg(s_alg, screen)
                    show_Samples(sample, screen)
                    show_Level(episodes, screen)
                    show_Score(score, screen)
                    show_Steps(steps, screen)
                    show_Percent(percent_list[-10:], screen)
                    show_Steps_list(steps_list[-30:], screen)
                    show_Act_List(act_list[-20:], screen)
                    show_Action(action_chosen, screen)
                    show_Env(s_env, screen)
                    draw_objects(agent, positivo_list, negativo_list, wall_list, screen)
                    pygame.display.flip()
                    # endregion
            # Fraction of encountered objects that were positive this episode.
            try:
                percent = pos_collected / encountered
            except ZeroDivisionError:
                percent = 0
            percent_list.append(percent)
            loss_list.append(batch_loss)
            print("Episode: ", episodes)
        # region TIME 1
        print("Start time: ", start_time)
        end = time.time()
        end_time = time.strftime('%X %x')
        print("End time: ", end_time)
        time_elapsed = end - start
        print("Time elapsed: ", time_elapsed)
        # endregion
        '''GET THE BEST MODEL'''
        if max(score_list) > max(score_list_best):
            best_model = model
            score_list_best = score_list
        # region MAKE LIST OF THE RESULTS
        avg_last_score_list.append(score_list[-1])
        score_list_df = pd.DataFrame({'Score': score_list})
        percent_list_df = pd.DataFrame({'Percent': percent_list})
        loss_list_df = pd.DataFrame({'Batch_loss': loss_list})
        time_sample_df = pd.DataFrame({'Time': [time_elapsed]})
        df_score = pd.concat([df_score, score_list_df], ignore_index=True, axis=1)
        df_percent_list = pd.concat([df_percent_list, percent_list_df], ignore_index=True, axis=1)
        df_loss_list = pd.concat([df_loss_list, loss_list_df], ignore_index=True, axis=1)
        df_time_sample = pd.concat([df_time_sample, time_sample_df], ignore_index=True, axis=1)
        # endregion
    if s_save == True:
        # region PATH TO SAVE
        save_path_core = __location__ + "/Results/"
        if s_learn == True: save_path = save_path_core + "Train/Env_" + str(s_env) + "/Train_Env_" + str(s_env) + "_" + s_alg
        else: save_path = save_path_core + "Test/Env_" + str(s_env) + "/Test_Env_" + str(s_env) + "_" + s_alg
        if s_alg == "DQN": save_path += "_" + str(s_net_comb_param)
        # convert begin_time to string and format it
        time_path = begin_time.replace(" ", " ")
        time_path = time_path.replace(":", " ")
        time_path = time_path.replace("/", "-")
        # append to the save path
        save_path = save_path + " " + time_path
        if s_load == True:
            load_path = " loaded_with " + s_load_path.replace("/", "_")
            save_path = save_path + load_path
        # If it doesnt find the path, then create a new path
        if not os.path.exists(os.path.dirname(save_path)):
            try:
                os.makedirs(os.path.dirname(save_path))
            except OSError as exc: # Guard against race condition
                print("ERROR when saving the File")
        # endregion
        print("save_path: ", save_path)
        # region SAVE ALL
        # IF IT IS NOT DQN NULL NET CONF. VALUES
        if s_alg != "DQN":
            op_conf = [0, 0, 0, 0, 0, 0]
            net_conf = {"N_actions":0, "Max_memory":0, "Hidden_size":0, "Batch_size":0, "Optimizer":"none"}
        avg_last_score = np.average(avg_last_score_list)
        # NOTE(review): the 'Prob' row below records the module-level global
        # Prob, not the s_prob argument actually used this run -- confirm.
        config_list = pd.concat([pd.Series({'Run_Conf': "A"}),
                                 pd.Series({'Env_conf': s_env}),
                                 pd.Series({'Algort': s_alg}),
                                 pd.Series({'Learn': s_learn}),
                                 pd.Series({'Load': s_load}),
                                 pd.Series({'Samples': s_sample}),
                                 pd.Series({'Episode': s_episode}),
                                 pd.Series({'Max_steps': max_steps}),
                                 pd.Series({'s_cond_to_end': s_cond_to_end}),
                                 pd.Series({'Auto': s_auto}),
                                 pd.Series({'Server': s_server}),
                                 pd.Series({'Print': s_print}),
                                 pd.Series({'MODEL CONF': ""}),
                                 pd.Series({'alfa': alfa}),
                                 pd.Series({'gamma': gamma}),
                                 pd.Series({'Prob': Prob}),
                                 pd.Series({'N_actions': net_conf["N_actions"]}),
                                 pd.Series({'Max_memory': net_conf["Max_memory"]}),
                                 pd.Series({'Hidden_size': net_conf["Hidden_size"]}),
                                 pd.Series({'Batch_size': net_conf["Batch_size"]}),
                                 pd.Series({'Optimizer': net_conf["Optimizer"]}),
                                 pd.Series({'lr': op_conf[0]}),
                                 pd.Series({'beta_1': op_conf[1]}),
                                 pd.Series({'beta_2': op_conf[2]}),
                                 pd.Series({'epsilon': op_conf[3]}),
                                 pd.Series({'decay': op_conf[4]}),
                                 pd.Series({'rho': op_conf[5]}),
                                 pd.Series({'': ""}),
                                 pd.Series({'AVG SCORE': avg_last_score})])
        config_list = config_list.to_frame()
        if s_print: print("\nconfig_list:\n", config_list)
        # Create a Pandas Excel writer using XlsxWriter as the engine.
        writer = pd.ExcelWriter(save_path + ".xlsx", engine='xlsxwriter')
        # SAVING CONFIG:
        config_list.to_excel(writer, sheet_name='Run_Conf', header=False)
        worksheet = writer.sheets['Run_Conf']
        worksheet.set_column('A:B', 15)
        # SAVING SCORE:
        df_score_mean = df_score.mean(axis=1)
        df_score.insert(0, "Avg " + str(s_sample), df_score_mean)
        df_score.to_excel(writer, sheet_name='Score')
        worksheet = writer.sheets['Score']
        worksheet.write(0, 0, "Score")
        # SAVING PERCENT:
        df_percent_list_mean = df_percent_list.mean(axis=1)
        df_percent_list.insert(0, "Avg " + str(s_sample), df_percent_list_mean)
        df_percent_list.to_excel(writer, sheet_name='Percent')
        worksheet = writer.sheets['Percent']
        worksheet.write(0, 0, "Percent")
        # SAVING LOSS:
        df_loss_list.to_excel(writer, sheet_name='Loss')
        worksheet = writer.sheets['Loss']
        worksheet.write(0, 0, "Loss")
        # SAVING TIME:
        df_time_sample.to_excel(writer, sheet_name='Time')
        worksheet = writer.sheets['Time']
        worksheet.write(0, 0, "Time")
        # region CELL SIZE
        # worksheet = writer.sheets['Score']
        # worksheet.set_column('A:B', 15)
        # worksheet = writer.sheets['Time']
        # worksheet.set_column('A:B', 15)
        # endregion
        # SAVING BEST MODEL (out of # Samples):
        if s_alg == "DSRL" or s_alg == "QL" or s_alg == "DSRL_dist" or s_alg == "DSRL_dist_type" or s_alg == "DSRL_dist_type_near" or s_alg == "DSRL_dist_type_near_propNeg" or s_alg == "DSRL_object_near" or s_alg == "DSRL_object":
            # SAVING MODEL CONFIGURATIONS:
            best_model.to_excel(writer, sheet_name='model')
            # CONDITIONAL COLOR
            worksheet = writer.sheets['model']
            for x in range(2, 700, 4):
                cell = "C" + str(x) + ":D" + str(x + 3)
                worksheet.conditional_format(cell, {'type': '3_color_scale'})
            # CELL SIZE
            worksheet = writer.sheets['model']
            worksheet.set_column('A:A', 50)
        # region ADD PLOTS
        # worksheet = writer.sheets['results']
        # workbook = writer.book
        # chart = workbook.add_chart({'type': 'line'})
        # chart2 = workbook.add_chart({'type': 'line'})
        # chart.add_series({'values': '=results!$B$2:$B$100'})
        # chart2.add_series({'values': '=results!$C$2:$C$10'})
        # worksheet.insert_chart('F3', chart)
        # worksheet.insert_chart('N3', chart2)
        # SAVE DQN MODEL
        if s_learn == True and s_alg == "DQN":
            save_model(best_model, save_path)
        writer.save()
        # endregion
    print("\n - END - "
          "\n sample: %s"
          "\n s_env: %s"
          "\n s_alg: %s"
          "\n s_episode: %s"
          "\n s_learn: %s"
          "\n s_load: %s"
          "\n s_print: %s"
          "\n s_auto: %s"
          "\n s_cond_to_end: %s"
          "\n s_server: %s"
          "\n s_net_comb_param: %s"
          "\n s_prob: %s" % experiment_configurations)
    # region TIME 2
    print("\n\nBegin time: ", begin_time)
    finish = time.time()
    finish_time = time.strftime('%X %x')
    print("Final time: ", finish_time)
    total_time = finish - begin
    print("Total time: ", total_time)
    # endregion
    return
# -------------------------------------------------------------------------------------------------- #
''' SELECT PARAMETERS TO RUN THE SOFTWARE '''
# environment configuration
Env = 11 # id of the predefined grid environment (see environment_conf)
Alg_list = ["QL",
            "DSRL",
            "DSRL_object_near",
            "DQN",
            "DSRL_dist",
            "DSRL_dist_type",
            "DSRL_dist_type_near",
            "DSRL_dist_type_near_propNeg",
            "DSRL_object"]
Alg = Alg_list[2] # Select the algorithm to be used
Learn = False # To update its knowledge
Load = True # To load a learned model
# NOTE(review): Load_path names a "DSRL" checkpoint while Alg selects
# "DSRL_object_near" -- confirm this cross-algorithm transfer is intended.
Load_path = "/Results/Train/Env_11/Train_Env_11_DSRL 02 41 20 05-05-21"
# algorithm configuration
Samples = 2 # Usually 10 samples (repeat 100 episodes for 10 times)
Print = True # Print some info in the terminal
Auto = True # Agent moves Automatic or if False it moves by pressing the Spacebar key
Server = False # True when running headless on a server (skips the pygame window and event loop)
# change Prob to 1 for probe training??
Prob = 0.3 # Probability to make a random move (exploration rate)
Cond_to_end = "max_steps" # Choose from below (there are 4)
Save = False # Save the model
speed = 0.05 # seconds per frame
# Cond_to_end = "max_steps"
# Cond_to_end = "coll_all"
# Cond_to_end = "only_negative"
Episodes = 500 # Usually 1000 or 100
# region DQN Model Configurations:
# max_memory_list = [5, 5, 5, 30, 30, 30, 100, 100, 100]
# hidden_size_list = [5, 30, 270, 5, 30, 270, 5, 30, 270]
# batch_size_list = [1, 1, 1, 10, 10, 10, 32, 32, 32]
max_memory_list = [100, 100, 100, 300, 300, 300, 900, 900, 900]
hidden_size_list = [5, 10, 15, 5, 10, 15, 5, 10, 15]
batch_size_list = [32, 32, 32, 32, 32, 32, 32, 32, 32]
optimizer_list = ["adam", "rms_opt"]
n_actions = 4 # [move_up, move_down, move_left, move_right]
# endregion
Net_comb_param = 4 # index into the max_memory/hidden_size/batch_size lists above
# ------------------------------------------------------------------------------------------- #
run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
# ------------------------------------------------------------------------------------------- #
''' REPEAT DQN Net_Comb_Param '''
# for i in range(9):
#     Net_comb_param = i
#     run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
''' REPEAT Alg for a list of Env '''
# env_list = [2,3]
# for Env in env_list:
#     run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
''' Alg_list for Env_list '''
# env_list = [2,3]
# alg_list = ["QL", "DSRL", "DSRL_object_near", "DQN"]
# for Env in env_list:
#     for Alg in alg_list:
#         run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
| 1.9375 | 2 |
utils/scene_bounding_box.py | davidemarelli/sfm_flow | 8 | 4858 |
import logging
from typing import Tuple
import bpy
from mathutils import Vector
from .object import get_objs
logger = logging.getLogger(__name__)
class SceneBoundingBox():
    """Scene bounding box, build a bounding box that includes all objects except the excluded ones."""

    ################################################################################################
    # Properties
    #

    # ==============================================================================================
    @property
    def width(self):
        """Scene's bounding box width (extent along X)."""
        return self.x_max - self.x_min

    # ==============================================================================================
    @property
    def depth(self):
        """Scene's bounding box depth (extent along Y)."""
        return self.y_max - self.y_min

    # ==============================================================================================
    @property
    def height(self):
        """Scene's bounding box height (extent along Z)."""
        return self.z_max - self.z_min

    # ==============================================================================================
    @property
    def floor_center(self):
        """Scene's bounding center on lower bbox plane."""
        return Vector((self.center[0], self.center[1], self.z_min))

    ################################################################################################
    # Constructor
    #

    # ==============================================================================================
    def __init__(self, scene: bpy.types.Scene,
                 exclude_collections: Tuple[str] = ("SfM_Environment", "SfM_Reconstructions")):
        self.scene = scene
        # Objects in these collections are ignored when computing the bounds.
        self.exclude_collections = exclude_collections
        #
        self.compute()

    ################################################################################################
    # Methods
    #

    # ==============================================================================================
    def _reset(self):
        """Reset center and min/max extremes so a fresh computation starts clean."""
        self.center = Vector()       # type: Vector
        self.x_min = float("inf")    # type: float
        self.x_max = float("-inf")   # type: float
        self.y_min = float("inf")    # type: float
        self.y_max = float("-inf")   # type: float
        self.z_min = float("inf")    # type: float
        self.z_max = float("-inf")   # type: float

    # ==============================================================================================
    def compute(self):
        """Compute the scene bounding box values.

        Bounds are reset first, so calling this again after the scene changed
        yields the current bounding box instead of accumulating old extremes
        (bug fix: previously a recompute kept stale min/max values).
        """
        self._reset()
        objs = get_objs(self.scene, exclude_collections=self.exclude_collections, mesh_only=True)
        logger.debug("Found %i objects in scene %s", len(objs), self.scene.name)
        for obj in objs:
            obb = obj.bound_box
            for i in range(8):
                # Bound-box corners are in object space; bring them to world space.
                p = obj.matrix_world @ Vector(obb[i])
                self.x_min = min(self.x_min, p[0])
                self.x_max = max(self.x_max, p[0])
                self.y_min = min(self.y_min, p[1])
                self.y_max = max(self.y_max, p[1])
                self.z_min = min(self.z_min, p[2])
                self.z_max = max(self.z_max, p[2])
        if objs:
            self.center = Vector(((self.x_max + self.x_min) / 2,
                                  (self.y_max + self.y_min) / 2,
                                  (self.z_max + self.z_min) / 2))
        logger.debug(str(self))

    # ==============================================================================================
    def get_min_vector(self):
        """Get minimum axis."""
        return Vector((self.x_min, self.y_min, self.z_min))

    # ==============================================================================================
    def get_max_vector(self):
        """Get maximum axis."""
        return Vector((self.x_max, self.y_max, self.z_max))

    ################################################################################################
    # Builtin methods
    #

    # ==============================================================================================
    def __str__(self):
        return "Scene bbox values: X=({:.3f}, {:.3f}), Y=({:.3f}, {:.3f}), Z=({:.3f}, {:.3f}), Center={}".format(
            self.x_min, self.x_max, self.y_min, self.y_max, self.z_min, self.z_max, self.center)
| 2.9375 | 3 |
tensor2tensor/trax/rlax/ppo.py | funtion/tensor2tensor | 1 | 4859 | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO in JAX.
Notation:
B, scalar - batch size
T, scalar - number of time-steps in a trajectory, or the value of the padded
time-step dimension.
OBS, tuple - shape of a singular observation from the environment.
Ex: For CartPole-v0 this is (4,) and Pong-v0 it's (210, 160, 3)
A, scalar - Number of actions, assuming a discrete space.
Policy and Value function signatures:
Policy Function :: [B, T] + OBS -> [B, T, A]
Value Function :: [B, T] + OBS -> [B, T, 1]
Policy and Value Function :: [B, T] + OBS -> ([B, T, A], [B, T, 1])
i.e. the policy net should take a batch of *trajectories* and at each time-step
in each batch deliver a probability distribution over actions.
NOTE: It doesn't return logits, rather the expectation is that it returns
log-probabilities instead.
NOTE: The policy and value functions need to take care to not take into account
future time-steps while deciding the actions (or value) for the current
time-step.
Policy and Value Function produces a tuple of the expected output of a policy
function and a value function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import pickle
import time
from absl import logging
import gym
from jax import grad
from jax import jit
from jax import lax
from jax import numpy as np
from jax import random as jax_random
import numpy as onp
from tensor2tensor.envs import env_problem
from tensor2tensor.envs import env_problem_utils
from tensor2tensor.trax import jaxboard
from tensor2tensor.trax import layers
from tensor2tensor.trax import optimizers as trax_opt
from tensor2tensor.trax import trax
from tensorflow.io import gfile
# Default hyper-parameters; their use sites are further down in this module.
DEBUG_LOGGING = False  # enables extra debug output when True
GAMMA = 0.99  # reward discount factor
LAMBDA = 0.95  # presumably the GAE lambda -- confirm at use site
EPSILON = 0.1  # presumably the PPO clipping range -- confirm at use site
EPOCHS = 50  # 100
NUM_OPTIMIZER_STEPS = 100  # optimizer steps per training iteration
PRINT_EVERY_OPTIMIZER_STEP = 20  # logging cadence inside the optimizer loop
BATCH_TRAJECTORIES = 32  # trajectories collected per batch
def policy_and_value_net(rng_key,
                         batch_observations_shape,
                         num_actions,
                         bottom_layers_fn=None,
                         two_towers=True):
  """Constructs and initializes a joint policy/value network.

  Args:
    rng_key: jax rng key used to initialize the network parameters.
    batch_observations_shape: tuple, [B, T] + OBS shape of a batch of
      observations (with -1 for unknown leading dimensions).
    num_actions: int, number of discrete actions.
    bottom_layers_fn: callable returning a fresh list of layers to place
      below the policy/value heads, or None for no such stack.
    two_towers: bool, if True each head gets its own copy of the bottom
      layers, otherwise both heads share one bottom stack.

  Returns:
    A pair (params, net): the initialized parameters and the layer object.
  """
  # NOTE: We use LogSoftmax (log-probabilities) rather than Softmax for
  # numerical stability.
  def fresh_bottom_layers():
    return [] if bottom_layers_fn is None else bottom_layers_fn()

  if two_towers:
    # Separate towers: each head sits on its own copy of the bottom stack.
    policy_tower = fresh_bottom_layers()
    policy_tower.extend([layers.Dense(num_actions), layers.LogSoftmax()])
    value_tower = fresh_bottom_layers()
    value_tower.append(layers.Dense(1))
    net = layers.Branch(
        layers.Serial(*policy_tower),
        layers.Serial(*value_tower),
    )
  else:
    # Shared bottom stack with a two-headed branch on top.
    shared_tower = fresh_bottom_layers()
    shared_tower.append(
        layers.Branch(
            layers.Serial(layers.Dense(num_actions), layers.LogSoftmax()),
            layers.Dense(1)))
    net = layers.Serial(*shared_tower)
  assert net
  return net.initialize(batch_observations_shape, rng_key), net
def optimizer_fun(net_params, step_size=1e-3):
  """Wraps an Adam optimizer into the (state, update, get_params) interface.

  Args:
    net_params: the initial network parameters.
    step_size: float, Adam learning rate.

  Returns:
    Tuple (opt_state, opt_update, get_params) where opt_state packs the
    parameters together with the optimizer slots.
  """
  adam = trax_opt.Adam(step_size=step_size, b1=0.9, b2=0.999, eps=1e-08)

  def opt_init(params):
    # State is (params, optimizer slots).
    return (params, adam.tree_init(params))

  def opt_update(step, grads, state):
    return adam.tree_update(step, grads, state[0], state[1])

  def get_params(state):
    return state[0]

  return opt_init(net_params), opt_update, get_params
# Should this be collect 'n' trajectories, or
# Run the env for 'n' steps and take completed trajectories, or
# Any other option?
# TODO(afrozm): Replace this with EnvProblem?
def collect_trajectories(env,
                         policy_fun,
                         num_trajectories=1,
                         policy=env_problem_utils.CATEGORICAL_SAMPLING,
                         max_timestep=None,
                         boundary=20,
                         epsilon=0.1,
                         reset=True,
                         rng=None):
  """Collect trajectories with the given policy net and behaviour.

  Args:
    env: A gym env interface, for now this is not-batched.
    policy_fun: observations(B,T+1) -> log-probabs(B,T+1, A) callable.
    num_trajectories: int, number of trajectories.
    policy: string, "greedy", "epsilon-greedy", or "categorical-sampling" i.e.
      how to use the policy_fun to return an action.
    max_timestep: int or None, the index of the maximum time-step at which we
      return the trajectory, None for ending a trajectory only when env returns
      done.
    boundary: int, boundary for padding, used in EnvProblem envs.
    epsilon: float, the epsilon for `epsilon-greedy` policy.
    reset: bool, true if we want to reset the envs. The envs are also reset if
      max_max_timestep is None or < 0
    rng: jax rng, splittable.

  Returns:
    A tuple (trajectory, number of trajectories that are done)
    trajectory: list of (observation, action, reward) tuples, where each element
    `i` is a tuple of numpy arrays with shapes as follows:
    observation[i] = (B, T_i + 1)
    action[i] = (B, T_i)
    reward[i] = (B, T_i)
  """
  # Only batched EnvProblem environments are supported; all the actual playing
  # logic lives in env_problem_utils, so we simply delegate.
  assert isinstance(env, env_problem.EnvProblem)
  # This is an env_problem, run its collect function.
  return env_problem_utils.play_env_problem_with_policy(
      env,
      policy_fun,
      num_trajectories=num_trajectories,
      max_timestep=max_timestep,
      boundary=boundary,
      policy_sampling=policy,
      eps=epsilon,
      reset=reset,
      rng=rng)
# This function can probably be simplified, ask how?
# Can we do something much simpler than lax.pad, maybe np.pad?
# Others?
def get_padding_value(dtype):
  """Returns the scalar padding value appropriate for the given dtype."""
  # Floats pad with a Python float; unsigned int dtypes pad with a typed
  # zero; everything else falls back to a plain int 0.
  if dtype == np.uint8:
    return np.uint8(0)
  if dtype == np.uint16:
    return np.uint16(0)
  if dtype == np.float32 or dtype == np.float64:
    return 0.0
  return 0
# TODO(afrozm): Use np.pad instead and make jittable?
def pad_trajectories(trajectories, boundary=20):
  """Pad trajectories to a bucket length that is a multiple of boundary.

  Args:
    trajectories: list[(observation, actions, rewards)], where each observation
      is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the
      length of the list being B (batch size).
    boundary: int, bucket length, the actions and rewards are padded to integer
      multiples of boundary.

  Returns:
    tuple: (padding lengths, reward_mask, padded_observations, padded_actions,
    padded_rewards) where padded_observations is shaped (B, T+1) + OBS and
    padded_actions, padded_rewards & reward_mask are shaped (B, T).
    Where T is max(t) rounded up to an integer multiple of boundary.
    padded_length is how much padding we've added and
    reward_mask is 1s for actual rewards and 0s for the padding.
  """
  # Let's compute max(t) over all trajectories.
  t_max = max(r.shape[0] for (_, _, r) in trajectories)
  # t_max is rounded to the next multiple of `boundary`
  boundary = int(boundary)
  bucket_length = boundary * int(np.ceil(float(t_max) / boundary))
  # So all obs will be padded to t_max + 1 and actions and rewards to t_max.
  padded_observations = []
  padded_actions = []
  padded_rewards = []
  padded_lengths = []
  reward_masks = []
  for (o, a, r) in trajectories:
    # Determine the amount to pad, this holds true for obs, actions and rewards.
    num_to_pad = bucket_length + 1 - o.shape[0]
    padded_lengths.append(num_to_pad)
    if num_to_pad == 0:
      # Already at the bucket length: no padding needed, mask is all ones.
      padded_observations.append(o)
      padded_actions.append(a)
      padded_rewards.append(r)
      reward_masks.append(onp.ones_like(r, dtype=np.int32))
      continue
    # First pad observations.
    # lax.pad config entries are (low, high, interior) per dimension; we only
    # pad at the high end of the time dimension, never inside OBS dims.
    padding_config = [(0, num_to_pad, 0)]
    for _ in range(o.ndim - 1):
      padding_config.append((0, 0, 0))
    padding_config = tuple(padding_config)
    padding_value = get_padding_value(o.dtype)
    action_padding_value = get_padding_value(a.dtype)
    reward_padding_value = get_padding_value(r.dtype)
    padded_obs = lax.pad(o, padding_value, padding_config)
    padded_observations.append(padded_obs)
    # Now pad actions and rewards.
    assert a.ndim == 1 and r.ndim == 1
    padding_config = ((0, num_to_pad, 0),)
    padded_action = lax.pad(a, action_padding_value, padding_config)
    padded_actions.append(padded_action)
    padded_reward = lax.pad(r, reward_padding_value, padding_config)
    padded_rewards.append(padded_reward)
    # Also create the mask to use later.
    reward_mask = onp.ones_like(r, dtype=np.int32)
    reward_masks.append(lax.pad(reward_mask, 0, padding_config))
  return padded_lengths, np.stack(reward_masks), np.stack(
      padded_observations), np.stack(padded_actions), np.stack(padded_rewards)
# TODO(afrozm): JAX-ify this, this is too slow for pong.
def rewards_to_go(rewards, mask, gamma=0.99):
  r"""Computes the discounted sum of future rewards at every time-step.

  Reward to go is the discounted reward yet to be collected going forward
  from time-step t:

  r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})

  Args:
    rewards: np.ndarray of shape (B, T) of rewards.
    mask: np.ndarray of shape (B, T), 1 for real rewards and 0 for padding.
    gamma: float, discount factor.

  Returns:
    rewards to go, np.ndarray of shape (B, T).
  """
  masked_rewards = rewards * mask  # (B, T)
  num_time_steps = masked_rewards.shape[1]

  # We use the backward recurrence r2g[t] = r[t] + gamma * r2g[t+1].
  # (The forward form r2g[t+1] = (r2g[t] - r[t]) / gamma divides by
  # gamma < 1 repeatedly and overflows on long sequences.)  Running it
  # backwards may still lose small early rewards added to a large sum.
  columns = [masked_rewards[:, -1]]
  for t in range(num_time_steps - 2, -1, -1):
    columns.append(masked_rewards[:, t] + gamma * columns[-1])
  assert num_time_steps == len(columns)

  # `columns` holds time-steps T-1 down to 0, so reverse before stacking
  # along the time axis to get shape (B, T) in chronological order.
  return np.stack(columns[::-1], axis=1)
@jit
def value_loss_given_predictions(value_prediction,
                                 rewards,
                                 reward_mask,
                                 gamma=0.99,
                                 epsilon=0.2,
                                 value_prediction_old=None):
  """Computes the value loss given the prediction of the value function.

  Args:
    value_prediction: np.ndarray of shape (B, T+1, 1)
    rewards: np.ndarray of shape (B, T) of rewards.
    reward_mask: np.ndarray of shape (B, T), the mask over rewards.
    gamma: float, discount factor.
    epsilon: float, clip-fraction, used if value_prediction_old isn't None.
    value_prediction_old: np.ndarray of shape (B, T+1, 1) of value predictions
      using the old parameters. If provided, we incorporate this in the loss as
      well. This is from the OpenAI baselines implementation.

  Returns:
    The average L2 value loss, averaged over instances where reward_mask is 1.
  """
  B, T = rewards.shape  # pylint: disable=invalid-name
  assert (B, T) == reward_mask.shape
  assert (B, T + 1, 1) == value_prediction.shape
  # Drop the trailing singleton axis and the extra final time-step; zero out
  # the padded positions so they don't contribute to the loss.
  value_prediction = np.squeeze(value_prediction, axis=2)  # (B, T+1)
  value_prediction = value_prediction[:, :-1] * reward_mask  # (B, T)
  r2g = rewards_to_go(rewards, reward_mask, gamma=gamma)  # (B, T)
  loss = (value_prediction - r2g)**2
  # From the baselines implementation.
  if value_prediction_old is not None:
    value_prediction_old = np.squeeze(value_prediction_old, axis=2)  # (B, T+1)
    value_prediction_old = value_prediction_old[:, :-1] * reward_mask  # (B, T)
    # Clip the *change* in the value prediction to [-epsilon, epsilon] and
    # take the pessimistic (larger) of clipped and unclipped losses.
    v_clipped = value_prediction_old + np.clip(
        value_prediction - value_prediction_old, -epsilon, epsilon)
    v_clipped_loss = (v_clipped - r2g)**2
    loss = np.maximum(v_clipped_loss, loss)
  # Take an average on only the points where mask != 0.
  return np.sum(loss) / np.sum(reward_mask)
def deltas(predicted_values, rewards, mask, gamma=0.99):
  r"""Computes TD-residuals from V(s) and rewards.

  Where a `delta`, i.e. a td-residual is defined as:

  delta_{b,t} = r_{b,t} + \gamma * v_{b,t+1} - v_{b,t}.

  Args:
    predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 was
      squeezed. These represent V(s_bt) for b < B and t < T+1
    rewards: ndarray of shape (B, T) of rewards.
    mask: ndarray of shape (B, T) of mask for rewards.
    gamma: float, discount factor.

  Returns:
    ndarray of shape (B, T) of one-step TD-residuals.
  """
  # Vectorized form of the per-time-step residual: shifting the value slice
  # by one gives v_{t+1} against v_t for every t at once. This replaces the
  # former per-time-step Python loop (which the TODO noted was too slow) and
  # produces identical values.
  return (rewards + gamma * predicted_values[:, 1:]
          - predicted_values[:, :-1]) * mask
def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):
  r"""Computes GAE advantages from the one-step TD-residuals.

  The GAE advantage estimator is:

  A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l}).

  This is exactly a rewards-to-go computation over the residuals with the
  combined discount gamma * lambda_, so we delegate to `rewards_to_go`.

  Args:
    td_deltas: np.ndarray of shape (B, T) of one step TD-residuals.
    mask: np.ndarray of shape (B, T) of mask for the residuals. It maybe the
      case that the `td_deltas` are already masked correctly since they are
      produced by `deltas(...)`
    lambda_: float, lambda parameter for GAE estimators.
    gamma: float, lambda parameter for GAE estimators.

  Returns:
    GAE advantage estimates.
  """
  combined_discount = lambda_ * gamma
  return rewards_to_go(td_deltas, mask, combined_discount)
def chosen_probabs(probab_observations, actions):
  """Gathers the log-probabilities of the chosen actions.

  Args:
    probab_observations: ndarray of shape `[B, T+1, A]`, where
      probab_observations[b, t, i] contains the log-probability of action = i
      at the t^th time-step in the b^th trajectory.
    actions: ndarray of shape `[B, T]`, with each entry in [0, A) denoting
      which action was chosen in the b^th trajectory's t^th time-step.

  Returns:
    `[B, T]` ndarray with the log-probabilities of the chosen actions.
  """
  batch_size, num_time_steps = actions.shape
  assert (batch_size, num_time_steps + 1) == probab_observations.shape[:2]
  # Advanced indexing: batch index broadcasts over the time index so entry
  # (b, t) picks probab_observations[b, t, actions[b, t]].
  batch_index = np.arange(batch_size)[:, None]
  time_index = np.arange(num_time_steps)
  return probab_observations[batch_index, time_index, actions]
def compute_probab_ratios(p_new, p_old, actions, reward_mask):
  """Computes the new/old probability ratios at each time-step.

  Args:
    p_new: ndarray of shape [B, T+1, A] of per-action log-probabilities
      under the current policy parameters.
    p_old: ndarray of shape [B, T+1, A], same, but under the old parameters.
    actions: ndarray of shape [B, T] where each element is from [0, A).
    reward_mask: ndarray of shape [B, T] masking over probabilities.

  Returns:
    probab_ratios: ndarray of shape [B, T], where
    probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}
  """
  batch_size, num_time_steps = actions.shape
  assert (batch_size, num_time_steps + 1) == p_old.shape[:2]
  assert (batch_size, num_time_steps + 1) == p_new.shape[:2]
  logp_old = chosen_probabs(p_old, actions)
  logp_new = chosen_probabs(p_new, actions)
  assert (batch_size, num_time_steps) == logp_old.shape
  assert (batch_size, num_time_steps) == logp_new.shape
  # These are log-probabilities, so the ratio is exp of the difference;
  # padded time-steps are zeroed by the mask.
  ratios = np.exp(logp_new - logp_old) * reward_mask
  assert (batch_size, num_time_steps) == ratios.shape
  return ratios
def clipped_probab_ratios(probab_ratios, epsilon=0.2):
  """Clips the probability ratios into [1 - epsilon, 1 + epsilon]."""
  lower, upper = 1 - epsilon, 1 + epsilon
  return np.clip(probab_ratios, lower, upper)
def clipped_objective(probab_ratios, advantages, reward_mask, epsilon=0.2):
  """Pessimistic (elementwise min of clipped/unclipped) PPO objective."""
  unclipped = probab_ratios * advantages
  clipped = clipped_probab_ratios(probab_ratios, epsilon=epsilon) * advantages
  # Take the lower of the two surrogates and zero out padded time-steps.
  return np.minimum(unclipped, clipped) * reward_mask
@jit
def ppo_loss_given_predictions(log_probab_actions_new,
                               log_probab_actions_old,
                               value_predictions_old,
                               padded_actions,
                               padded_rewards,
                               reward_mask,
                               gamma=0.99,
                               lambda_=0.95,
                               epsilon=0.2):
  """PPO objective, with an eventual minus sign, given predictions.

  Args:
    log_probab_actions_new: (B, T+1, A) log-probs under the current params.
    log_probab_actions_old: (B, T+1, A) log-probs under the params used to
      collect the trajectories.
    value_predictions_old: (B, T+1, 1) value predictions under the old params.
    padded_actions: (B, T) chosen actions.
    padded_rewards: (B, T) rewards.
    reward_mask: (B, T) mask, 1 for real time-steps and 0 for padding.
    gamma: float, discount factor.
    lambda_: float, GAE lambda parameter.
    epsilon: float, PPO clipping fraction.

  Returns:
    Scalar: the negated average clipped-surrogate objective (i.e. a loss).
  """
  B, T = padded_rewards.shape  # pylint: disable=invalid-name
  assert (B, T) == padded_actions.shape
  assert (B, T) == reward_mask.shape
  _, _, A = log_probab_actions_old.shape  # pylint: disable=invalid-name
  assert (B, T + 1, 1) == value_predictions_old.shape
  assert (B, T + 1, A) == log_probab_actions_old.shape
  assert (B, T + 1, A) == log_probab_actions_new.shape
  # (B, T)
  td_deltas = deltas(
      np.squeeze(value_predictions_old, axis=2),  # (B, T+1)
      padded_rewards,
      reward_mask,
      gamma=gamma)
  # (B, T)
  advantages = gae_advantages(
      td_deltas, reward_mask, lambda_=lambda_, gamma=gamma)
  # Normalize the advantages.
  advantages = (advantages - np.mean(advantages)) / np.std(advantages)
  # (B, T)
  ratios = compute_probab_ratios(log_probab_actions_new, log_probab_actions_old,
                                 padded_actions, reward_mask)
  assert (B, T) == ratios.shape
  # (B, T)
  objective = clipped_objective(
      ratios, advantages, reward_mask, epsilon=epsilon)
  assert (B, T) == objective.shape
  # () -- average over the unmasked time-steps only.
  average_objective = np.sum(objective) / np.sum(reward_mask)
  # Loss is negative objective.
  return -average_objective
@jit
def combined_loss_given_predictions(log_probab_actions_new,
                                    log_probab_actions_old,
                                    value_prediction_new,
                                    value_prediction_old,
                                    padded_actions,
                                    padded_rewards,
                                    reward_mask,
                                    gamma=0.99,
                                    lambda_=0.95,
                                    epsilon=0.2,
                                    c1=1.0,
                                    c2=0.01):
  """Computes the combined (clipped loss + value loss) given predictions.

  Args:
    log_probab_actions_new: (B, T+1, A) log-probs under the current params.
    log_probab_actions_old: (B, T+1, A) log-probs under the old params.
    value_prediction_new: (B, T+1, 1) value predictions, current params.
    value_prediction_old: (B, T+1, 1) value predictions, old params.
    padded_actions: (B, T) chosen actions.
    padded_rewards: (B, T) rewards.
    reward_mask: (B, T) mask over time-steps.
    gamma: float, discount factor.
    lambda_: float, GAE lambda parameter.
    epsilon: float, clip fraction for both the PPO and the value loss.
    c1: float, coefficient of the value loss in the combined loss.
    c2: float, coefficient of the entropy bonus (subtracted from the loss).

  Returns:
    Tuple (combined_loss, ppo_loss, value_loss, entropy_bonus).
  """
  loss_value = value_loss_given_predictions(
      value_prediction_new,
      padded_rewards,
      reward_mask,
      gamma=gamma,
      value_prediction_old=value_prediction_old,
      epsilon=epsilon)
  loss_ppo = ppo_loss_given_predictions(
      log_probab_actions_new,
      log_probab_actions_old,
      value_prediction_old,
      padded_actions,
      padded_rewards,
      reward_mask,
      gamma=gamma,
      lambda_=lambda_,
      epsilon=epsilon)
  # Entropy bonus encourages exploration; note it is subtracted below.
  entropy_bonus = masked_entropy(log_probab_actions_new, reward_mask)
  return (loss_ppo + (c1 * loss_value) - (c2 * entropy_bonus), loss_ppo,
          loss_value, entropy_bonus)
@functools.partial(jit, static_argnums=(3,))
def combined_loss(new_params,
                  log_probab_actions_old,
                  value_predictions_old,
                  policy_and_value_net_apply,
                  padded_observations,
                  padded_actions,
                  padded_rewards,
                  reward_mask,
                  gamma=0.99,
                  lambda_=0.95,
                  epsilon=0.2,
                  c1=1.0,
                  c2=0.01,
                  rng=None):
  """Computes the combined (clipped loss + value loss) given observations.

  Runs the network on `padded_observations` with `new_params`, then defers to
  `combined_loss_given_predictions`. NOTE: `policy_and_value_net_apply` is a
  static argument for jit (static_argnums=(3,)), so each distinct apply
  function triggers its own trace.

  Args:
    new_params: parameters to evaluate the network with.
    log_probab_actions_old: (B, T+1, A) log-probs under the old params.
    value_predictions_old: (B, T+1, 1) value predictions, old params.
    policy_and_value_net_apply: the network's apply function.
    padded_observations: (B, T+1) + OBS padded observations.
    padded_actions: (B, T) chosen actions.
    padded_rewards: (B, T) rewards.
    reward_mask: (B, T) mask over time-steps.
    gamma: float, discount factor.
    lambda_: float, GAE lambda parameter.
    epsilon: float, clipping fraction.
    c1: float, value-loss coefficient.
    c2: float, entropy-bonus coefficient.
    rng: jax rng for the network application.

  Returns:
    Tuple (combined_loss, ppo_loss, value_loss, entropy_bonus).
  """
  log_probab_actions_new, value_predictions_new = policy_and_value_net_apply(
      padded_observations, new_params, rng=rng)
  # (combined_loss, ppo_loss, value_loss, entropy_bonus)
  return combined_loss_given_predictions(
      log_probab_actions_new,
      log_probab_actions_old,
      value_predictions_new,
      value_predictions_old,
      padded_actions,
      padded_rewards,
      reward_mask,
      gamma=gamma,
      lambda_=lambda_,
      epsilon=epsilon,
      c1=c1,
      c2=c2)
@functools.partial(jit, static_argnums=(2, 3, 4))
def policy_and_value_opt_step(i,
                              opt_state,
                              opt_update,
                              get_params,
                              policy_and_value_net_apply,
                              log_probab_actions_old,
                              value_predictions_old,
                              padded_observations,
                              padded_actions,
                              padded_rewards,
                              reward_mask,
                              c1=1.0,
                              c2=0.01,
                              gamma=0.99,
                              lambda_=0.95,
                              epsilon=0.1,
                              rng=None):
  """Policy and Value optimizer step.

  Differentiates the combined loss w.r.t. the current parameters and applies
  one optimizer update. The callables `opt_update`, `get_params` and
  `policy_and_value_net_apply` are static arguments for jit
  (static_argnums=(2, 3, 4)).

  Args:
    i: int, the optimizer step number.
    opt_state: current optimizer state (params plus slots).
    opt_update: callable (i, grads, opt_state) -> new opt_state.
    get_params: callable opt_state -> params.
    policy_and_value_net_apply: the network's apply function.
    log_probab_actions_old / value_predictions_old: collection-time
      predictions, passed through to `combined_loss`.
    padded_observations / padded_actions / padded_rewards / reward_mask:
      the padded trajectory batch.
    c1, c2, gamma, lambda_, epsilon: loss hyperparameters.
    rng: jax rng for the network application.

  Returns:
    The updated optimizer state.
  """
  # Combined loss function given the new params.
  def policy_and_value_loss(params):
    """Returns the combined loss given just parameters."""
    (loss, _, _, _) = combined_loss(
        params,
        log_probab_actions_old,
        value_predictions_old,
        policy_and_value_net_apply,
        padded_observations,
        padded_actions,
        padded_rewards,
        reward_mask,
        c1=c1,
        c2=c2,
        gamma=gamma,
        lambda_=lambda_,
        epsilon=epsilon,
        rng=rng)
    return loss
  new_params = get_params(opt_state)
  g = grad(policy_and_value_loss)(new_params)
  # TODO(afrozm): Maybe clip gradients?
  return opt_update(i, g, opt_state)
def get_time(t1, t2=None):
  """Returns the elapsed time between t1 and t2 (now if None), in msec."""
  end = time.time() if t2 is None else t2
  return round((end - t1) * 1000, 2)
def approximate_kl(log_prob_new, log_prob_old, mask):
  """Computes the approximate KL divergence between the old and new log-probs.

  Args:
    log_prob_new: (B, T+1, A) log probs new
    log_prob_old: (B, T+1, A) log probs old
    mask: (B, T)

  Returns:
    Approximate KL, averaged over the unmasked time-steps.
  """
  # Drop the extra final time-step, then zero out the padded entries; the
  # mask broadcasts from (B, T) to (B, T, 1) over the action dimension.
  diff = (log_prob_old - log_prob_new)[:, :-1]
  masked_diff = diff * mask[:, :, np.newaxis]
  return np.sum(masked_diff) / np.sum(mask)
def masked_entropy(log_probs, mask):
  """Computes the entropy for the given log-probs.

  Args:
    log_probs: (B, T+1, A) log probs
    mask: (B, T) mask.

  Returns:
    Entropy, averaged over the unmasked time-steps.
  """
  mask3d = mask[:, :, np.newaxis]  # (B, T, 1), broadcasts over actions.
  # Drop the extra final time-step and zero out the padded positions.
  lp = log_probs[:, :-1] * mask3d
  p = np.exp(lp) * mask3d  # (B, T, A) probabilities, masked.
  # entropy = -sum(p * log p), averaged over the non-masked part.
  return -(np.sum(lp * p) / np.sum(mask))
def evaluate_policy(eval_env,
                    get_predictions,
                    boundary,
                    max_timestep=20000,
                    rng=None):
  """Plays `eval_env` under several sampling schemes and averages rewards.

  Args:
    eval_env: EnvProblem to play.
    get_predictions: the policy function handed to the env player.
    boundary: int, padding boundary for trajectories.
    max_timestep: int, cap on trajectory length during evaluation.
    rng: jax rng, splittable.

  Returns:
    dict mapping each sampling-policy name to its average total reward.
  """
  sampling_policies = [
      env_problem_utils.CATEGORICAL_SAMPLING,
      env_problem_utils.GUMBEL_SAMPLING,
      env_problem_utils.EPSILON_GREEDY,
  ]
  avg_rewards = {}
  for sampling_policy in sampling_policies:
    trajs, _ = env_problem_utils.play_env_problem_with_policy(
        eval_env,
        get_predictions,
        boundary=boundary,
        max_timestep=max_timestep,
        reset=True,
        policy_sampling=sampling_policy,
        rng=rng)
    # traj[2] holds the per-step rewards of a trajectory.
    total_reward = sum(np.sum(traj[2]) for traj in trajs)
    avg_rewards[sampling_policy] = float(total_reward) / len(trajs)
  return avg_rewards
def maybe_restore_params(output_dir, policy_and_value_net_params):
  """Maybe restore the params from the checkpoint dir.

  Args:
    output_dir: Directory where saved model checkpoints are stored.
    policy_and_value_net_params: Default params, returned if no checkpoint is
      found.

  Returns:
    triple (restored (bool), params, iteration (int)) where iteration is the
    epoch from which we restored the params, 0 if restored is False.
  """
  model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl"))
  if not model_files:
    return False, policy_and_value_net_params, 0

  # Restore the latest checkpoint (the file names sort by epoch number).
  model_file = sorted(model_files)[-1]
  model_file_basename = os.path.basename(model_file)  # model-??????.pkl
  # BUG FIX: `int(filter(...))` raises a TypeError on Python 3 because
  # `filter` returns an iterator there; join the digit characters into a
  # string before converting to int.
  i = int("".join(filter(str.isdigit, model_file_basename)))
  with gfile.GFile(model_file, "rb") as f:
    policy_and_value_net_params = pickle.load(f)
  return True, policy_and_value_net_params, i
def training_loop(
    env=None,
    epochs=EPOCHS,
    policy_and_value_net_fun=None,
    policy_and_value_optimizer_fun=None,
    batch_size=BATCH_TRAJECTORIES,
    num_optimizer_steps=NUM_OPTIMIZER_STEPS,
    print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,
    target_kl=0.01,
    boundary=20,
    max_timestep=None,
    max_timestep_eval=20000,
    random_seed=None,
    gamma=GAMMA,
    lambda_=LAMBDA,
    epsilon=EPSILON,
    c1=1.0,
    c2=0.01,
    output_dir=None,
    eval_every_n=1000,
    eval_env=None,
    done_frac_for_policy_save=0.5,
    enable_early_stopping=True,
    env_name=None,
):
  """Runs the training loop for PPO, with fixed policy and value nets.

  Each epoch: (1) optionally evaluates the current policy, (2) collects
  `batch_size` trajectories, (3) pads them and computes their log-probs and
  value predictions, (4) runs up to `num_optimizer_steps` combined
  policy+value gradient steps, early-stopping once the approximate KL to the
  collection-time policy exceeds 1.5 * `target_kl`, and (5) periodically
  checkpoints the parameters to `output_dir`.

  Args:
    env: EnvProblem to train on (required).
    epochs: int, number of outer training iterations.
    policy_and_value_net_fun: callable (rng, obs_shape, num_actions) ->
      (params, apply_fn) building the joint network (required).
    policy_and_value_optimizer_fun: callable params ->
      (opt_state, opt_update, get_params) (required).
    batch_size: int, number of trajectories to collect per epoch.
    num_optimizer_steps: int, max optimizer steps per epoch.
    print_every_optimizer_steps: int, loss-logging cadence within an epoch.
    target_kl: float, approximate-KL threshold (times 1.5) for early stopping.
    boundary: int, padding bucket boundary for trajectories.
    max_timestep: int or None, trajectory length cap during training.
    max_timestep_eval: int, trajectory length cap during evaluation.
    random_seed: int or None, seed for the jax rngs.
    gamma: float, discount factor.
    lambda_: float, GAE lambda parameter.
    epsilon: float, PPO clipping fraction.
    c1: float, value-loss coefficient in the combined loss.
    c2: float, entropy-bonus coefficient in the combined loss.
    output_dir: str, directory for summaries and checkpoints (required).
    eval_every_n: int, policy-eval cadence, also the minimum checkpoint gap.
    eval_env: EnvProblem used for policy evaluation.
    done_frac_for_policy_save: float, fraction of batch_size completed
      trajectories that triggers a checkpoint.
    enable_early_stopping: bool, whether to early-stop on approximate KL.
    env_name: str, environment name recorded in the summaries (required).
  """
  assert env
  assert output_dir
  assert env_name
  gfile.makedirs(output_dir)
  # Create summary writers and history.
  train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "train"))
  timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "timing"))
  eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "eval"))
  train_sw.text("env_name", env_name)
  timing_sw.text("env_name", env_name)
  eval_sw.text("env_name", env_name)
  jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed)
  # Batch Observations Shape = [-1, -1] + OBS, because we will eventually call
  # policy and value networks on shape [B, T] +_OBS
  batch_observations_shape = (-1, -1) + env.observation_space.shape
  assert isinstance(env.action_space, gym.spaces.Discrete)
  num_actions = env.action_space.n
  jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
  # Initialize the policy and value network.
  policy_and_value_net_params, policy_and_value_net_apply = (
      policy_and_value_net_fun(key1, batch_observations_shape, num_actions))
  # Maybe restore the policy params. If there is nothing to restore, then
  # iteration = 0 and policy_and_value_net_params are returned as is.
  restore, policy_and_value_net_params, iteration = (
      maybe_restore_params(output_dir, policy_and_value_net_params))
  if restore:
    logging.info("Restored parameters from iteration [%d]", iteration)
    # We should start from the next iteration.
    iteration += 1
  policy_and_value_net_apply = jit(policy_and_value_net_apply)
  # Initialize the optimizers.
  policy_and_value_optimizer = (
      policy_and_value_optimizer_fun(policy_and_value_net_params))
  (policy_and_value_opt_state, policy_and_value_opt_update,
   policy_and_value_get_params) = policy_and_value_optimizer
  num_trajectories_done = 0
  last_saved_at = 0
  logging.info("Starting the PPO training loop.")
  for i in range(iteration, epochs):
    epoch_start_time = time.time()
    # Params we'll use to collect the trajectories.
    policy_and_value_net_params = policy_and_value_get_params(
        policy_and_value_opt_state)
    # A function to get the policy and value predictions.
    # NOTE: Closes over the params collected above; redefined every epoch so
    # collection always uses the latest parameters.
    def get_predictions(observations, rng=None):
      """Returns log-probs, value predictions and key back."""
      key, key1 = jax_random.split(rng, num=2)
      log_probs, value_preds = policy_and_value_net_apply(
          observations, policy_and_value_net_params, rng=key1)
      return log_probs, value_preds, key
    # Evaluate the policy.
    policy_eval_start_time = time.time()
    if ((i + 1) % eval_every_n == 0) or (i == epochs - 1):
      jax_rng_key, key = jax_random.split(jax_rng_key, num=2)
      logging.vlog(1, "Epoch [% 6d] evaluating policy.", i)
      avg_reward = evaluate_policy(
          eval_env,
          get_predictions,
          boundary,
          max_timestep=max_timestep_eval,
          rng=key)
      for k, v in avg_reward.items():
        eval_sw.scalar("eval/mean_reward/%s" % k, v, step=i)
        logging.info("Epoch [% 6d] Policy Evaluation [%s] = %10.2f", i, k, v)
    policy_eval_time = get_time(policy_eval_start_time)
    trajectory_collection_start_time = time.time()
    logging.vlog(1, "Epoch [% 6d] collecting trajectories.", i)
    jax_rng_key, key = jax_random.split(jax_rng_key)
    trajs, num_done = collect_trajectories(
        env,
        policy_fun=get_predictions,
        num_trajectories=batch_size,
        max_timestep=max_timestep,
        boundary=boundary,
        rng=key,
        reset=(i == 0) or restore,
        epsilon=(10.0 / (i + 10.0)))  # this is a different epsilon.
    trajectory_collection_time = get_time(trajectory_collection_start_time)
    logging.vlog(1, "Collecting trajectories took %0.2f msec.",
                 trajectory_collection_time)
    avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs)
    max_reward = max(np.sum(traj[2]) for traj in trajs)
    min_reward = min(np.sum(traj[2]) for traj in trajs)
    train_sw.scalar("train/mean_reward", avg_reward, step=i)
    logging.vlog(1, "Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s",
                 avg_reward, max_reward, min_reward,
                 [float(np.sum(traj[2])) for traj in trajs])
    logging.vlog(1,
                 "Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]",
                 float(sum(len(traj[0]) for traj in trajs)) / len(trajs),
                 max(len(traj[0]) for traj in trajs),
                 min(len(traj[0]) for traj in trajs))
    logging.vlog(2, "Trajectory Lengths: %s", [len(traj[0]) for traj in trajs])
    padding_start_time = time.time()
    (_, reward_mask, padded_observations, padded_actions,
     padded_rewards) = pad_trajectories(
         trajs, boundary=boundary)
    padding_time = get_time(padding_start_time)
    logging.vlog(1, "Padding trajectories took %0.2f msec.",
                 get_time(padding_start_time))
    logging.vlog(1, "Padded Observations' shape [%s]",
                 str(padded_observations.shape))
    logging.vlog(1, "Padded Actions' shape [%s]", str(padded_actions.shape))
    logging.vlog(1, "Padded Rewards' shape [%s]", str(padded_rewards.shape))
    # Calculate log-probabilities and value predictions of the trajectories.
    # We'll pass these to the loss functions so as to not get recomputed.
    # NOTE:
    # There is a slight problem here, if the policy network contains
    # stochasticity in the log-probabilities (ex: dropout), then calculating
    # these again here is not going to be correct and should be done in the
    # collect function.
    log_prob_recompute_start_time = time.time()
    jax_rng_key, key = jax_random.split(jax_rng_key)
    log_probabs_traj, value_predictions_traj, _ = get_predictions(
        padded_observations, rng=key)
    log_prob_recompute_time = get_time(log_prob_recompute_start_time)
    # Some assertions.
    B, T = padded_actions.shape  # pylint: disable=invalid-name
    assert (B, T) == padded_rewards.shape
    assert (B, T) == reward_mask.shape
    assert (B, T + 1) == padded_observations.shape[:2]
    assert (B, T + 1) + env.observation_space.shape == padded_observations.shape
    # Linear annealing from 0.1 to 0.0
    # epsilon_schedule = epsilon if epochs == 1 else epsilon * (1.0 -
    #                                                           (i /
    #                                                            (epochs - 1)))
    # Constant epsilon.
    epsilon_schedule = epsilon
    # Compute value and ppo losses.
    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(2, "Starting to compute P&V loss.")
    loss_compute_start_time = time.time()
    cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = (
        combined_loss(
            policy_and_value_net_params,
            log_probabs_traj,
            value_predictions_traj,
            policy_and_value_net_apply,
            padded_observations,
            padded_actions,
            padded_rewards,
            reward_mask,
            gamma=gamma,
            lambda_=lambda_,
            epsilon=epsilon_schedule,
            c1=c1,
            c2=c2,
            rng=key1))
    loss_compute_time = get_time(loss_compute_start_time)
    logging.vlog(
        1,
        "Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.",
        cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus,
        get_time(loss_compute_start_time))
    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(1, "Policy and Value Optimization")
    optimization_start_time = time.time()
    keys = jax_random.split(key1, num=num_optimizer_steps)
    for j in range(num_optimizer_steps):
      k1, k2, k3 = jax_random.split(keys[j], num=3)
      t = time.time()
      # Update the optimizer state.
      policy_and_value_opt_state = policy_and_value_opt_step(
          j,
          policy_and_value_opt_state,
          policy_and_value_opt_update,
          policy_and_value_get_params,
          policy_and_value_net_apply,
          log_probabs_traj,
          value_predictions_traj,
          padded_observations,
          padded_actions,
          padded_rewards,
          reward_mask,
          c1=c1,
          c2=c2,
          gamma=gamma,
          lambda_=lambda_,
          epsilon=epsilon_schedule,
          rng=k1)
      # Compute the approx KL for early stopping.
      new_policy_and_value_net_params = policy_and_value_get_params(
          policy_and_value_opt_state)
      log_probab_actions_new, _ = policy_and_value_net_apply(
          padded_observations, new_policy_and_value_net_params, rng=k2)
      approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj,
                                 reward_mask)
      early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl
      if early_stopping:
        logging.vlog(
            1, "Early stopping policy and value optimization at iter: %d, "
            "with approx_kl: %0.2f", j, approx_kl)
        # We don't return right-away, we want the below to execute on the last
        # iteration.
      t2 = time.time()
      if (((j + 1) % print_every_optimizer_steps == 0) or
          (j == num_optimizer_steps - 1) or early_stopping):
        # Compute and log the loss.
        # NOTE: This also (re)binds `loss_combined` etc. which are logged
        # after the loop; the `j == num_optimizer_steps - 1` arm guarantees
        # they are always defined by then.
        (loss_combined, loss_ppo, loss_value, entropy_bonus) = (
            combined_loss(
                new_policy_and_value_net_params,
                log_probabs_traj,
                value_predictions_traj,
                policy_and_value_net_apply,
                padded_observations,
                padded_actions,
                padded_rewards,
                reward_mask,
                gamma=gamma,
                lambda_=lambda_,
                epsilon=epsilon_schedule,
                c1=c1,
                c2=c2,
                rng=k3))
        logging.vlog(1, "One Policy and Value grad desc took: %0.2f msec",
                     get_time(t, t2))
        logging.vlog(
            1, "Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->"
            " [%10.2f(%10.2f,%10.2f,%10.2f)]", cur_combined_loss, loss_combined,
            loss_value, loss_ppo, entropy_bonus)
      if early_stopping:
        break
    optimization_time = get_time(optimization_start_time)
    logging.vlog(
        1, "Total Combined Loss reduction [%0.2f]%%",
        (100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss)))
    # Save parameters every time we see the end of at least a fraction of batch
    # number of trajectories that are done (not completed -- completed includes
    # truncated and done).
    # Also don't save too frequently, enforce a minimum gap.
    # Or if this is the last iteration.
    policy_save_start_time = time.time()
    num_trajectories_done += num_done
    if (((num_trajectories_done >= done_frac_for_policy_save * batch_size)
         and (i - last_saved_at > eval_every_n)) or (i == epochs - 1)):
      logging.vlog(1, "Epoch [% 6d] saving model.", i)
      params_file = os.path.join(output_dir, "model-%06d.pkl" % i)
      with gfile.GFile(params_file, "wb") as f:
        pickle.dump(policy_and_value_net_params, f)
      # Reset this number.
      num_trajectories_done = 0
      last_saved_at = i
    policy_save_time = get_time(policy_save_start_time)
    epoch_time = get_time(epoch_start_time)
    logging.info(
        "Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined"
        " Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]", i, min_reward,
        max_reward, avg_reward, loss_combined, loss_value, loss_ppo,
        entropy_bonus)
    timing_dict = {
        "epoch": epoch_time,
        "policy_eval": policy_eval_time,
        "trajectory_collection": trajectory_collection_time,
        "padding": padding_time,
        "log_prob_recompute": log_prob_recompute_time,
        "loss_compute": loss_compute_time,
        "optimization": optimization_time,
        "policy_save": policy_save_time,
    }
    for k, v in timing_dict.items():
      timing_sw.scalar("timing/%s" % k, v, step=i)
    max_key_len = max(len(k) for k in timing_dict)
    timing_info_list = [
        "%s : % 10.2f" % (k.rjust(max_key_len + 1), v)
        for k, v in sorted(timing_dict.items())
    ]
    logging.info("Epoch [% 6d], Timings: \n%s", i, "\n".join(timing_info_list))
    # Reset restore.
    restore = False
    # Flush summary writers once in a while.
    if (i+1) % 1000 == 0 or i == epochs - 1:
      train_sw.flush()
      timing_sw.flush()
      eval_sw.flush()
| 2.1875 | 2 |
models/cal.py | SudoRmFr/The-Nature-Conservancy-Fisheries-Monitoring | 0 | 4860 | <reponame>SudoRmFr/The-Nature-Conservancy-Fisheries-Monitoring
"""
WS-DAN models
<NAME> al.,
"See Better Before Looking Closer: Weakly Supervised Data Augmentation Network for Fine-Grained Visual Classification",
arXiv:1901.09891
"""
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.resnet as resnet
from models.inception import inception_v3, BasicConv2d
import models.coatnet as coatnet
import random
__all__ = ['WSDAN_CAL']
EPSILON = 1e-6
def weights_init_classifier(m):
    """Initializes a classifier head: Linear weights ~ N(0, 0.001), bias 0.

    Intended to be used via `module.apply(weights_init_classifier)`.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # BUG FIX: `if m.bias:` evaluated the tensor's truth value, which
        # raises a RuntimeError for multi-element bias tensors and crashes
        # for bias=False layers; test for existence instead.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
def weights_init_kaiming(m):
    """Kaiming-initializes Linear/Conv layers and resets BatchNorm to (1, 0).

    Intended to be used via `module.apply(weights_init_kaiming)`.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        # BUG FIX: guard against bias=False layers (the Conv branch already
        # did this; the Linear branch previously crashed on m.bias == None).
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        # Only affine BatchNorm has learnable weight/bias to reset.
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)
# Bilinear Attention Pooling
class BAP(nn.Module):
    """Bilinear Attention Pooling (WS-DAN / CAL).

    Pools the backbone feature maps under each of M attention maps into a
    (B, M * C) feature matrix, and also computes a "counterfactual" feature
    using random (training) or uniform (eval) attention, as used by the CAL
    objective.
    """

    def __init__(self, pool='GAP'):
        # pool='GAP' uses the global-average einsum path in forward();
        # pool='GMP' uses per-attention-map adaptive max pooling instead.
        super(BAP, self).__init__()
        assert pool in ['GAP', 'GMP']
        if pool == 'GAP':
            self.pool = None
        else:
            self.pool = nn.AdaptiveMaxPool2d(1)

    def forward(self, features, attentions):
        """Returns (feature_matrix, counterfactual_feature), each (B, M * C)."""
        B, C, H, W = features.size()
        _, M, AH, AW = attentions.size()
        # match size: upsample attentions to the feature map's spatial size.
        if AH != H or AW != W:
            attentions = F.upsample_bilinear(attentions, size=(H, W))
        # feature_matrix: (B, M, C) -> (B, M * C)
        # The einsum computes, per (attention map m, channel n), the
        # attention-weighted spatial average of the features.
        if self.pool is None:
            feature_matrix = (torch.einsum('imjk,injk->imn', (attentions, features)) / float(H * W)).view(B, -1)
        else:
            feature_matrix = []
            for i in range(M):
                AiF = self.pool(features * attentions[:, i:i + 1, ...]).view(B, -1)
                feature_matrix.append(AiF)
            feature_matrix = torch.cat(feature_matrix, dim=1)
        # sign-sqrt
        feature_matrix_raw = torch.sign(feature_matrix) * torch.sqrt(torch.abs(feature_matrix) + EPSILON)
        # l2 normalization along dimension M and C
        feature_matrix = F.normalize(feature_matrix_raw, dim=-1)
        # Counterfactual attention: random uniform in [0, 2) while training,
        # all-ones at eval time.
        if self.training:
            fake_att = torch.zeros_like(attentions).uniform_(0, 2)
        else:
            fake_att = torch.ones_like(attentions)
        counterfactual_feature = (torch.einsum('imjk,injk->imn', (fake_att, features)) / float(H * W)).view(B, -1)
        counterfactual_feature = torch.sign(counterfactual_feature) * torch.sqrt(torch.abs(counterfactual_feature) + EPSILON)
        counterfactual_feature = F.normalize(counterfactual_feature, dim=-1)
        return feature_matrix, counterfactual_feature
def batch_augment(images, attention_map, mode='crop', theta=0.5, padding_ratio=0.1):
    """Attention-guided batch augmentation.

    mode='crop': zoom each image onto the region where its attention map
    exceeds ``theta * max`` (plus a padding margin), resized back to the
    original resolution. mode='drop': zero out the region where attention
    exceeds the threshold. ``theta`` may be a (lo, hi) tuple, in which case
    a value is drawn uniformly per image.
    """
    batches, _, imgH, imgW = images.size()

    if mode == 'crop':
        cropped = []
        for idx in range(batches):
            amap = attention_map[idx:idx + 1]
            theta_c = (random.uniform(*theta) if isinstance(theta, tuple) else theta) * amap.max()
            # Threshold the upsampled attention map to get the keep-region mask.
            mask = F.upsample_bilinear(amap, size=(imgH, imgW)) >= theta_c
            nz = torch.nonzero(mask[0, 0, ...])
            # Bounding box of the mask, padded and clamped to the image.
            h_lo = max(int(nz[:, 0].min().item() - padding_ratio * imgH), 0)
            h_hi = min(int(nz[:, 0].max().item() + padding_ratio * imgH), imgH)
            w_lo = max(int(nz[:, 1].min().item() - padding_ratio * imgW), 0)
            w_hi = min(int(nz[:, 1].max().item() + padding_ratio * imgW), imgW)
            region = images[idx:idx + 1, :, h_lo:h_hi, w_lo:w_hi]
            cropped.append(F.upsample_bilinear(region, size=(imgH, imgW)))
        return torch.cat(cropped, dim=0)

    if mode == 'drop':
        masks = []
        for idx in range(batches):
            amap = attention_map[idx:idx + 1]
            theta_d = (random.uniform(*theta) if isinstance(theta, tuple) else theta) * amap.max()
            # Keep pixels whose attention falls below the threshold.
            masks.append(F.upsample_bilinear(amap, size=(imgH, imgW)) < theta_d)
        return images * torch.cat(masks, dim=0).float()

    raise ValueError('Expected mode in [\'crop\', \'drop\'], but received unsupported augmentation method %s' % mode)
class WSDAN_CAL(nn.Module):
    """WS-DAN classifier with Counterfactual Attention Learning.

    Pipeline: backbone feature maps -> M attention maps (1x1 conv) ->
    bilinear attention pooling (BAP) -> linear classifier. ``forward``
    additionally returns the counterfactual-adjusted logits and an
    attention map used by the caller for crop/drop augmentation.

    :param num_classes: number of output classes.
    :param M: number of attention maps.
    :param net: backbone name ('inception_mixed_6e'/'inception_mixed_7c',
        a resnet variant, or a coatnet variant).
    :param pretrained: load pretrained backbone weights where supported.
    """
    def __init__(self, num_classes, M=32, net='inception_mixed_6e', pretrained=False):
        super(WSDAN_CAL, self).__init__()
        self.num_classes = num_classes
        self.M = M
        self.net = net

        # Network Initialization
        # Each branch also sets num_features = channel count of the backbone output.
        if 'inception' in net:
            if net == 'inception_mixed_6e':
                self.features = inception_v3(pretrained=pretrained).get_features_mixed_6e()
                self.num_features = 768
            elif net == 'inception_mixed_7c':
                self.features = inception_v3(pretrained=pretrained).get_features_mixed_7c()
                self.num_features = 2048
            else:
                raise ValueError('Unsupported net: %s' % net)
        elif 'resnet' in net:
            self.features = getattr(resnet, net)(pretrained=pretrained).get_features()
            self.num_features = 512 * self.features[-1][-1].expansion
        elif 'coat' in net:
            # CoAtNet sizes are encoded by a digit in the net name.
            self.features = getattr(coatnet, net)().get_features()
            if '0' in net or '1' in net:
                self.num_features = 768
            elif '2' in net:
                self.num_features = 1026
            elif '3' in net or '4' in net:
                self.num_features = 1536
            else:
                raise ValueError('Not given valid CoAtNet size.')
        else:
            raise ValueError('Unsupported net: %s' % net)

        # Attention Maps
        self.attentions = BasicConv2d(self.num_features, self.M, kernel_size=1)

        # Bilinear Attention Pooling
        self.bap = BAP(pool='GAP')

        # Classification Layer
        self.fc = nn.Linear(self.M * self.num_features, self.num_classes, bias=False)

        logging.info('WSDAN: using {} as feature extractor, num_classes: {}, num_attentions: {}'.format(net, self.num_classes, self.M))

    def visualize(self, x):
        """Return (logits, attention_maps) for attention visualisation."""
        batch_size = x.size(0)

        # Feature Maps, Attention Maps and Feature Matrix
        feature_maps = self.features(x)
        if self.net != 'inception_mixed_7c':
            attention_maps = self.attentions(feature_maps)
        else:
            # mixed_7c: reuse the first M feature channels as attention maps.
            attention_maps = feature_maps[:, :self.M, ...]

        # print(feature_maps.shape)
        # print(attention_maps.shape)

        feature_matrix = self.bap(feature_maps, attention_maps)[0]
        # The *100 scaling compensates for the l2-normalised feature magnitude.
        p = self.fc(feature_matrix * 100.)
        return p, attention_maps

    def forward(self, x):
        """Return (logits, counterfactual-adjusted logits, feature matrix, attention map).

        During training the attention map holds 2 randomly sampled maps per
        image (one for cropping, one for dropping); in eval it is the mean map.
        """
        batch_size = x.size(0)

        # Feature Maps, Attention Maps and Feature Matrix
        feature_maps = self.features(x)
        if self.net != 'inception_mixed_7c':
            attention_maps = self.attentions(feature_maps)
        else:
            attention_maps = feature_maps[:, :self.M, ...]

        feature_matrix, feature_matrix_hat = self.bap(feature_maps, attention_maps)

        # Classification
        p = self.fc(feature_matrix * 100.)

        # Generate Attention Map
        if self.training:
            # Randomly choose one of attention maps Ak
            attention_map = []
            for i in range(batch_size):
                # Sampling probability proportional to sqrt of each map's total mass.
                attention_weights = torch.sqrt(attention_maps[i].sum(dim=(1, 2)).detach() + EPSILON)
                attention_weights = F.normalize(attention_weights, p=1, dim=0)
                k_index = np.random.choice(self.M, 2, p=attention_weights.cpu().numpy())
                attention_map.append(attention_maps[i, k_index, ...])
            attention_map = torch.stack(attention_map)  # (B, 2, H, W) - one for cropping, the other for dropping
        else:
            attention_map = torch.mean(attention_maps, dim=1, keepdim=True)  # (B, 1, H, W)

        # CAL: subtract the counterfactual logits so the effect of attention is isolated.
        return p, p - self.fc(feature_matrix_hat * 100.), feature_matrix, attention_map

    def load_state_dict(self, state_dict, strict=True):
        """Load weights, silently skipping keys whose shapes do not match."""
        model_dict = self.state_dict()
        # Keep only entries that exist in this model with identical shapes.
        pretrained_dict = {k: v for k, v in state_dict.items()
                           if k in model_dict and model_dict[k].size() == v.size()}

        if len(pretrained_dict) == len(state_dict):
            print('%s: All params loaded' % type(self).__name__)
        else:
            print('%s: Some params were not loaded:' % type(self).__name__)
            not_loaded_keys = [k for k in state_dict.keys() if k not in pretrained_dict.keys()]
            print(('%s, ' * (len(not_loaded_keys) - 1) + '%s') % tuple(not_loaded_keys))

        model_dict.update(pretrained_dict)
        super(WSDAN_CAL, self).load_state_dict(model_dict)
| 2.296875 | 2 |
tests/walls/analytic/plates.py | noabauma/Mirheo | 0 | 4861 | #!/usr/bin/env python
import mirheo as mir
dt = 0.001
ranks = (1, 1, 1)
domain = (8, 16, 8)
force = (1.0, 0, 0)
density = 4
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True)
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.Uniform(number_density=density)
u.registerParticleVector(pv=pv, ic=ic)
dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind="DPD", a=10.0, gamma=50.0, kBT=1.0, power=0.5)
u.registerInteraction(dpd)
plate_lo = mir.Walls.Plane("plate_lo", (0, 0, -1), (0, 0, 1))
plate_hi = mir.Walls.Plane("plate_hi", (0, 0, 1), (0, 0, domain[2] - 1))
u.registerWall(plate_lo, 0)
u.registerWall(plate_hi, 0)
vv = mir.Integrators.VelocityVerlet("vv")
frozen = u.makeFrozenWallParticles(pvName="plates", walls=[plate_lo, plate_hi], interactions=[dpd], integrator=vv, number_density=density)
u.setWall(plate_lo, pv)
u.setWall(plate_hi, pv)
for p in (pv, frozen):
u.setInteraction(dpd, p, pv)
vv_dp = mir.Integrators.VelocityVerlet_withConstForce("vv_dp", force)
u.registerIntegrator(vv_dp)
u.setIntegrator(vv_dp, pv)
sample_every = 2
dump_every = 1000
bin_size = (1., 1., 0.5)
u.registerPlugins(mir.Plugins.createDumpAverage('field', [pv], sample_every, dump_every, bin_size, ["velocities"], 'h5/solvent-'))
u.run(7002)
# nTEST: walls.analytic.plates
# cd walls/analytic
# rm -rf h5
# mir.run --runargs "-n 2" ./plates.py
# mir.avgh5 xy velocities h5/solvent-0000[4-7].h5 | awk '{print $1}' > profile.out.txt
| 1.921875 | 2 |
scraper-code/myanimelist/base.py | XueAlfred/MALAnalysis | 15 | 4862 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import abc
import bs4
import functools
import utilities
class Error(Exception):
  """Base exception class that takes a message to display upon raising.
  """
  def __init__(self, message=None):
    """Creates an instance of Error.

    :type message: str
    :param message: A message to display when raising the exception.
    """
    super(Error, self).__init__()
    self.message = message

  def __str__(self):
    # NOTE: ``unicode``/``u""`` mean this module targets Python 2.
    return unicode(self.message) if self.message is not None else u""
class MalformedPageError(Error):
  """Indicates that a page on MAL has broken markup in some way.
  """
  def __init__(self, id, html, message=None):
    """Creates an instance of MalformedPageError.

    :param id: primary key of the resource whose page was malformed.
    :param html: raw HTML of the offending page, kept for debugging.
    :param message: optional human-readable description.
    """
    super(MalformedPageError, self).__init__(message=message)
    # Coerce both id and html to unicode (Python 2) so __str__ can join them.
    if isinstance(id, unicode):
      self.id = id
    else:
      self.id = str(id).decode(u'utf-8')
    if isinstance(html, unicode):
      self.html = html
    else:
      self.html = str(html).decode(u'utf-8')
  def __str__(self):
    # Multi-line report: base message, then the ID and the raw HTML.
    return "\n".join([
      super(MalformedPageError, self).__str__(),
      "ID: " + self.id,
      "HTML: " + self.html
    ]).encode(u'utf-8')
class InvalidBaseError(Error):
  """Indicates that the particular resource instance requested does not exist on MAL.
  """
  def __init__(self, id, message=None):
    """Creates an instance of InvalidBaseError.

    :param id: primary key of the resource that could not be found.
    :param message: optional human-readable description.
    """
    super(InvalidBaseError, self).__init__(message=message)
    self.id = id
  def __str__(self):
    return "\n".join([
      super(InvalidBaseError, self).__str__(),
      "ID: " + unicode(self.id)
    ])
def loadable(func_name):
  """Decorator factory for lazy-loading getters.

  The decorated getter is wrapped so that, when the backing cached
  attribute (``'_' + getter name``) is still None, the instance method named
  ``func_name`` is invoked first to populate it.

  :type func_name: str
  :param func_name: name of the loader method to call when the cached
      attribute has not been populated yet.
  :rtype: function
  :return: the decorating function.
  """
  def decorate(getter):
    attr_name = '_' + getter.__name__
    @functools.wraps(getter)
    def wrapper(self, *args, **kwargs):
      if getattr(self, attr_name) is None:
        getattr(self, func_name)()
      return getter(self, *args, **kwargs)
    return wrapper
  return decorate
class Base(object):
  """Abstract base class for MAL resources. Provides autoloading, auto-setting functionality for other MAL objects.
  """
  __metaclass__ = abc.ABCMeta

  """Attribute name for primary reference key to this object.
  When an attribute by the name given by _id_attribute is passed into set(), set() doesn't prepend an underscore for load()ing.
  """
  _id_attribute = "id"
  def __repr__(self):
    # e.g. <Anime id: 1> -- class name plus the primary-key attribute/value.
    # NOTE: ``unicode`` means this module targets Python 2.
    return u"".join([
      "<",
      self.__class__.__name__,
      " ",
      self._id_attribute,
      ": ",
      unicode(getattr(self, self._id_attribute)),
      ">"
    ])
  def __hash__(self):
    # Hash on class name + primary key so equal resources hash equally.
    return hash('-'.join([self.__class__.__name__, unicode(getattr(self, self._id_attribute))]))
  def __eq__(self, other):
    # Equal iff same concrete type and same primary-key value.
    return isinstance(other, self.__class__) and getattr(self, self._id_attribute) == getattr(other, other._id_attribute)
  def __ne__(self, other):
    return not self.__eq__(other)
  def __init__(self, session):
    """Create an instance of Base.

    :type session: :class:`myanimelist.session.Session`
    :param session: A valid MAL session.
    """
    self.session = session
  @abc.abstractmethod
  def load(self):
    """A callback to run before any @loadable attributes are returned.
    """
    pass
  def set(self, attr_dict):
    """Sets attributes of this user object.

    :type attr_dict: dict
    :param attr_dict: Parameters to set, with attribute keys.
    :rtype: :class:`.Base`
    :return: The current object.
    """
    # The id attribute is stored as-is; every other key is cached under a
    # leading underscore so @loadable getters can detect unloaded state.
    for key in attr_dict:
      if key == self._id_attribute:
        setattr(self, self._id_attribute, attr_dict[key])
      else:
        setattr(self, u"_" + key, attr_dict[key])
    return self
p2/core/http.py | BeryJu/p2 | 0 | 4863 | """p2 core http responses"""
from wsgiref.util import FileWrapper
from django.http import StreamingHttpResponse
from p2.core.constants import ATTR_BLOB_MIME, ATTR_BLOB_SIZE_BYTES
from p2.core.models import Blob
class BlobResponse(StreamingHttpResponse):
    """Stream a blob's raw bytes to the client in fixed-size chunks.

    Content-Length and Content-Type headers are taken from the blob's
    stored attributes, with fallbacks of 0 and 'text/plain'.
    """

    def __init__(self, blob: Blob, chunk_size=8192):
        super().__init__(FileWrapper(blob, chunk_size))
        attributes = blob.attributes
        self['Content-Length'] = attributes.get(ATTR_BLOB_SIZE_BYTES, 0)
        self['Content-Type'] = attributes.get(ATTR_BLOB_MIME, 'text/plain')
lattedb/project/formfac/migrations/0009_auto_20200528_0907.py | callat-qcd/lattedb | 1 | 4864 | <filename>lattedb/project/formfac/migrations/0009_auto_20200528_0907.py
# Generated by Django 3.0.6 on 2020-05-28 09:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Adds help_text and blank=True/null=True to the ``user`` ForeignKey on
    every form-factor/spectrum/correlator file model in project_formfac.
    Do not edit by hand beyond comments: migrations are ordered schema
    history and must stay consistent with their recorded state.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('project_formfac', '0008_auto_20200408_0823'),
    ]

    operations = [
        migrations.AlterField(
            model_name='concatenatedformfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='correlatormeta',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='diskconcatenatedformfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='diskcorrelatorh5dset',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='diskformfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='diskspectrum4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='disktslicedformfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='disktslicedsaveragedformfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='disktslicedsaveragedspectrum4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='disktslicedspectrum4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='formfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='spectrum4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tapeconcatenatedformfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tapecorrelatorh5dset',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tapetslicedsaveragedformfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tapetslicedsaveragedspectrum4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tslicedformfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tslicedsaveragedformfactor4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tslicedsaveragedspectrum4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='tslicedspectrum4dfile',
            name='user',
            field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 1.578125 | 2 |
SIO_Code/SIO_coherence.py | mmstoll/Ocean569_Code | 0 | 4865 | """
Data: Temperature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
import scipy.stats as ss
import SIO_modules as SIO_mod
from importlib import reload
reload(SIO_mod)
# read in temp and sal files
# NOTE(review): paths are machine-specific absolute paths -- adjust per host.
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')
ENSO_data_recent = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx')
PDO_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1)
path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/'
# convert year, month, day columns to single DATE column
sal_data['DATE'] = pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']])
temp_data['DATE'] = pd.to_datetime(temp_data[['YEAR', 'MONTH', 'DAY']])
# splice the recent ENSO record onto the historical one (rows 323+ are the new part)
ENSO_data_all = ENSO_data.append(ENSO_data_recent[323:], ignore_index = True)
PDO_data['DATE'] = pd.to_datetime(PDO_data['Date'], format='%Y%m')
# remove uncertain data (SURF_FLAG between 1 and 4), replace with NaN, then interpolate
for i in range(0, len(sal_data['SURF_SAL_PSU'])):
    if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <= 4):
        sal_data['SURF_SAL_PSU'][i] = np.nan
# BUG FIX: this loop previously re-checked the *salinity* flags and re-masked
# the salinity column (copy-paste error), so flagged temperature samples were
# never removed. It now masks flagged temperature samples instead.
# NOTE(review): assumes the temperature file also names its quality column
# 'SURF_FLAG' -- confirm against the raw file header.
for i in range(0, len(temp_data['SURF_TEMP_C'])):
    if (temp_data['SURF_FLAG'][i] >= 1) and (temp_data['SURF_FLAG'][i] <= 4):
        temp_data['SURF_TEMP_C'][i] = np.nan
# interpolate missing temp and sal data
sal_data['SURF_SAL_PSU'] = sal_data['SURF_SAL_PSU'].interpolate()
temp_data['SURF_TEMP_C'] = temp_data['SURF_TEMP_C'].interpolate()
# interpolate() cannot fill a leading NaN, so back-fill the first salinity sample
sal_data['SURF_SAL_PSU'][0] = sal_data['SURF_SAL_PSU'][1]
# remove the average from the sal and temp data and create new columns
sal_data['SURF_SAL_PSU_NOAVG'] = sal_data['SURF_SAL_PSU'] - sal_data['SURF_SAL_PSU'].mean()
temp_data['SURF_TEMP_C_NOAVG'] = temp_data['SURF_TEMP_C'] - temp_data['SURF_TEMP_C'].mean()
# remove trends from the sal and temp data and create new columns
# (degree-1 polynomial fitted against the integer sample index)
sal_fit = np.polyfit(sal_data.index,sal_data['SURF_SAL_PSU_NOAVG'],1)
sal_fit_fn = np.poly1d(sal_fit)
temp_fit = np.polyfit(temp_data.index,temp_data['SURF_TEMP_C_NOAVG'],1)
temp_fit_fn = np.poly1d(temp_fit)
sal_fit_value = sal_fit_fn(sal_data.index)
temp_fit_value = temp_fit_fn(temp_data.index)
sal_data['SURF_SAL_PSU_DETREND'] = sal_data['SURF_SAL_PSU_NOAVG'] - sal_fit_value
temp_data['SURF_TEMP_C_DETREND'] = temp_data['SURF_TEMP_C_NOAVG'] - temp_fit_value
# 30-day centered triangular running means to suppress short-period noise
sal_tri = sal_data['SURF_SAL_PSU_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
temp_tri = temp_data['SURF_TEMP_C_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
# # 1. FFT the SIO Data
# t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_data['SURF_TEMP_C_DETREND'])
# # 2. Apply butterworth filter to SIO data, with cutoff equal to nyquist freq of enso index
# fs = 1 # sampling frequency, once per day
# fc = 1/60 # cut-off frequency of the filter (cut off periods shorter than 60 days)
# w = fc / (fs / 2) #normalize the frequency
# b, a = signal.butter(4, w, 'low')
# temp_output = signal.filtfilt(b, a, t_spec)
# # 3. Inverse FFT of filtered SIO data
# temp_ifft = np.fft.irfft(temp_output,n=len(temp_output))
# # 4. Subsample new SIO time series with same delta t as ENSO index (once per month)
# temp_ifft_sampled = np.mean(temp_ifft[0:18750].reshape(-1, 30), axis=1)
# temp_ifft_len = temp_ifft_sampled[0:618]
# x = np.linspace(0,18770, 18770)
# plt.figure()
# plt.loglog(x, temp_ifft)
# plt.show()
# butterworth low pass filter for temperature and salinity
fs = 1 # sampling frequency, once per day
fc = 1/500 # cut-off frequency of the filter (cut off periods shorter than 500 days)
w = fc / (fs / 2) #normalize the frequency
b, a = signal.butter(4, w, 'low')
# filtfilt applies the filter forward and backward, so no phase shift is introduced
temp_output = signal.filtfilt(b, a, temp_tri)
sal_output = signal.filtfilt(b, a, sal_tri)
# block-average daily samples into 30-day means to match the monthly climate indices
temp_sampled = np.mean(temp_output[0:37530].reshape(-1, 30), axis=1) #length = 1251
# create dataframe with spectra for each variable
spectra_temp_df = pd.DataFrame(columns = ['Temp_freq', 'Temp_spec', 'Temp_fft'])
spectra_sal_df = pd.DataFrame(columns = ['Sal_freq', 'Sal_spec', 'Sal_fft'])
spectra_PDO_df = pd.DataFrame(columns = ['PDO_freq', 'PDO_spec', 'PDO_fft'])
spectra_ENSO_df = pd.DataFrame(columns = ['ENSO_freq', 'ENSO_spec', 'ENSO_fft'])
# for coherence, start all records at 1916-01-01
# ENSO data [20:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1254]
# Temp data [10:] 1916-09-01 onward, daily // ends 2019-05-31
# PDO data [752:] 1916-09-01 onward, monthly// ends now, thorugh 2019-05-01 [:1985]
# compute spectral variables for each variable
# NOTE(review): the offsets actually used below (PDO [743:], ENSO [14:]) differ
# from those quoted in the comment above -- confirm which alignment is intended.
for j in range(0,4):
    data_sets = [temp_sampled, sal_data['SURF_SAL_PSU_DETREND'], PDO_data['Value'][743:], ENSO_data_all['VALUE'][14:]]
    freq, spec, spec_amp, fft, delt, freq_T, freq_nyquist = SIO_mod.var_fft(data_sets[j])
    if j == 0:
        spectra_temp_df['Temp_freq'] = freq
        spectra_temp_df['Temp_spec'] = spec
        spectra_temp_df['Temp_fft'] = fft
    if j == 1:
        spectra_sal_df['Sal_freq'] = freq
        spectra_sal_df['Sal_spec'] = spec
        spectra_sal_df['Sal_fft'] = fft
    if j == 2:
        spectra_PDO_df['PDO_freq'] = freq
        spectra_PDO_df['PDO_spec'] = spec
        spectra_PDO_df['PDO_fft'] = fft
    if j == 3:
        spectra_ENSO_df['ENSO_freq'] = freq
        spectra_ENSO_df['ENSO_spec'] = spec
        spectra_ENSO_df['ENSO_fft'] = fft
def band_average(fft_var1,fft_var2,frequency,n_av):
    """Band-average the cross-spectrum of two FFT coefficient series.

    fft_var1 and fft_var2 are Fourier coefficients computed via fft; they can
    be the same variable (auto-spectrum) or different variables
    (cross-spectrum). n_av is the number of adjacent frequency bands averaged
    per output estimate (nice if it is an odd number). Returns
    (amplitude, phase in degrees, band-centre frequency, band count).
    Limited to 100,000 points but can easily be modified.
    """
    nmax=100000
    # define some variables and arrays
    n_spec=len(fft_var1)
    n_av2=int(n_av//2+1) #number of band averages/2 + 1
    spec_amp_av=np.zeros(nmax)
    spec_phase_av=np.zeros(nmax)
    freq_av=np.zeros(nmax)
    # raw cross-spectral amplitude and phase at every frequency band
    spectrum_amp=np.absolute(fft_var1*np.conj(fft_var2))
    spectrum_phase=np.angle(fft_var1*np.conj(fft_var2),deg=True)
    # average the lowest frequency bands first (with half as many points in the average)
    sum_low_amp=0.
    sum_low_phase=0.
    count=0
    for i in range(0,n_av2):
        sum_low_amp+=spectrum_amp[i]
        sum_low_phase+=spectrum_phase[i]
    spec_amp_av[0]=sum_low_amp/n_av2
    # BUG FIX: the low-frequency phase average previously divided the sum of
    # n_av2 samples by n_av, biasing the first phase estimate low; it now uses
    # the same divisor as the amplitude average.
    spec_phase_av[0]=sum_low_phase/n_av2
    # compute the rest of the averages over full n_av-wide bands
    for i in range(n_av2,n_spec-n_av,n_av):
        count+=1
        spec_amp_av[count]=np.mean(spectrum_amp[i:i+n_av])
        spec_phase_av[count]=np.mean(spectrum_phase[i:i+n_av])
        freq_av[count]=frequency[i+n_av//2]
    # contract the arrays to the bands actually filled
    # NOTE(review): [0:count] drops the last filled estimate (index == count);
    # kept as-is to preserve downstream array lengths -- confirm intent.
    spec_amp_av=spec_amp_av[0:count]
    spec_phase_av=spec_phase_av[0:count]
    freq_av=freq_av[0:count]
    return spec_amp_av,spec_phase_av,freq_av,count
n_av = 5
# define terms to compute coherence between temp and ENSO
t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
e_spec_b,e_phase_b,e_freq_av_b,count=band_average(spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_freq'],n_av)
e_fft_star = np.conj(spectra_ENSO_df['ENSO_fft'])
# NOTE(review): band_average already conjugates its second argument, so passing
# the pre-conjugated e_fft_star makes the product t_fft * ENSO_fft rather than
# t_fft * conj(ENSO_fft); the amplitude is unaffected but the phase sign flips
# -- confirm which convention is intended.
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,e_fft_star,spectra_ENSO_df['ENSO_freq'],n_av)
# squared coherence = |cross-spectrum|^2 / (auto-spectrum_T * auto-spectrum_ENSO)
coh_sq2=cospec_amp2**2/(t_spec_b*e_spec_b)
# define colors
t_color = 'cadetblue'
s_color = 'darkslateblue'
p_color = 'seagreen'
e_color = 'steelblue'
# annual frequency in radians/day, for marking on the plots
freq_ann = 2*np.pi/365.25
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and ENSO Index \nCoherence and Phase'
im_name = 'SIO_TempENSO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = e_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{ENSO}$')
# vertical markers: Nyquist frequency, fundamental frequency, annual cycle
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = e_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{ENSO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
n_av = 5
# define terms to compute coherence between temp and PDO
# (t_fft/t_spec_b are reused from the ENSO section above)
#t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
#t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
p_spec_b,p_phase_b,p_freq_av_b,count=band_average(spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_freq'],n_av)
p_fft_star = np.conj(spectra_PDO_df['PDO_fft'])
# NOTE(review): as in the ENSO section, band_average conjugates its second
# argument again, so the cross-spectrum phase sign may be flipped -- confirm.
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,p_fft_star,spectra_PDO_df['PDO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*p_spec_b)
# plot the coherence and phase between PDO and temperature
tstr = 'SIO Temperature and PDO Index \nCoherence and Phase'
im_name = 'SIO_TempPDO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = p_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{PDO}$')
# vertical markers: Nyquist frequency, fundamental frequency, annual cycle
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = p_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{PDO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
abfs/group_data_split.py | rcdilorenzo/abfs | 7 | 4866 | <gh_stars>1-10
from collections import namedtuple as Struct
from sklearn.model_selection import GroupShuffleSplit, ShuffleSplit
DataSplitConfig = Struct('DataSplitConfig', ['validation_size', 'test_size', 'random_seed'])
DEFAULT_SPLIT_CONFIG = DataSplitConfig(0.2, 0.2, 1337)
class GroupDataSplit:
    """Train/validation/test splitter that keeps every row sharing the same
    value of ``key`` inside a single partition (group-aware splitting via
    sklearn's GroupShuffleSplit)."""

    def __init__(self, df, key, config=DEFAULT_SPLIT_CONFIG):
        self.config = config
        self.key = key
        self._df = df
        self._split_data()

    @property
    def total(self):
        """Total records in the data frame"""
        return len(self._df)

    def train_df(self):
        """Randomized train data frame"""
        # Re-shuffled on every call, hence a method rather than a property.
        shuffled = self._train_df.sample(frac=1)
        return shuffled.reset_index(drop=True)

    @property
    def val_df(self):
        """Validation data frame"""
        return self._val_df

    @property
    def test_df(self):
        """Test data frame"""
        return self._test_df

    @property
    def test_split(self):
        """Group-aware split callable that carves out the test partition."""
        splitter = GroupShuffleSplit(test_size=self.config.test_size,
                                     random_state=self.config.random_seed)
        return splitter.split

    @property
    def val_split(self):
        """Group-aware split callable for validation, with the validation
        fraction rescaled to the remainder left once test rows are removed."""
        fraction = self.config.validation_size / (1 - self.config.test_size)
        splitter = GroupShuffleSplit(test_size=fraction,
                                     random_state=self.config.random_seed)
        return splitter.split

    def _split_data(self):
        # First peel off the test rows, then split the remainder into
        # train and validation partitions.
        remaining_idx, test_idx = next(
            self.test_split(self._df, groups=self._df[self.key])
        )
        remaining = self._df.iloc[remaining_idx]
        train_idx, val_idx = next(
            self.val_split(remaining, groups=remaining[self.key])
        )
        self._test_df = self._df.iloc[test_idx]
        self._val_df = remaining.iloc[val_idx]
        self._train_df = remaining.iloc[train_idx]
| 2.921875 | 3 |
mmcls/models/utils/se_layer.py | YuxinZou/mmclassification | 1,190 | 4867 | <gh_stars>1000+
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .make_divisible import make_divisible
class SELayer(BaseModule):
    """Squeeze-and-Excitation Module.

    Global average pooling squeezes each channel of the input to a single
    value; a two-layer 1x1-conv bottleneck (reduce, then expand back) turns
    those values into per-channel attention weights, which rescale the input
    feature map in ``forward``.

    Args:
        channels (int): The input (and output) channels of the SE layer.
        squeeze_channels (None or int): The intermediate channel number of
            SElayer. Default: None, means the value of ``squeeze_channels``
            is ``make_divisible(channels // ratio, divisor)``.
        ratio (int): Squeeze ratio in SELayer, the intermediate channel will
            be ``make_divisible(channels // ratio, divisor)``. Only used when
            ``squeeze_channels`` is None. Default: 16.
        divisor (int): The divisor to true divide the channel number. Only
            used when ``squeeze_channels`` is None. Default: 8.
        bias (bool or str): Bias setting forwarded to both ConvModules.
            Default: 'auto'.
        conv_cfg (None or dict): Config dict for convolution layer. Default:
            None, which means using conv2d.
        act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            A single dict configures both activation layers; a pair of dicts
            configures the squeeze layer and the excite layer respectively.
            Default: (dict(type='ReLU'), dict(type='Sigmoid'))
        init_cfg (dict, optional): Initialization config passed to BaseModule.
    """

    def __init__(self,
                 channels,
                 squeeze_channels=None,
                 ratio=16,
                 divisor=8,
                 bias='auto',
                 conv_cfg=None,
                 act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
                 init_cfg=None):
        super(SELayer, self).__init__(init_cfg)
        # A single activation config applies to both conv layers.
        if isinstance(act_cfg, dict):
            act_cfg = (act_cfg, act_cfg)
        assert len(act_cfg) == 2
        assert mmcv.is_tuple_of(act_cfg, dict)
        self.global_avgpool = nn.AdaptiveAvgPool2d(1)
        if squeeze_channels is None:
            squeeze_channels = make_divisible(channels // ratio, divisor)
        assert isinstance(squeeze_channels, int) and squeeze_channels > 0, \
            '"squeeze_channels" should be a positive integer, but get ' + \
            f'{squeeze_channels} instead.'
        # Both layers share everything except channel counts and activation.
        common = dict(kernel_size=1, stride=1, bias=bias, conv_cfg=conv_cfg)
        self.conv1 = ConvModule(
            in_channels=channels,
            out_channels=squeeze_channels,
            act_cfg=act_cfg[0],
            **common)
        self.conv2 = ConvModule(
            in_channels=squeeze_channels,
            out_channels=channels,
            act_cfg=act_cfg[1],
            **common)

    def forward(self, x):
        # Per-channel attention weights computed from globally pooled features.
        weights = self.conv2(self.conv1(self.global_avgpool(x)))
        return x * weights
| 2.96875 | 3 |
instagram/admin.py | James19stack/instagram-copy_cat | 0 | 4868 | <reponame>James19stack/instagram-copy_cat<filename>instagram/admin.py
from django.contrib import admin
from .models import Images,Comments,Profile
# Register your models here.
class CommentInline(admin.TabularInline):
    # Edit Comments inline (tabular layout) on the admin page of whichever
    # model lists this inline; show 3 blank extra comment forms.
    model=Comments
    extra=3
class ImageInline(admin.ModelAdmin):
    # NOTE(review): despite the "Inline" name this is a ModelAdmin for Images,
    # not an inline; consider renaming (kept as-is — it is registered below).
    # Each fieldset renders one field with no section heading.
    fieldsets=[
        (None,{'fields':['image']}),
        (None,{'fields':['image_name']}),
        (None,{'fields':['image_caption']}),
        (None,{'fields':['likes']}),
    ]
    # Allow editing an image's comments on the same admin page.
    inlines=[CommentInline]
# Branding for the admin site's header bar and browser title.
admin.site.site_header='InstaPost Admin'
admin.site.site_title='InstaPost Admin Dashboard'
# Register Images with its customized admin; Profile uses the default admin.
admin.site.register(Images,ImageInline)
admin.site.register(Profile)
| 1.773438 | 2 |
mandelbruh/util.py | pereradrian/mandelbruh | 0 | 4869 | import numpy as np
def normalize(x):
    """Return *x* scaled to unit Euclidean length."""
    length = np.linalg.norm(x)
    return x / length
def norm_sq(v):
    """Return the squared Euclidean norm of *v* (dot product of v with
    itself; avoids the sqrt that norm() performs)."""
    squared = np.dot(v, v)
    return squared
def norm(v):
    """Return the Euclidean (L2) norm of *v*."""
    return np.linalg.norm(v)
def get_sub_keys(v):
    """Return the string elements (variable names) of a tuple/list *v*;
    [] for any other input type.

    Exact type() checks (rather than isinstance) are deliberate — they
    mirror how vectors vs. named-variable references are encoded throughout
    this module.
    """
    if type(v) not in (tuple, list):
        return []
    return [item for item in v if type(item) is str]
def to_vec3(v):
    """Coerce *v* to a float32 3-vector.

    Scalars broadcast to all three components; any tuple/list containing
    variable-name strings is passed through unchanged.
    """
    if isinstance(v, (float, int)):
        return np.full(3, v, dtype=np.float32)
    if get_sub_keys(v):
        return v
    return np.array((v[0], v[1], v[2]), dtype=np.float32)
def to_str(x):
    """GLSL-source representation of *x*: bools become "1"/"0", sequences
    become vec3 literals, everything else falls back to str()."""
    if type(x) is bool:
        return "1" if x else "0"
    if isinstance(x, (list, tuple)):
        return vec3_str(x)
    return str(x)
def float_str(x):
    """Format *x* for GLSL: a string is a variable reference (prefixed with
    '_'), anything else is emitted as its literal str() form."""
    return '_' + x if type(x) is str else str(x)
def vec3_str(v):
    """GLSL vec3 literal for *v*: strings are variable references, scalars
    broadcast to all three components, sequences list each component."""
    if type(v) is str:
        return '_' + v
    if isinstance(v, (float, int)):
        return 'vec3(' + str(v) + ')'
    parts = ','.join(float_str(c) for c in (v[0], v[1], v[2]))
    return 'vec3(' + parts + ')'
def vec3_eq(v, val):
    """Component-wise equality of *v* against *val*; a string (a variable
    name) never equals a concrete vector."""
    if type(v) is str:
        return False
    return all(v[i] == val[i] for i in range(3))
def smin(a, b, k):
    """Polynomial smooth minimum of *a* and *b* with smoothing width *k*
    (the classic SDF blending function)."""
    t = max(0.0, min(1.0, 0.5 + 0.5 * (b - a) / k))
    return b * (1 - t) + a * t - k * t * (1.0 - t)
def get_global(k):
    """Resolve *k*: a string looks up the shared global-variable table, a
    tuple/list resolves element-wise into a float32 array, and any other
    value passes through unchanged."""
    if type(k) is str:
        return _mandelbruh_GLOBAL_VARS[k]
    if type(k) in (tuple, list):
        return np.array([get_global(elem) for elem in k], dtype=np.float32)
    return k
def set_global_float(k):
    """If *k* names a variable, register it in the global table with a 0.0
    default; return *k* unchanged either way."""
    if type(k) is str:
        _mandelbruh_GLOBAL_VARS[k] = 0.0
    return k
def set_global_vec3(k):
    """Register vec3-valued variables in the global table.

    A bare string registers a zero vector and is returned as the name; a
    scalar broadcasts to a vec3; otherwise any string components are
    registered as 0.0 scalars and the value is coerced with to_vec3().
    """
    if type(k) is str:
        _mandelbruh_GLOBAL_VARS[k] = to_vec3((0, 0, 0))
        return k
    if isinstance(k, (float, int)):
        return to_vec3(k)
    for name in get_sub_keys(k):
        _mandelbruh_GLOBAL_VARS[name] = 0.0
    return to_vec3(k)
def cond_offset(p):
    """GLSL snippet subtracting offset *p* from a vec4 position; empty
    string when the offset is the zero vector (a no-op)."""
    needs_term = type(p) is str or np.count_nonzero(p) > 0
    return (' - vec4(' + vec3_str(p) + ', 0)') if needs_term else ''
def cond_subtract(p):
    """GLSL snippet subtracting scalar *p*; empty string when p <= 0
    (a no-op)."""
    needs_term = type(p) is str or p > 0
    return (' - ' + float_str(p)) if needs_term else ''
def make_color(geo):
    """Build the vec4 GLSL color expression for *geo*: RGB from its color
    attribute (or the fractal orbit trap) plus the geometry's distance
    expression as the fourth component."""
    color = geo.color
    if type(color) in (tuple, np.ndarray):
        return 'vec4(' + vec3_str(color) + ', ' + geo.glsl() + ')'
    if color in ('orbit', 'o'):
        return 'vec4(orbit, ' + geo.glsl() + ')'
    raise Exception("Invalid coloring type")
# Shared name -> value table for user-declared shader variables; populated
# lazily by the set_global_* helpers above (those functions look the name up
# at call time, so defining the table last in the module is safe).
_mandelbruh_GLOBAL_VARS = {}
| 2.84375 | 3 |
core/recognizer.py | awen1988/yry | 129 | 4870 | """
recognize face landmark
"""
import json
import os
import requests
import numpy as np
# Index ranges into the 83-point landmark array produced by matrix_marks()
# below; the point ordering is fixed by that function and must stay in sync.
FACE_POINTS = list(range(0, 83))
# Face contour, left side -> chin -> right side.
JAW_POINTS = list(range(0, 19))
LEFT_EYE_POINTS = list(range(19, 29))
LEFT_BROW_POINTS = list(range(29, 37))
MOUTH_POINTS = list(range(37, 55))
NOSE_POINTS = list(range(55, 65))
RIGHT_EYE_POINTS = list(range(65, 75))
RIGHT_BROW_POINTS = list(range(75, 83))
# Half-face regions: one side's contour points plus part of the matching brow.
LEFT_FACE = list(range(0, 10)) + list(range(29, 34))
RIGHT_FACE = list(range(9, 19)) + list(range(75, 80))
JAW_END = 19
FACE_START = 0
FACE_END = 83
# Point groups used to build the face mask when overlaying one face onto another.
OVERLAY_POINTS = [
    LEFT_FACE,
    RIGHT_FACE,
    JAW_POINTS,
]
def face_points(image):
    """Return (matrix, points, err) face landmarks for *image*.

    The Face++ API response is cached next to the image in ``<image>.txt``
    so each photo is only uploaded once.

    Returns:
        tuple: (np.matrix of [x, y] rows, list of (x, y) int tuples, err)
        where err is 0 on success and 404 when no face was detected (the
        first two values are then None).
    """
    points = []
    txt = image + '.txt'
    if os.path.isfile(txt):
        # Cached response: the file holds the raw JSON; keep the last line
        # read, matching the single-line format written below.
        with open(txt) as file:
            for line in file:
                points = line
    elif os.path.isfile(image):
        points = landmarks_by_face__(image)
        with open(txt, 'w') as file:
            file.write(str(points))
    faces = json.loads(points)['faces']
    if not faces:
        # BUG FIX: previously err was set to 404 but execution still fell
        # through to faces[0], raising IndexError before the error code
        # could ever be returned. Return early instead.
        return None, None, 404
    matrix_list = np.matrix(matrix_marks(faces[0]['landmark']))
    point_list = [(int(p[0]), int(p[1])) for p in matrix_list.tolist()]
    return matrix_list, point_list, 0
def landmarks_by_face__(image):
    """Upload *image* to the Face++ detect API and return the response body.

    Returns:
        str: the JSON response text on HTTP 200; otherwise the raw response
        bytes (kept for backward compatibility with existing callers).
    """
    url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
    params = {
        'api_key': '<KEY>',
        'api_secret': '<KEY>',
        'return_landmark': 1,
    }
    # BUG FIX: the image file handle was opened without ever being closed;
    # the with-block guarantees it is released after the upload.
    with open(image, 'rb') as image_file:
        r = requests.post(url=url, files={'image_file': image_file},
                          data=params)
    if r.status_code == requests.codes.ok:
        return r.content.decode('utf-8')
    else:
        return r.content
def matrix_rectangle(left, top, width, height):
    """Eight anchor points of a rectangle: its four corners plus the
    midpoint of each edge, starting at the top-left and sweeping the top
    edge, the vertical midline, then the bottom edge."""
    right = left + width - 1
    bottom = top + height - 1
    mid_x = left + width / 2
    mid_y = top + height / 2
    return [
        (left, top),
        (mid_x, top),
        (right, top),
        (right, mid_y),
        (left, mid_y),
        (left, bottom),
        (mid_x, bottom),
        (right, bottom),
    ]
def matrix_marks(res):
    """Flatten a Face++ ``landmark`` dict into an ordered list of [x, y]
    pairs (83 points: jaw contour, left eye/brow, mouth, nose, right
    eye/brow).

    The ordering produced here defines the index constants at the top of
    this module (FACE_POINTS, JAW_POINTS, ...) and must not change.
    """
    eye_parts = ('bottom', 'center', 'left_corner', 'lower_left_quarter',
                 'lower_right_quarter', 'pupil', 'right_corner', 'top',
                 'upper_left_quarter', 'upper_right_quarter')
    brow_parts = ('left_corner', 'upper_left_quarter', 'upper_middle',
                  'upper_right_quarter', 'right_corner',
                  'lower_left_quarter', 'lower_middle',
                  'lower_right_quarter')
    mouth_parts = ('left_corner', 'lower_lip_bottom',
                   'lower_lip_left_contour1', 'lower_lip_left_contour2',
                   'lower_lip_left_contour3', 'lower_lip_right_contour1',
                   'lower_lip_right_contour2', 'lower_lip_right_contour3',
                   'lower_lip_top', 'right_corner', 'upper_lip_bottom',
                   'upper_lip_left_contour1', 'upper_lip_left_contour2',
                   'upper_lip_left_contour3', 'upper_lip_right_contour1',
                   'upper_lip_right_contour2', 'upper_lip_right_contour3',
                   'upper_lip_top')
    nose_parts = ('contour_left1', 'contour_left2', 'contour_left3',
                  'contour_lower_middle', 'contour_right1',
                  'contour_right2', 'contour_right3', 'left', 'right',
                  'tip')
    # Jaw: left contour points 1-9, the chin, then right contour 9 down to 1.
    keys = ['contour_left%d' % i for i in range(1, 10)]
    keys.append('contour_chin')
    keys += ['contour_right%d' % i for i in range(9, 0, -1)]
    keys += ['left_eye_' + p for p in eye_parts]
    keys += ['left_eyebrow_' + p for p in brow_parts]
    keys += ['mouth_' + p for p in mouth_parts]
    keys += ['nose_' + p for p in nose_parts]
    keys += ['right_eye_' + p for p in eye_parts]
    keys += ['right_eyebrow_' + p for p in brow_parts]
    return [[res[k]['x'], res[k]['y']] for k in keys]
| 3.125 | 3 |
magvar.py | rafidmorshedi/mag-dec-api | 0 | 4871 | import requests
import time
from bs4 import BeautifulSoup
import re
def decdeg2dms(dd):
    """Convert decimal degrees to a (degrees, minutes, seconds) tuple.

    For negative input only the most significant nonzero component carries
    the sign (e.g. -0.5 deg -> (0.0, -30.0, 0.0)), matching the convention
    expected by the geomagnetic web form queried in get_mag_var().
    """
    is_negative = dd < 0
    total_seconds = abs(dd) * 3600
    minutes, seconds = divmod(total_seconds, 60)
    degrees, minutes = divmod(minutes, 60)
    if is_negative:
        if degrees > 0:
            degrees = -degrees
        elif minutes > 0:
            minutes = -minutes
        else:
            seconds = -seconds
    return (degrees, minutes, seconds)
def get_mag_var(lat, lon, year, month, day, elev=0):
    """Return the magnetic declination at a particular point on earth.

    Scrapes the Geoscience Australia AGRF calculator web form, so a network
    connection is required and results are only valid for dates covered by
    that model.

    Arguments:
    lat -- latitude in decimal degrees (e.g. -34.6)
    lon -- longitude in decimal degrees (e.g. 150.8)
    year -- year (e.g. 2015)
    month -- month (e.g. 11)
    day -- day (e.g. 30)
    elev -- elevation in km (default 0)

    Returns:
    float -- magnetic declination in degrees, or the string
    'something went wrong' if the HTTP request fails.
    """
    (latd, latm, lats) = decdeg2dms(lat)
    (lond, lonm, lons) = decdeg2dms(lon)
    # 'Ein': 'D' asks the form for declination output.
    payload = {'latd': latd,'latm':latm,'lats':lats,'lond':lond,'lonm':lonm,
    'lons':lons,'elev':elev,'year':year,'month':month,'day':day,'Ein':'D'}
    url = 'http://www.ga.gov.au/oracle/cgi/geoAGRF.sh'
    # Sleep to avoid spamming server
    time.sleep(1)
    r = requests.get(url, params=payload)
    if r.status_code == 200:
        c = r.content
        soup = BeautifulSoup(c,'html.parser')
        # The declination value lives in the last <b> tag of the result page
        # — presumably stable markup; verify if the site changes. 
        deg_text = soup.find_all('b')[-1].text.strip()
        # strip out the junk so we have a number
        # Strip spaces before the search
        deg_text = deg_text.replace(" ","")
        deg = re.search(r'D=(.*?)deg', deg_text).group(1)
        deg = float(deg)
        return deg
    else:
        # NOTE(review): returning an error string forces callers to
        # type-check the result; consider raising an exception instead.
        return 'something went wrong'
google-cloud-sdk/lib/googlecloudsdk/third_party/apis/datacatalog/v1beta1/datacatalog_v1beta1_messages.py | bopopescu/Social-Lite | 0 | 4872 | <filename>google-cloud-sdk/lib/googlecloudsdk/third_party/apis/datacatalog/v1beta1/datacatalog_v1beta1_messages.py
"""Generated message classes for datacatalog version v1beta1.
A fully managed and highly scalable data discovery and metadata management
service.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'datacatalog'
class Binding(_messages.Message):
r"""Associates `members` with a `role`.
Fields:
condition: The condition that is associated with this binding. NOTE: An
unsatisfied condition will not allow user access via current binding.
Different bindings, including their conditions, are examined
independently.
members: Specifies the identities requesting access for a Cloud Platform
resource. `members` can have the following values: * `allUsers`: A
special identifier that represents anyone who is on the internet;
with or without a Google account. * `allAuthenticatedUsers`: A special
identifier that represents anyone who is authenticated with a Google
account or a service account. * `user:{emailid}`: An email address that
represents a specific Google account. For example,
`<EMAIL>` . * `serviceAccount:{emailid}`: An email address
that represents a service account. For example, `my-other-
<EMAIL>`. * `group:{emailid}`: An email address
that represents a Google group. For example, `<EMAIL>`. *
`deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
identifier) representing a user that has been recently deleted. For
example, `<EMAIL>?uid=123456789012345678901`. If the user is
recovered, this value reverts to `user:{emailid}` and the recovered user
retains the role in the binding. *
`deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address
(plus unique identifier) representing a service account that has been
recently deleted. For example, `my-other-
<EMAIL>?uid=123456789012345678901`. If the
service account is undeleted, this value reverts to
`serviceAccount:{emailid}` and the undeleted service account retains the
role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An
email address (plus unique identifier) representing a Google group
that has been recently deleted. For example,
`<EMAIL>?uid=123456789012345678901`. If the group is
recovered, this value reverts to `group:{emailid}` and the recovered
group retains the role in the binding. * `domain:{domain}`: The G
Suite domain (primary) that represents all the users of that domain.
For example, `google.com` or `example.com`.
role: Role that is assigned to `members`. For example, `roles/viewer`,
`roles/editor`, or `roles/owner`.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
class DatacatalogEntriesLookupRequest(_messages.Message):
r"""A DatacatalogEntriesLookupRequest object.
Fields:
linkedResource: The full name of the Google Cloud Platform resource the
Data Catalog entry represents. See:
https://cloud.google.com/apis/design/resource_names#full_resource_name.
Full names are case-sensitive. Examples: * //bigquery.googleapis.com/
projects/projectId/datasets/datasetId/tables/tableId *
//pubsub.googleapis.com/projects/projectId/topics/topicId
sqlResource: The SQL name of the entry. SQL names are case-sensitive.
Examples: * `cloud_pubsub.project_id.topic_id` *
``pubsub.project_id.`topic.id.with.dots` `` *
`bigquery.table.project_id.dataset_id.table_id` *
`bigquery.dataset.project_id.dataset_id` *
`datacatalog.entry.project_id.location_id.entry_group_id.entry_id`
`*_id`s shoud satisfy the standard SQL rules for identifiers.
https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical.
"""
linkedResource = _messages.StringField(1)
sqlResource = _messages.StringField(2)
class DatacatalogProjectsLocationsEntryGroupsCreateRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsCreateRequest object.
Fields:
entryGroupId: Required. The id of the entry group to create. The id must
begin with a letter or underscore, contain only English letters, numbers
and underscores, and be at most 64 characters.
googleCloudDatacatalogV1beta1EntryGroup: A
GoogleCloudDatacatalogV1beta1EntryGroup resource to be passed as the
request body.
parent: Required. The name of the project this entry group is in. Example:
* projects/{project_id}/locations/{location} Note that this EntryGroup
and its child resources may not actually be stored in the location in
this name.
"""
entryGroupId = _messages.StringField(1)
googleCloudDatacatalogV1beta1EntryGroup = _messages.MessageField('GoogleCloudDatacatalogV1beta1EntryGroup', 2)
parent = _messages.StringField(3, required=True)
class DatacatalogProjectsLocationsEntryGroupsDeleteRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsDeleteRequest object.
Fields:
force: Optional. If true, deletes all entries in the entry group.
name: Required. The name of the entry group. For example,
`projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
`.
"""
force = _messages.BooleanField(1)
name = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesCreateRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesCreateRequest object.
Fields:
entryId: Required. The id of the entry to create.
googleCloudDatacatalogV1beta1Entry: A GoogleCloudDatacatalogV1beta1Entry
resource to be passed as the request body.
parent: Required. The name of the entry group this entry is in. Example:
*
projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
Note that this Entry and its child resources may not actually be stored
in the location in this name.
"""
entryId = _messages.StringField(1)
googleCloudDatacatalogV1beta1Entry = _messages.MessageField('GoogleCloudDatacatalogV1beta1Entry', 2)
parent = _messages.StringField(3, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesDeleteRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesDeleteRequest object.
Fields:
name: Required. The name of the entry. Example: * projects/{project_id}/l
ocations/{location}/entryGroups/{entry_group_id}/entries/{entry_id}
"""
name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesGetIamPolicyRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesGetIamPolicyRequest
object.
Fields:
getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
request body.
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
resource = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesGetRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesGetRequest object.
Fields:
name: Required. The name of the entry. Example: * projects/{project_id}/l
ocations/{location}/entryGroups/{entry_group_id}/entries/{entry_id}
Entry groups are logical groupings of entries. Currently, users cannot
create/modify entry groups. They are created by Data Catalog; they
include `@bigquery` for all BigQuery entries, and `@pubsub` for all
Cloud Pub/Sub entries.
"""
name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesPatchRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesPatchRequest object.
Fields:
googleCloudDatacatalogV1beta1Entry: A GoogleCloudDatacatalogV1beta1Entry
resource to be passed as the request body.
name: The Data Catalog resource name of the entry in URL format. Example:
* projects/{project_id}/locations/{location}/entryGroups/{entry_group_id
}/entries/{entry_id} Note that this Entry and its child resources may
not actually be stored in the location in this name.
updateMask: The fields to update on the entry. If absent or empty, all
modifiable fields are updated. The following fields are modifiable: *
For entries with type `DATA_STREAM`: * `schema` * For entries with
type `FILESET` * `schema` * `display_name` * `description` *
`gcs_fileset_spec` * `gcs_fileset_spec.file_patterns`
"""
googleCloudDatacatalogV1beta1Entry = _messages.MessageField('GoogleCloudDatacatalogV1beta1Entry', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class DatacatalogProjectsLocationsEntryGroupsEntriesTagsCreateRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsCreateRequest
object.
Fields:
googleCloudDatacatalogV1beta1Tag: A GoogleCloudDatacatalogV1beta1Tag
resource to be passed as the request body.
parent: Required. The name of the resource to attach this tag to. Tags can
be attached to Entries. Example: * projects/{project_id}/locations/{loc
ation}/entryGroups/{entry_group_id}/entries/{entry_id} Note that this
Tag and its child resources may not actually be stored in the location
in this name.
"""
googleCloudDatacatalogV1beta1Tag = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 1)
parent = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesTagsDeleteRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsDeleteRequest
object.
Fields:
name: Required. The name of the tag to delete. Example: * projects/{proje
ct_id}/locations/{location}/entryGroups/{entry_group_id}/entries/{entry_
id}/tags/{tag_id}
"""
name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesTagsListRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsListRequest object.
Fields:
pageSize: The maximum number of tags to return. Default is 10. Max limit
is 1000.
pageToken: Token that specifies which page is requested. If empty, the
first page is returned.
parent: Required. The name of the Data Catalog resource to list the tags
of. The resource could be an Entry.
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class DatacatalogProjectsLocationsEntryGroupsEntriesTagsPatchRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsPatchRequest object.
Fields:
googleCloudDatacatalogV1beta1Tag: A GoogleCloudDatacatalogV1beta1Tag
resource to be passed as the request body.
name: The resource name of the tag in URL format. Example: * projects/{pr
oject_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{ent
ry_id}/tags/{tag_id} where `tag_id` is a system-generated identifier.
Note that this Tag may not actually be stored in the location in this
name.
updateMask: The fields to update on the Tag. If absent or empty, all
modifiable fields are updated. Currently the only modifiable field is
the field `fields`.
"""
googleCloudDatacatalogV1beta1Tag = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 1)
name = _messages.StringField(2, required=True)
updateMask = _messages.StringField(3)
class DatacatalogProjectsLocationsEntryGroupsEntriesTestIamPermissionsRequest(_messages.Message):
r"""A
DatacatalogProjectsLocationsEntryGroupsEntriesTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DatacatalogProjectsLocationsEntryGroupsGetIamPolicyRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsGetIamPolicyRequest object.
Fields:
getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
request body.
resource: REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this
field.
"""
getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
resource = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsEntryGroupsGetRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsGetRequest object.
Fields:
name: Required. The name of the entry group. For example,
`projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
`.
readMask: The fields to return. If not set or empty, all fields are
returned.
"""
name = _messages.StringField(1, required=True)
readMask = _messages.StringField(2)
class DatacatalogProjectsLocationsEntryGroupsSetIamPolicyRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsSetIamPolicyRequest object.
Fields:
resource: REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this
field.
setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
request body.
"""
resource = _messages.StringField(1, required=True)
setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DatacatalogProjectsLocationsEntryGroupsTestIamPermissionsRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsEntryGroupsTestIamPermissionsRequest
object.
Fields:
resource: REQUIRED: The resource for which the policy detail is being
requested. See the operation documentation for the appropriate value for
this field.
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
passed as the request body.
"""
resource = _messages.StringField(1, required=True)
testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DatacatalogProjectsLocationsTagTemplatesCreateRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsTagTemplatesCreateRequest object.
Fields:
googleCloudDatacatalogV1beta1TagTemplate: A
GoogleCloudDatacatalogV1beta1TagTemplate resource to be passed as the
request body.
parent: Required. The name of the project and the location this template
is in. Example: * projects/{project_id}/locations/{location}
TagTemplate and its child resources may not actually be stored in the
location in this name.
tagTemplateId: Required. The id of the tag template to create.
"""
googleCloudDatacatalogV1beta1TagTemplate = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplate', 1)
parent = _messages.StringField(2, required=True)
tagTemplateId = _messages.StringField(3)
class DatacatalogProjectsLocationsTagTemplatesDeleteRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsTagTemplatesDeleteRequest object.
Fields:
force: Required. Currently, this field must always be set to `true`. This
confirms the deletion of any possible tags using this template. `force =
false` will be supported in the future.
name: Required. The name of the tag template to delete. Example: *
projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id
}
"""
force = _messages.BooleanField(1)
name = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsTagTemplatesFieldsCreateRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsTagTemplatesFieldsCreateRequest object.
Fields:
googleCloudDatacatalogV1beta1TagTemplateField: A
GoogleCloudDatacatalogV1beta1TagTemplateField resource to be passed as
the request body.
parent: Required. The name of the project this template is in. Example: *
projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id
} Note that this TagTemplateField may not actually be stored in the
location in this name.
tagTemplateFieldId: Required. The ID of the tag template field to create.
Field ids can contain letters (both uppercase and lowercase), numbers
(0-9), underscores (_) and dashes (-). Field IDs must be at least 1
character long and at most 128 characters long. Field IDs must also be
unique within their template.
"""
googleCloudDatacatalogV1beta1TagTemplateField = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplateField', 1)
parent = _messages.StringField(2, required=True)
tagTemplateFieldId = _messages.StringField(3)
class DatacatalogProjectsLocationsTagTemplatesFieldsDeleteRequest(_messages.Message):
r"""A DatacatalogProjectsLocationsTagTemplatesFieldsDeleteRequest object.
Fields:
force: Required. Currently, this field must always be set to `true`. This
confirms the deletion of this field from any tags using this field.
`force = false` will be supported in the future.
name: Required. The name of the tag template field to delete. Example: *
projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id
}/fields/{tag_template_field_id}
"""
force = _messages.BooleanField(1)
name = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsTagTemplatesFieldsPatchRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTagTemplatesFieldsPatchRequest object.

  Fields:
    googleCloudDatacatalogV1beta1TagTemplateField: A
      GoogleCloudDatacatalogV1beta1TagTemplateField resource to be passed as
      the request body.
    name: Required. The name of the tag template field. Example: * projects/{
      project_id}/locations/{location}/tagTemplates/{tag_template_id}/fields/{
      tag_template_field_id}
    updateMask: Optional. The field mask specifies the parts of the template
      to be updated. Allowed fields: * `display_name` * `type.enum_type`
      * `is_required` If `update_mask` is not set or empty, all of the
      allowed fields above will be updated. When updating an enum type, the
      provided values will be merged with the existing values. Therefore, enum
      values can only be added, existing enum values cannot be deleted nor
      renamed. Updating a template field from optional to required is NOT
      allowed.
  """

  googleCloudDatacatalogV1beta1TagTemplateField = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplateField', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class DatacatalogProjectsLocationsTagTemplatesFieldsRenameRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTagTemplatesFieldsRenameRequest object.

  Fields:
    googleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest: A
      GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest resource to
      be passed as the request body.
    name: Required. The name of the tag template. Example: * projects/{projec
      t_id}/locations/{location}/tagTemplates/{tag_template_id}/fields/{tag_te
      mplate_field_id}
  """

  googleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest = _messages.MessageField('GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest', 1)
  name = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsTagTemplatesGetIamPolicyRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTagTemplatesGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See the operation documentation for the appropriate value for this
      field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsTagTemplatesGetRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTagTemplatesGetRequest object.

  Fields:
    name: Required. The name of the tag template. Example: *
      projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id
      }
  """

  name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsTagTemplatesPatchRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTagTemplatesPatchRequest object.

  Fields:
    googleCloudDatacatalogV1beta1TagTemplate: A
      GoogleCloudDatacatalogV1beta1TagTemplate resource to be passed as the
      request body.
    name: The resource name of the tag template in URL format. Example: *
      projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id
      } Note that this TagTemplate and its child resources may not actually
      be stored in the location in this name.
    updateMask: The field mask specifies the parts of the template to
      overwrite. Allowed fields: * `display_name` If absent or empty, all
      of the allowed fields above will be updated.
  """

  googleCloudDatacatalogV1beta1TagTemplate = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplate', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class DatacatalogProjectsLocationsTagTemplatesSetIamPolicyRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTagTemplatesSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See the operation documentation for the appropriate value for this
      field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DatacatalogProjectsLocationsTagTemplatesTestIamPermissionsRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTagTemplatesTestIamPermissionsRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See the operation documentation for the appropriate value for
      this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DatacatalogProjectsLocationsTaxonomiesCreateRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesCreateRequest object.

  Fields:
    googleCloudDatacatalogV1beta1Taxonomy: A
      GoogleCloudDatacatalogV1beta1Taxonomy resource to be passed as the
      request body.
    parent: Required. Resource name of the project that the taxonomy will
      belong to.
  """

  googleCloudDatacatalogV1beta1Taxonomy = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 1)
  parent = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsTaxonomiesDeleteRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesDeleteRequest object.

  Fields:
    name: Required. Resource name of the taxonomy to be deleted. All policy
      tags in this taxonomy will also be deleted.
  """

  name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsTaxonomiesExportRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesExportRequest object.

  Fields:
    parent: Required. Resource name of the project that taxonomies to be
      exported will share.
    serializedTaxonomies: Export taxonomies as serialized taxonomies.
    taxonomies: Required. Resource names of the taxonomies to be exported.
  """

  parent = _messages.StringField(1, required=True)
  serializedTaxonomies = _messages.BooleanField(2)
  taxonomies = _messages.StringField(3, repeated=True)
class DatacatalogProjectsLocationsTaxonomiesGetIamPolicyRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesGetIamPolicyRequest object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See the operation documentation for the appropriate value for this
      field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsTaxonomiesGetRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesGetRequest object.

  Fields:
    name: Required. Resource name of the requested taxonomy.
  """

  name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsTaxonomiesImportRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesImportRequest object.

  Fields:
    googleCloudDatacatalogV1beta1ImportTaxonomiesRequest: A
      GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest resource to be
      passed as the request body.
    parent: Required. Resource name of project that the newly created
      taxonomies will belong to.
  """

  googleCloudDatacatalogV1beta1ImportTaxonomiesRequest = _messages.MessageField('GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest', 1)
  parent = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsTaxonomiesListRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesListRequest object.

  Fields:
    pageSize: The maximum number of items to return. Must be a value between 1
      and 1000. If not set, defaults to 50.
    pageToken: The next_page_token value returned from a previous list
      request, if any. If not set, defaults to an empty string.
    parent: Required. Resource name of the project to list the taxonomies of.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
class DatacatalogProjectsLocationsTaxonomiesPatchRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesPatchRequest object.

  Fields:
    googleCloudDatacatalogV1beta1Taxonomy: A
      GoogleCloudDatacatalogV1beta1Taxonomy resource to be passed as the
      request body.
    name: Output only. Resource name of this taxonomy, whose format is:
      "projects/{project_number}/locations/{location_id}/taxonomies/{id}".
    updateMask: The update mask applies to the resource. For the `FieldMask`
      definition, see https://developers.google.com/protocol-
      buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to
      all of the fields that are allowed to update.
  """

  googleCloudDatacatalogV1beta1Taxonomy = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class DatacatalogProjectsLocationsTaxonomiesPolicyTagsCreateRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsCreateRequest object.

  Fields:
    googleCloudDatacatalogV1beta1PolicyTag: A
      GoogleCloudDatacatalogV1beta1PolicyTag resource to be passed as the
      request body.
    parent: Required. Resource name of the taxonomy that the policy tag will
      belong to.
  """

  googleCloudDatacatalogV1beta1PolicyTag = _messages.MessageField('GoogleCloudDatacatalogV1beta1PolicyTag', 1)
  parent = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsTaxonomiesPolicyTagsDeleteRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsDeleteRequest object.

  Fields:
    name: Required. Resource name of the policy tag to be deleted. All of its
      descendant policy tags will also be deleted.
  """

  name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetIamPolicyRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetIamPolicyRequest
  object.

  Fields:
    getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the
      request body.
    resource: REQUIRED: The resource for which the policy is being requested.
      See the operation documentation for the appropriate value for this
      field.
  """

  getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1)
  resource = _messages.StringField(2, required=True)
class DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetRequest object.

  Fields:
    name: Required. Resource name of the requested policy tag.
  """

  name = _messages.StringField(1, required=True)
class DatacatalogProjectsLocationsTaxonomiesPolicyTagsListRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsListRequest object.

  Fields:
    pageSize: The maximum number of items to return. Must be a value between 1
      and 1000. If not set, defaults to 50.
    pageToken: The next_page_token value returned from a previous List
      request, if any. If not set, defaults to an empty string.
    parent: Required. Resource name of the taxonomy to list the policy tags
      of.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
class DatacatalogProjectsLocationsTaxonomiesPolicyTagsPatchRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsPatchRequest object.

  Fields:
    googleCloudDatacatalogV1beta1PolicyTag: A
      GoogleCloudDatacatalogV1beta1PolicyTag resource to be passed as the
      request body.
    name: Output only. Resource name of this policy tag, whose format is: "pro
      jects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/
      policyTags/{id}".
    updateMask: The update mask applies to the resource. Only display_name,
      description and parent_policy_tag can be updated and thus can be listed
      in the mask. If update_mask is not provided, all allowed fields (i.e.
      display_name, description and parent) will be updated. For more
      information including the `FieldMask` definition, see
      https://developers.google.com/protocol-
      buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to
      all of the fields that are allowed to update.
  """

  googleCloudDatacatalogV1beta1PolicyTag = _messages.MessageField('GoogleCloudDatacatalogV1beta1PolicyTag', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class DatacatalogProjectsLocationsTaxonomiesPolicyTagsSetIamPolicyRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsSetIamPolicyRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See the operation documentation for the appropriate value for this
      field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DatacatalogProjectsLocationsTaxonomiesPolicyTagsTestIamPermissionsRequest(_messages.Message):
  r"""A
  DatacatalogProjectsLocationsTaxonomiesPolicyTagsTestIamPermissionsRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See the operation documentation for the appropriate value for
      this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class DatacatalogProjectsLocationsTaxonomiesSetIamPolicyRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesSetIamPolicyRequest object.

  Fields:
    resource: REQUIRED: The resource for which the policy is being specified.
      See the operation documentation for the appropriate value for this
      field.
    setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the
      request body.
  """

  resource = _messages.StringField(1, required=True)
  setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2)
class DatacatalogProjectsLocationsTaxonomiesTestIamPermissionsRequest(_messages.Message):
  r"""A DatacatalogProjectsLocationsTaxonomiesTestIamPermissionsRequest
  object.

  Fields:
    resource: REQUIRED: The resource for which the policy detail is being
      requested. See the operation documentation for the appropriate value for
      this field.
    testIamPermissionsRequest: A TestIamPermissionsRequest resource to be
      passed as the request body.
  """

  resource = _messages.StringField(1, required=True)
  testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2)
class Empty(_messages.Message):
  r"""A generic empty message that you can re-use to avoid defining duplicated
  empty messages in your APIs. A typical example is to use it as the request
  or the response type of an API method. For instance:      service Foo {
  rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);    }  The
  JSON representation for `Empty` is empty JSON object `{}`.
  """
  # Intentionally has no fields.
class Expr(_messages.Message):
  r"""Represents a textual expression in the Common Expression Language (CEL)
  syntax. CEL is a C-like expression language. The syntax and semantics of CEL
  are documented at https://github.com/google/cel-spec.

  Example (Comparison):
      title: "Summary size limit"
      description: "Determines if a summary is less than 100 chars"
      expression: "document.summary.size() < 100"

  Example (Equality):
      title: "Requestor is owner"
      description: "Determines if requestor is the document owner"
      expression: "document.owner == request.auth.claims.email"

  Example (Logic):
      title: "Public documents"
      description: "Determine whether the document should be publicly visible"
      expression: "document.type != 'private' && document.type != 'internal'"

  Example (Data Manipulation):
      title: "Notification string"
      description: "Create a notification string with a timestamp."
      expression: "'New message received at ' + string(document.create_time)"

  The exact variables and functions that may be referenced within an
  expression are determined by the service that evaluates it. See the service
  documentation for additional information.

  Fields:
    description: Optional. Description of the expression. This is a longer
      text which describes the expression, e.g. when hovered over it in a UI.
    expression: Textual representation of an expression in Common Expression
      Language syntax.
    location: Optional. String indicating the location of the expression for
      error reporting, e.g. a file name and a position in the file.
    title: Optional. Title for the expression, i.e. a short string describing
      its purpose. This can be used e.g. in UIs which allow to enter the
      expression.
  """

  description = _messages.StringField(1)
  expression = _messages.StringField(2)
  location = _messages.StringField(3)
  title = _messages.StringField(4)
class GetIamPolicyRequest(_messages.Message):
  r"""Request message for `GetIamPolicy` method.

  Fields:
    options: OPTIONAL: A `GetPolicyOptions` object for specifying options to
      `GetIamPolicy`. This field is only used by Cloud IAM.
  """

  options = _messages.MessageField('GetPolicyOptions', 1)
class GetPolicyOptions(_messages.Message):
  r"""Encapsulates settings provided to GetIamPolicy.

  Fields:
    requestedPolicyVersion: Optional. The policy format version to be
      returned. Valid values are 0, 1, and 3. Requests specifying an invalid
      value will be rejected. Requests for policies with any conditional
      bindings must specify version 3. Policies without any conditional
      bindings may specify any valid value or leave the field unset.
  """

  requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32)
class GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpec(_messages.Message):
  r"""Spec for a group of BigQuery tables with name pattern
  `[prefix]YYYYMMDD`. Context: https://cloud.google.com/bigquery/docs
  /partitioned-tables#partitioning_versus_sharding

  Fields:
    dataset: Output only. The Data Catalog resource name of the dataset entry
      the current table belongs to, for example, `projects/{project_id}/locati
      ons/{location}/entrygroups/{entry_group_id}/entries/{entry_id}`.
    shardCount: Output only. Total number of shards.
    tablePrefix: Output only. The table name prefix of the shards. The name of
      any given shard is `[table_prefix]YYYYMMDD`, for example, for shard
      `MyTable20180101`, the `table_prefix` is `MyTable`.
  """

  dataset = _messages.StringField(1)
  shardCount = _messages.IntegerField(2)
  tablePrefix = _messages.StringField(3)
class GoogleCloudDatacatalogV1beta1BigQueryTableSpec(_messages.Message):
  r"""Describes a BigQuery table.

  Enums:
    TableSourceTypeValueValuesEnum: Output only. The table source type.

  Fields:
    tableSourceType: Output only. The table source type.
    tableSpec: Spec of a BigQuery table. This field should only be populated
      if `table_source_type` is `BIGQUERY_TABLE`.
    viewSpec: Table view specification. This field should only be populated if
      `table_source_type` is `BIGQUERY_VIEW`.
  """

  class TableSourceTypeValueValuesEnum(_messages.Enum):
    r"""Output only. The table source type.

    Values:
      TABLE_SOURCE_TYPE_UNSPECIFIED: Default unknown type.
      BIGQUERY_VIEW: Table view.
      BIGQUERY_TABLE: BigQuery native table.
    """
    TABLE_SOURCE_TYPE_UNSPECIFIED = 0
    BIGQUERY_VIEW = 1
    BIGQUERY_TABLE = 2

  tableSourceType = _messages.EnumField('TableSourceTypeValueValuesEnum', 1)
  tableSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1TableSpec', 2)
  viewSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1ViewSpec', 3)
class GoogleCloudDatacatalogV1beta1ColumnSchema(_messages.Message):
  r"""Representation of a column within a schema. Columns could be nested
  inside other columns.

  Fields:
    column: Required. Name of the column.
    description: Optional. Description of the column. Default value is an
      empty string.
    mode: Optional. A column's mode indicates whether the values in this
      column are required, nullable, etc. Only `NULLABLE`, `REQUIRED` and
      `REPEATED` are supported. Default mode is `NULLABLE`.
    subcolumns: Optional. Schema of sub-columns. A column can have zero or
      more sub-columns.
    type: Required. Type of the column.
  """

  column = _messages.StringField(1)
  description = _messages.StringField(2)
  mode = _messages.StringField(3)
  # Recursive message field: sub-columns reuse this same schema type.
  subcolumns = _messages.MessageField('GoogleCloudDatacatalogV1beta1ColumnSchema', 4, repeated=True)
  type = _messages.StringField(5)
class GoogleCloudDatacatalogV1beta1Entry(_messages.Message):
  r"""Entry Metadata. A Data Catalog Entry resource represents another
  resource in Google Cloud Platform, such as a BigQuery dataset or a Cloud
  Pub/Sub topic. Clients can use the `linked_resource` field in the Entry
  resource to refer to the original resource ID of the source system.  An
  Entry resource contains resource details, such as its schema. An Entry can
  also be used to attach flexible metadata, such as a Tag.

  Enums:
    TypeValueValuesEnum: The type of the entry.

  Fields:
    bigqueryDateShardedSpec: Specification for a group of BigQuery tables with
      name pattern `[prefix]YYYYMMDD`. Context:
      https://cloud.google.com/bigquery/docs/partitioned-
      tables#partitioning_versus_sharding.
    bigqueryTableSpec: Specification that applies to a BigQuery table. This is
      only valid on entries of type `TABLE`.
    description: Entry description, which can consist of several sentences or
      paragraphs that describe entry contents. Default value is an empty
      string.
    displayName: Display information such as title and description. A short
      name to identify the entry, for example, "Analytics Data - Jan 2011".
      Default value is an empty string.
    gcsFilesetSpec: Specification that applies to a Cloud Storage fileset.
      This is only valid on entries of type FILESET.
    linkedResource: Output only. The resource this metadata entry refers to.
      For Google Cloud Platform resources, `linked_resource` is the [full name
      of the resource](https://cloud.google.com/apis/design/resource_names#ful
      l_resource_name). For example, the `linked_resource` for a table
      resource from BigQuery is: * //bigquery.googleapis.com/projects/project
      Id/datasets/datasetId/tables/tableId
    name: The Data Catalog resource name of the entry in URL format. Example:
      * projects/{project_id}/locations/{location}/entryGroups/{entry_group_id
      }/entries/{entry_id} Note that this Entry and its child resources may
      not actually be stored in the location in this name.
    schema: Schema of the entry. An entry might not have any schema attached
      to it.
    sourceSystemTimestamps: Output only. Timestamps about the underlying
      Google Cloud Platform resource, not about this Data Catalog Entry.
    type: The type of the entry.
  """

  class TypeValueValuesEnum(_messages.Enum):
    r"""The type of the entry.

    Values:
      ENTRY_TYPE_UNSPECIFIED: Default unknown type
      TABLE: Output only. The type of entry that has a GoogleSQL schema,
        including logical views.
      MODEL: Output only. The type of models.
      DATA_STREAM: Output only. An entry type which is used for streaming
        entries. Example: Cloud Pub/Sub topic.
      FILESET: Alpha feature. An entry type which is a set of files or
        objects. Example: Cloud Storage fileset.
    """
    ENTRY_TYPE_UNSPECIFIED = 0
    TABLE = 1
    MODEL = 2
    DATA_STREAM = 3
    FILESET = 4

  bigqueryDateShardedSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpec', 1)
  bigqueryTableSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1BigQueryTableSpec', 2)
  description = _messages.StringField(3)
  displayName = _messages.StringField(4)
  gcsFilesetSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1GcsFilesetSpec', 5)
  linkedResource = _messages.StringField(6)
  name = _messages.StringField(7)
  schema = _messages.MessageField('GoogleCloudDatacatalogV1beta1Schema', 8)
  sourceSystemTimestamps = _messages.MessageField('GoogleCloudDatacatalogV1beta1SystemTimestamps', 9)
  type = _messages.EnumField('TypeValueValuesEnum', 10)
class GoogleCloudDatacatalogV1beta1EntryGroup(_messages.Message):
  r"""EntryGroup Metadata. An EntryGroup resource represents a logical
  grouping of zero or more Data Catalog Entry resources.

  Fields:
    dataCatalogTimestamps: Output only. Timestamps about this EntryGroup.
      Default value is empty timestamps.
    description: Entry group description, which can consist of several
      sentences or paragraphs that describe entry group contents. Default
      value is an empty string.
    displayName: A short name to identify the entry group, for example,
      "analytics data - jan 2011". Default value is an empty string.
    name: The resource name of the entry group in URL format. Example: *
      projects/{project_id}/locations/{location}/entryGroups/{entry_group_id}
      Note that this EntryGroup and its child resources may not actually be
      stored in the location in this name.
  """

  dataCatalogTimestamps = _messages.MessageField('GoogleCloudDatacatalogV1beta1SystemTimestamps', 1)
  description = _messages.StringField(2)
  displayName = _messages.StringField(3)
  name = _messages.StringField(4)
class GoogleCloudDatacatalogV1beta1ExportTaxonomiesResponse(_messages.Message):
  r"""Response message for ExportTaxonomies.

  Fields:
    taxonomies: List of taxonomies and policy tags in a tree structure.
  """

  taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedTaxonomy', 1, repeated=True)
class GoogleCloudDatacatalogV1beta1FieldType(_messages.Message):
  r"""A GoogleCloudDatacatalogV1beta1FieldType object.

  Enums:
    PrimitiveTypeValueValuesEnum: Represents primitive types - string, bool
      etc.

  Fields:
    enumType: Represents an enum type.
    primitiveType: Represents primitive types - string, bool etc.
  """

  class PrimitiveTypeValueValuesEnum(_messages.Enum):
    r"""Represents primitive types - string, bool etc.

    Values:
      PRIMITIVE_TYPE_UNSPECIFIED: This is the default invalid value for a
        type.
      DOUBLE: A double precision number.
      STRING: An UTF-8 string.
      BOOL: A boolean value.
      TIMESTAMP: A timestamp.
    """
    PRIMITIVE_TYPE_UNSPECIFIED = 0
    DOUBLE = 1
    STRING = 2
    BOOL = 3
    TIMESTAMP = 4

  enumType = _messages.MessageField('GoogleCloudDatacatalogV1beta1FieldTypeEnumType', 1)
  primitiveType = _messages.EnumField('PrimitiveTypeValueValuesEnum', 2)
class GoogleCloudDatacatalogV1beta1FieldTypeEnumType(_messages.Message):
  r"""A GoogleCloudDatacatalogV1beta1FieldTypeEnumType object.

  Fields:
    allowedValues: Required on create; optional on update. The set of allowed
      values for this enum. This set must not be empty, the display names of
      the values in this set must not be empty and the display names of the
      values must be case-insensitively unique within this set. Currently,
      enum values can only be added to the list of allowed values. Deletion
      and renaming of enum values are not supported. Can have up to 500
      allowed values.
  """

  allowedValues = _messages.MessageField('GoogleCloudDatacatalogV1beta1FieldTypeEnumTypeEnumValue', 1, repeated=True)
class GoogleCloudDatacatalogV1beta1FieldTypeEnumTypeEnumValue(_messages.Message):
  r"""A GoogleCloudDatacatalogV1beta1FieldTypeEnumTypeEnumValue object.

  Fields:
    displayName: Required. The display name of the enum value. Must not be an
      empty string.
  """

  displayName = _messages.StringField(1)
class GoogleCloudDatacatalogV1beta1GcsFileSpec(_messages.Message):
  r"""Specifications of a single file in Cloud Storage.

  Fields:
    filePath: Required. The full file path. Example:
      `gs://bucket_name/a/b.txt`.
    gcsTimestamps: Output only. Timestamps about the Cloud Storage file.
    sizeBytes: Output only. The size of the file, in bytes.
  """

  filePath = _messages.StringField(1)
  gcsTimestamps = _messages.MessageField('GoogleCloudDatacatalogV1beta1SystemTimestamps', 2)
  sizeBytes = _messages.IntegerField(3)
class GoogleCloudDatacatalogV1beta1GcsFilesetSpec(_messages.Message):
  r"""Describes a Cloud Storage fileset entry.

  Fields:
    filePatterns: Required. Patterns to identify a set of files in Google
      Cloud Storage. See [Cloud Storage
      documentation](/storage/docs/gsutil/addlhelp/WildcardNames) for more
      information. Note that bucket wildcards are currently not supported.
      Examples of valid file_patterns:  * `gs://bucket_name/dir/*`: matches
      all files within `bucket_name/dir`
      directory. * `gs://bucket_name/dir/**`: matches all files in
      `bucket_name/dir` spanning all
      subdirectories. * `gs://bucket_name/file*`: matches files prefixed by
      `file` in `bucket_name` *
      `gs://bucket_name/??.txt`: matches files with two characters followed by
      `.txt` in `bucket_name` * `gs://bucket_name/[aeiou].txt`: matches files
      that contain a single vowel character
      followed by `.txt` in `bucket_name`
      * `gs://bucket_name/[a-m].txt`: matches files that contain `a`, `b`, ...
      or `m` followed by `.txt` in `bucket_name` * `gs://bucket_name/a/*/b`:
      matches all files in `bucket_name` that match
      `a/*/b` pattern, such as `a/c/b`, `a/d/b` *
      `gs://another_bucket/a.txt`: matches `gs://another_bucket/a.txt`  You
      can combine wildcards to provide more powerful matches, for example: *
      `gs://bucket_name/[a-m]??.j*g`
    sampleGcsFileSpecs: Output only. Sample files contained in this fileset,
      not all files contained in this fileset are represented here.
  """

  filePatterns = _messages.StringField(1, repeated=True)
  sampleGcsFileSpecs = _messages.MessageField('GoogleCloudDatacatalogV1beta1GcsFileSpec', 2, repeated=True)
class GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest(_messages.Message):
  r"""Request message for ImportTaxonomies.

  Fields:
    inlineSource: Inline source used for taxonomies import
  """

  inlineSource = _messages.MessageField('GoogleCloudDatacatalogV1beta1InlineSource', 1)
class GoogleCloudDatacatalogV1beta1ImportTaxonomiesResponse(_messages.Message):
  r"""Response message for ImportTaxonomies.

  Fields:
    taxonomies: Taxonomies that were imported.
  """

  taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 1, repeated=True)
class GoogleCloudDatacatalogV1beta1InlineSource(_messages.Message):
  r"""Inline source used for taxonomies import.

  Fields:
    taxonomies: Required. Taxonomies to be imported.
  """

  taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedTaxonomy', 1, repeated=True)
class GoogleCloudDatacatalogV1beta1ListPolicyTagsResponse(_messages.Message):
  r"""Response message for ListPolicyTags.

  Fields:
    nextPageToken: Token used to retrieve the next page of results, or empty
      if there are no more results in the list.
    policyTags: The policy tags that are in the requested taxonomy.
  """

  nextPageToken = _messages.StringField(1)
  policyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1PolicyTag', 2, repeated=True)
class GoogleCloudDatacatalogV1beta1ListTagsResponse(_messages.Message):
  r"""Response message for ListTags.

  Fields:
    nextPageToken: Token to retrieve the next page of results. It is set to
      empty if no items remain in results.
    tags: Tag details.
  """

  nextPageToken = _messages.StringField(1)
  tags = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 2, repeated=True)
class GoogleCloudDatacatalogV1beta1ListTaxonomiesResponse(_messages.Message):
  r"""Response message for ListTaxonomies.

  Fields:
    nextPageToken: Token used to retrieve the next page of results, or empty
      if there are no more results in the list.
    taxonomies: Taxonomies that the project contains.
  """

  nextPageToken = _messages.StringField(1)
  taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 2, repeated=True)
class GoogleCloudDatacatalogV1beta1PolicyTag(_messages.Message):
  r"""Denotes one policy tag in a taxonomy (e.g. ssn). Policy Tags can be
  defined in a hierarchy. For example, consider the following hierarchy:
  Geolocation -> (LatLong, City, ZipCode). PolicyTag "Geolocation" contains
  three child policy tags: "LatLong", "City", and "ZipCode".

  Fields:
    childPolicyTags: Output only. Resource names of child policy tags of this
      policy tag.
    description: Description of this policy tag. It must: contain only unicode
      characters, tabs, newlines, carriage returns and page breaks; and be at
      most 2000 bytes long when encoded in UTF-8. If not set, defaults to an
      empty description.
    displayName: Required. User defined name of this policy tag. It must: be
      unique within the parent taxonomy; contain only unicode letters,
      numbers, underscores, dashes and spaces; not start or end with spaces;
      and be at most 200 bytes long when encoded in UTF-8.
    name: Output only. Resource name of this policy tag, whose format is: "pro
      jects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/
      policyTags/{id}".
    parentPolicyTag: Resource name of this policy tag's parent policy tag
      (e.g. for the "LatLong" policy tag in the example above, this field
      contains the resource name of the "Geolocation" policy tag). If empty,
      it means this policy tag is a top level policy tag (e.g. this field is
      empty for the "Geolocation" policy tag in the example above). If not
      set, defaults to an empty string.
  """

  childPolicyTags = _messages.StringField(1, repeated=True)
  description = _messages.StringField(2)
  displayName = _messages.StringField(3)
  name = _messages.StringField(4)
  parentPolicyTag = _messages.StringField(5)
class GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest(_messages.Message):
r"""Request message for RenameTagTemplateField.
Fields:
newTagTemplateFieldId: Required. The new ID of this tag template field.
For example, `my_new_field`.
"""
newTagTemplateFieldId = _messages.StringField(1)
class GoogleCloudDatacatalogV1beta1Schema(_messages.Message):
r"""Represents a schema (e.g. BigQuery, GoogleSQL, Avro schema).
Fields:
columns: Required. Schema of columns. A maximum of 10,000 columns and sub-
columns can be specified.
"""
columns = _messages.MessageField('GoogleCloudDatacatalogV1beta1ColumnSchema', 1, repeated=True)
class GoogleCloudDatacatalogV1beta1SearchCatalogRequest(_messages.Message):
  r"""Request message for SearchCatalog.

  Fields:
    orderBy: Specifies the ordering of results, currently supported case-
      sensitive choices are:  * `relevance`, only supports descending  *
      `last_access_timestamp [asc|desc]`, defaults to descending if not
      specified  * `last_modified_timestamp [asc|desc]`, defaults to
      descending if not specified  If not specified, defaults to
      `relevance` descending.
    pageSize: Number of results in the search page. If <=0 then defaults to
      10. Max limit for page_size is 1000. Throws an invalid argument for
      page_size > 1000.
    pageToken: Optional. Pagination token returned in an earlier
      SearchCatalogResponse.next_page_token, which indicates that this is a
      continuation of a prior SearchCatalogRequest call, and that the system
      should return the next page of data. If empty, the first page is
      returned.
    query: Required. The query string in search query syntax. The query must
      be non-empty.  Query strings can be simple as "x" or more qualified as:
      * name:x * column:x * description:y  Note: Query tokens need to have a
      minimum of 3 characters for substring matching to work correctly. See
      [Data Catalog Search Syntax](/data-catalog/docs/how-to/search-reference)
      for more information.
    scope: Required. The scope of this search request.
  """

  orderBy = _messages.StringField(1)
  pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(3)
  query = _messages.StringField(4)
  scope = _messages.MessageField('GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope', 5)
class GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope(_messages.Message):
r"""A GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope object.
Fields:
includeGcpPublicDatasets: If `true`, include Google Cloud Platform (GCP)
public datasets in the search results. Info on GCP public datasets is
available at https://cloud.google.com/public-datasets/. By default, GCP
public datasets are excluded.
includeOrgIds: Data Catalog tries to automatically choose the right corpus
of data to search through. You can ensure an organization is included by
adding it to `include_org_ids`. You can ensure a project's org is
included with `include_project_ids`. You must specify at least one
organization using `include_org_ids` or `include_project_ids` in all
search requests. List of organization IDs to search within. To find
your organization ID, follow instructions in https://cloud.google.com
/resource-manager/docs/creating-managing-organization.
includeProjectIds: List of project IDs to search within. To learn more
about the distinction between project names/IDs/numbers, go to
https://cloud.google.com/docs/overview/#projects.
"""
includeGcpPublicDatasets = _messages.BooleanField(1)
includeOrgIds = _messages.StringField(2, repeated=True)
includeProjectIds = _messages.StringField(3, repeated=True)
class GoogleCloudDatacatalogV1beta1SearchCatalogResponse(_messages.Message):
r"""Response message for SearchCatalog.
Fields:
nextPageToken: The token that can be used to retrieve the next page of
results.
results: Search results.
"""
nextPageToken = _messages.StringField(1)
results = _messages.MessageField('GoogleCloudDatacatalogV1beta1SearchCatalogResult', 2, repeated=True)
class GoogleCloudDatacatalogV1beta1SearchCatalogResult(_messages.Message):
r"""A result that appears in the response of a search request. Each result
captures details of one entry that matches the search.
Enums:
SearchResultTypeValueValuesEnum: Type of the search result. This field can
be used to determine which Get method to call to fetch the full
resource.
Fields:
linkedResource: The full name of the cloud resource the entry belongs to.
See:
https://cloud.google.com/apis/design/resource_names#full_resource_name.
Example: * `//bigquery.googleapis.com/projects/projectId/datasets/data
setId/tables/tableId`
relativeResourceName: The relative resource name of the resource in URL
format. Examples: * `projects/{project_id}/locations/{location_id}/ent
ryGroups/{entry_group_id}/entries/{entry_id}` *
`projects/{project_id}/tagTemplates/{tag_template_id}`
searchResultSubtype: Sub-type of the search result. This is a dot-
delimited description of the resource's full type, and is the same as
the value callers would provide in the "type" search facet. Examples:
`entry.table`, `entry.dataStream`, `tagTemplate`.
searchResultType: Type of the search result. This field can be used to
determine which Get method to call to fetch the full resource.
"""
class SearchResultTypeValueValuesEnum(_messages.Enum):
r"""Type of the search result. This field can be used to determine which
Get method to call to fetch the full resource.
Values:
SEARCH_RESULT_TYPE_UNSPECIFIED: Default unknown type.
ENTRY: An Entry.
TAG_TEMPLATE: A TagTemplate.
ENTRY_GROUP: An EntryGroup.
"""
SEARCH_RESULT_TYPE_UNSPECIFIED = 0
ENTRY = 1
TAG_TEMPLATE = 2
ENTRY_GROUP = 3
linkedResource = _messages.StringField(1)
relativeResourceName = _messages.StringField(2)
searchResultSubtype = _messages.StringField(3)
searchResultType = _messages.EnumField('SearchResultTypeValueValuesEnum', 4)
class GoogleCloudDatacatalogV1beta1SerializedPolicyTag(_messages.Message):
r"""Message representing one policy tag when exported as a nested proto.
Fields:
childPolicyTags: Children of the policy tag if any.
description: Description of the serialized policy tag. The length of the
description is limited to 2000 bytes when encoded in UTF-8. If not set,
defaults to an empty description.
displayName: Required. Display name of the policy tag. Max 200 bytes when
encoded in UTF-8.
"""
childPolicyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedPolicyTag', 1, repeated=True)
description = _messages.StringField(2)
displayName = _messages.StringField(3)
class GoogleCloudDatacatalogV1beta1SerializedTaxonomy(_messages.Message):
r"""Message capturing a taxonomy and its policy tag hierarchy as a nested
proto. Used for taxonomy import/export and mutation.
Fields:
description: Description of the serialized taxonomy. The length of the
description is limited to 2000 bytes when encoded in UTF-8. If not set,
defaults to an empty description.
displayName: Required. Display name of the taxonomy. Max 200 bytes when
encoded in UTF-8.
policyTags: Top level policy tags associated with the taxonomy if any.
"""
description = _messages.StringField(1)
displayName = _messages.StringField(2)
policyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedPolicyTag', 3, repeated=True)
class GoogleCloudDatacatalogV1beta1SystemTimestamps(_messages.Message):
  r"""Timestamps about this resource according to a particular system.

  Fields:
    createTime: The creation time of the resource within the given system.
    expireTime: Output only. The expiration time of the resource within the
      given system. Currently only applicable to BigQuery resources.
    updateTime: The last-modified time of the resource within the given
      system.
  """

  createTime = _messages.StringField(1)
  expireTime = _messages.StringField(2)
  updateTime = _messages.StringField(3)
class GoogleCloudDatacatalogV1beta1TableSpec(_messages.Message):
r"""Normal BigQuery table spec.
Fields:
groupedEntry: Output only. If the table is a dated shard, i.e., with name
pattern `[prefix]YYYYMMDD`, `grouped_entry` is the Data Catalog resource
name of the date sharded grouped entry, for example, `projects/{project_
id}/locations/{location}/entrygroups/{entry_group_id}/entries/{entry_id}
`. Otherwise, `grouped_entry` is empty.
"""
groupedEntry = _messages.StringField(1)
class GoogleCloudDatacatalogV1beta1Tag(_messages.Message):
r"""Tags are used to attach custom metadata to Data Catalog resources. Tags
conform to the specifications within their tag template. See [Data Catalog
IAM](/data-catalog/docs/concepts/iam) for information on the permissions
needed to create or view tags.
Messages:
FieldsValue: Required. This maps the ID of a tag field to the value of and
additional information about that field. Valid field IDs are defined by
the tag's template. A tag must have at least 1 field and at most 500
fields.
Fields:
column: Resources like Entry can have schemas associated with them. This
scope allows users to attach tags to an individual column based on that
schema. For attaching a tag to a nested column, use `.` to separate the
column names. Example: * `outer_column.inner_column`
fields: Required. This maps the ID of a tag field to the value of and
additional information about that field. Valid field IDs are defined by
the tag's template. A tag must have at least 1 field and at most 500
fields.
name: The resource name of the tag in URL format. Example: * projects/{pr
oject_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{ent
ry_id}/tags/{tag_id} where `tag_id` is a system-generated identifier.
Note that this Tag may not actually be stored in the location in this
name.
template: Required. The resource name of the tag template that this tag
uses. Example: * projects/{project_id}/locations/{location}/tagTemplate
s/{tag_template_id} This field cannot be modified after creation.
templateDisplayName: Output only. The display name of the tag template.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class FieldsValue(_messages.Message):
r"""Required. This maps the ID of a tag field to the value of and
additional information about that field. Valid field IDs are defined by
the tag's template. A tag must have at least 1 field and at most 500
fields.
Messages:
AdditionalProperty: An additional property for a FieldsValue object.
Fields:
additionalProperties: Additional properties of type FieldsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a FieldsValue object.
Fields:
key: Name of the additional property.
value: A GoogleCloudDatacatalogV1beta1TagField attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagField', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
column = _messages.StringField(1)
fields = _messages.MessageField('FieldsValue', 2)
name = _messages.StringField(3)
template = _messages.StringField(4)
templateDisplayName = _messages.StringField(5)
class GoogleCloudDatacatalogV1beta1TagField(_messages.Message):
r"""Contains the value and supporting information for a field within a Tag.
Fields:
boolValue: Holds the value for a tag field with boolean type.
displayName: Output only. The display name of this field.
doubleValue: Holds the value for a tag field with double type.
enumValue: Holds the value for a tag field with enum type. This value must
be one of the allowed values in the definition of this enum.
stringValue: Holds the value for a tag field with string type.
timestampValue: Holds the value for a tag field with timestamp type.
"""
boolValue = _messages.BooleanField(1)
displayName = _messages.StringField(2)
doubleValue = _messages.FloatField(3)
enumValue = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagFieldEnumValue', 4)
stringValue = _messages.StringField(5)
timestampValue = _messages.StringField(6)
class GoogleCloudDatacatalogV1beta1TagFieldEnumValue(_messages.Message):
r"""Holds an enum value.
Fields:
displayName: The display name of the enum value.
"""
displayName = _messages.StringField(1)
class GoogleCloudDatacatalogV1beta1TagTemplate(_messages.Message):
r"""A tag template defines a tag, which can have one or more typed fields.
The template is used to create and attach the tag to GCP resources. [Tag
template roles](/iam/docs/understanding-roles#data-catalog-roles) provide
permissions to create, edit, and use the template (see, for example, the
[TagTemplate User](/data-catalog/docs/how-to/template-user) role, which
includes permission to use the tag template to tag resources.
Messages:
FieldsValue: Required. Map of tag template field IDs to the settings for
the field. This map is an exhaustive list of the allowed fields. This
map must contain at least one field and at most 500 fields. The keys to
this map are tag template field IDs. Field IDs can contain letters (both
uppercase and lowercase), numbers (0-9) and underscores (_). Field IDs
must be at least 1 character long and at most 64 characters long. Field
IDs must start with a letter or underscore.
Fields:
displayName: The display name for this template. Defaults to an empty
string.
fields: Required. Map of tag template field IDs to the settings for the
field. This map is an exhaustive list of the allowed fields. This map
must contain at least one field and at most 500 fields. The keys to
this map are tag template field IDs. Field IDs can contain letters (both
uppercase and lowercase), numbers (0-9) and underscores (_). Field IDs
must be at least 1 character long and at most 64 characters long. Field
IDs must start with a letter or underscore.
name: The resource name of the tag template in URL format. Example: *
projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id
} Note that this TagTemplate and its child resources may not actually
be stored in the location in this name.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class FieldsValue(_messages.Message):
r"""Required. Map of tag template field IDs to the settings for the field.
This map is an exhaustive list of the allowed fields. This map must
contain at least one field and at most 500 fields. The keys to this map
are tag template field IDs. Field IDs can contain letters (both uppercase
and lowercase), numbers (0-9) and underscores (_). Field IDs must be at
least 1 character long and at most 64 characters long. Field IDs must
start with a letter or underscore.
Messages:
AdditionalProperty: An additional property for a FieldsValue object.
Fields:
additionalProperties: Additional properties of type FieldsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a FieldsValue object.
Fields:
key: Name of the additional property.
value: A GoogleCloudDatacatalogV1beta1TagTemplateField attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplateField', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
displayName = _messages.StringField(1)
fields = _messages.MessageField('FieldsValue', 2)
name = _messages.StringField(3)
class GoogleCloudDatacatalogV1beta1TagTemplateField(_messages.Message):
r"""The template for an individual field within a tag template.
Fields:
displayName: The display name for this field. Defaults to an empty string.
isRequired: Whether this is a required field. Defaults to false.
name: Output only. The resource name of the tag template field in URL
format. Example: * projects/{project_id}/locations/{location}/tagTempla
tes/{tag_template}/fields/{field} Note that this TagTemplateField may
not actually be stored in the location in this name.
type: Required. The type of value this tag field can contain.
"""
displayName = _messages.StringField(1)
isRequired = _messages.BooleanField(2)
name = _messages.StringField(3)
type = _messages.MessageField('GoogleCloudDatacatalogV1beta1FieldType', 4)
class GoogleCloudDatacatalogV1beta1Taxonomy(_messages.Message):
r"""A taxonomy is a collection of policy tags that classify data along a
common axis. For instance a data *sensitivity* taxonomy could contain policy
tags denoting PII such as age, zipcode, and SSN. A data *origin* taxonomy
could contain policy tags to distinguish user data, employee data, partner
data, public data.
Enums:
ActivatedPolicyTypesValueListEntryValuesEnum:
Fields:
activatedPolicyTypes: Optional. A list of policy types that are activated
for this taxonomy. If not set, defaults to an empty list.
description: Optional. Description of this taxonomy. It must: contain only
unicode characters, tabs, newlines, carriage returns and page breaks;
and be at most 2000 bytes long when encoded in UTF-8. If not set,
defaults to an empty description.
displayName: Required. User defined name of this taxonomy. It must:
contain only unicode letters, numbers, underscores, dashes and spaces;
not start or end with spaces; and be at most 200 bytes long when encoded
in UTF-8.
name: Output only. Resource name of this taxonomy, whose format is:
"projects/{project_number}/locations/{location_id}/taxonomies/{id}".
"""
class ActivatedPolicyTypesValueListEntryValuesEnum(_messages.Enum):
r"""ActivatedPolicyTypesValueListEntryValuesEnum enum type.
Values:
POLICY_TYPE_UNSPECIFIED: <no description>
FINE_GRAINED_ACCESS_CONTROL: <no description>
"""
POLICY_TYPE_UNSPECIFIED = 0
FINE_GRAINED_ACCESS_CONTROL = 1
activatedPolicyTypes = _messages.EnumField('ActivatedPolicyTypesValueListEntryValuesEnum', 1, repeated=True)
description = _messages.StringField(2)
displayName = _messages.StringField(3)
name = _messages.StringField(4)
class GoogleCloudDatacatalogV1beta1ViewSpec(_messages.Message):
r"""Table view specification.
Fields:
viewQuery: Output only. The query that defines the table view.
"""
viewQuery = _messages.StringField(1)
class Policy(_messages.Message):
r"""An Identity and Access Management (IAM) policy, which specifies access
controls for Google Cloud resources. A `Policy` is a collection of
`bindings`. A `binding` binds one or more `members` to a single `role`.
Members can be user accounts, service accounts, Google groups, and domains
(such as G Suite). A `role` is a named list of permissions; each `role` can
be an IAM predefined role or a user-created custom role. Optionally, a
`binding` can specify a `condition`, which is a logical expression that
allows access to a resource only if the expression evaluates to `true`. A
condition can add constraints based on attributes of the request, the
resource, or both. **JSON example:** { "bindings": [ {
"role": "roles/resourcemanager.organizationAdmin", "members": [
"user:<EMAIL>", "group:<EMAIL>",
"domain:google.com", "serviceAccount:<EMAIL>-
<EMAIL>" ] }, {
"role": "roles/resourcemanager.organizationViewer", "members":
["user:<EMAIL>"], "condition": { "title":
"expirable access", "description": "Does not grant access after
Sep 2020", "expression": "request.time <
timestamp('2020-10-01T00:00:00.000Z')", } } ],
"etag": "BwWWja0YfJA=", "version": 3 } **YAML example:**
bindings: - members: - user:<EMAIL> -
group:<EMAIL> - domain:google.com - serviceAccount
:<EMAIL> role:
roles/resourcemanager.organizationAdmin - members: -
user:<EMAIL> role: roles/resourcemanager.organizationViewer
condition: title: expirable access description: Does not
grant access after Sep 2020 expression: request.time <
timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= -
version: 3 For a description of IAM and its features, see the [IAM
documentation](https://cloud.google.com/iam/docs/).
Fields:
bindings: Associates a list of `members` to a `role`. Optionally, may
specify a `condition` that determines how and when the `bindings` are
applied. Each of the `bindings` must contain at least one member.
etag: `etag` is used for optimistic concurrency control as a way to help
prevent simultaneous updates of a policy from overwriting each other. It
is strongly suggested that systems make use of the `etag` in the read-
modify-write cycle to perform policy updates in order to avoid race
conditions: An `etag` is returned in the response to `getIamPolicy`, and
systems are expected to put that etag in the request to `setIamPolicy`
to ensure that their change will be applied to the same version of the
policy. **Important:** If you use IAM Conditions, you must include the
`etag` field whenever you call `setIamPolicy`. If you omit this field,
then IAM allows you to overwrite a version `3` policy with a version `1`
policy, and all of the conditions in the version `3` policy are lost.
version: Specifies the format of the policy. Valid values are `0`, `1`,
and `3`. Requests that specify an invalid value are rejected. Any
operation that affects conditional role bindings must specify version
`3`. This requirement applies to the following operations: * Getting a
policy that includes a conditional role binding * Adding a conditional
role binding to a policy * Changing a conditional role binding in a
policy * Removing any role binding, with or without a condition, from a
policy that includes conditions **Important:** If you use IAM
Conditions, you must include the `etag` field whenever you call
`setIamPolicy`. If you omit this field, then IAM allows you to overwrite
a version `3` policy with a version `1` policy, and all of the
conditions in the version `3` policy are lost. If a policy does not
include any conditions, operations on that policy may specify any valid
version or leave the field unset.
"""
bindings = _messages.MessageField('Binding', 1, repeated=True)
etag = _messages.BytesField(2)
version = _messages.IntegerField(3, variant=_messages.Variant.INT32)
class SetIamPolicyRequest(_messages.Message):
r"""Request message for `SetIamPolicy` method.
Fields:
policy: REQUIRED: The complete policy to be applied to the `resource`. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
"""
policy = _messages.MessageField('Policy', 1)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
class TestIamPermissionsRequest(_messages.Message):
r"""Request message for `TestIamPermissions` method.
Fields:
permissions: The set of permissions to check for the `resource`.
Permissions with wildcards (such as '*' or 'storage.*') are not allowed.
For more information see [IAM
Overview](https://cloud.google.com/iam/docs/overview#permissions).
"""
permissions = _messages.StringField(1, repeated=True)
class TestIamPermissionsResponse(_messages.Message):
r"""Response message for `TestIamPermissions` method.
Fields:
permissions: A subset of `TestPermissionsRequest.permissions` that the
caller is allowed.
"""
permissions = _messages.StringField(1, repeated=True)
# The query parameter is literally named "$.xgafv" on the wire and takes the
# enum values "1" and "2" -- none of which are valid Python identifiers. The
# generated message therefore uses the safe names 'f__xgafv', '_1' and '_2';
# these mappings restore the wire-format names during JSON (de)serialization.
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| 1.742188 | 2 |
materials/migrations/0072_auto_20190422_1708.py | mgovoni-devel/MatD3 | 7 | 4873 | <reponame>mgovoni-devel/MatD3
# Generated by Django 2.1.7 on 2019-04-22 21:08
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the legacy AtomicPositions model.

    Its 'idinfo_ptr', 'synthesis_method' and 'system' fields are removed
    first, then the model itself is deleted.
    """

    dependencies = [
        ('materials', '0071_auto_20190416_1557'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='atomicpositions',
            name='idinfo_ptr',
        ),
        migrations.RemoveField(
            model_name='atomicpositions',
            name='synthesis_method',
        ),
        migrations.RemoveField(
            model_name='atomicpositions',
            name='system',
        ),
        # With all relational fields gone, the model can be deleted.
        migrations.DeleteModel(
            name='AtomicPositions',
        ),
    ]
| 1.554688 | 2 |
Deep-Learning/Crowd-Count/src/data_preprocess.py | sadbb/CVCode | 1 | 4874 | # -*- coding:utf-8 -*-
# ------------------------
# written by <NAME>
# 2018-10
# ------------------------
import os
import skimage.io
from skimage.color import rgb2gray
import skimage.transform
from scipy.io import loadmat
import numpy as np
import cv2
import math
import warnings
import random
import torch
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
def _normalized_gaussian_1d(length, sigma):
    """Return a 1-D Gaussian of `length` taps, centered at (length-1)/2,
    normalized to sum to 1.

    Matches cv2.getGaussianKernel(length, sigma) for sigma > 0, without the
    OpenCV dependency.
    """
    offsets = np.arange(length) - (length - 1) / 2.0
    weights = np.exp(-(offsets ** 2) / (2.0 * sigma ** 2))
    return weights / weights.sum()


def gaussian_kernel(image, points, f_sz=15, sigma=4.0):
    """Render head annotations as a density map of the same shape as `image`.

    Each annotated point contributes a separable 2-D Gaussian placed in an
    f_sz x f_sz window (clipped at the image border). Because each 1-D factor
    is normalized after truncation, every point adds exactly 1.0 to the total
    density, so image_density.sum() == len(points).

    Args:
        image: 2-D gray-scale image (only its shape is used).
        points: array-like of (x, y) head positions; may be empty.
        f_sz: side length, in pixels, of the Gaussian window (odd preferred).
        sigma: standard deviation of the Gaussian, in pixels.

    Returns:
        numpy array of shape image.shape containing the density map.
    """
    image_density = np.zeros(image.shape)
    h, w = image_density.shape
    if len(points) == 0:
        return image_density
    gap = f_sz // 2
    for point in points:
        # Clamp the annotated center into valid pixel coordinates
        # (annotations can fall slightly outside the image). The upper
        # bound is w-1/h-1: the previous min(w, ...) allowed an
        # out-of-range index as the nominal center.
        x = min(w - 1, max(0, int(point[0])))
        y = min(h - 1, max(0, int(point[1])))
        # Window bounds, clipped at the image border.
        x1 = max(x - gap, 0)
        x2 = min(x + gap, w - 1)
        y1 = max(y - gap, 0)
        y2 = min(y + gap, h - 1)
        ky = _normalized_gaussian_1d(y2 - y1 + 1, sigma)  # vertical factor
        kx = _normalized_gaussian_1d(x2 - x1 + 1, sigma)  # horizontal factor
        image_density[y1:y2 + 1, x1:x2 + 1] += np.outer(ky, kx)
    return image_density
def extract_data(mode="train", patch_number=9, part="A"):
    """Convert ShanghaiTech images into gray-scale patch / density-map pairs.

    For every image the head annotations are rendered into a density map via
    `gaussian_kernel`, then `patch_number` random crops (a quarter of the
    width and height each) are saved under ../data/preprocessed/.

    Args:
        mode: "train" or "test" -- selects the source sub-directory.
        patch_number: number of random patches sampled per image.
        part: dataset part, "A" or "B".
    """
    num_images = 300 if mode == "train" else 182
    # original path
    dataset_path = "../data/original/part_{0}_final/".format(part)
    mode_data = os.path.join(dataset_path, "{0}_data".format(mode))
    mode_images = os.path.join(mode_data, "images")
    mode_ground_truth = os.path.join(mode_data, "ground_truth")
    # preprocessed path; makedirs(exist_ok=True) creates missing parents and
    # is race-free, unlike the exists()+mkdir() pattern.
    preprocessed_mode = "../data/preprocessed/{0}/".format(mode)
    preprocessed_mode_density = "../data/preprocessed/{0}_density/".format(mode)
    os.makedirs(preprocessed_mode, exist_ok=True)
    os.makedirs(preprocessed_mode_density, exist_ok=True)
    # convert images to gray-density for each
    for index in range(1, num_images + 1):
        if index % 10 == 9:
            print("{0} images have been processed".format(index + 1))
        image_path = os.path.join(mode_images, "IMG_{0}.jpg".format(index))
        ground_truth_path = os.path.join(mode_ground_truth, "GT_IMG_{0}.mat".format(index))
        image = skimage.io.imread(image_path)
        # density maps are per-pixel, so work on a single-channel image
        if image.shape[-1] == 3:
            image = rgb2gray(image)
        mat = loadmat(ground_truth_path)
        # image_info[...][0] holds the (x, y) head annotations
        ann_points = mat["image_info"][0][0][0][0][0]
        image_density = gaussian_kernel(image, ann_points)
        # sample random crops whose centers stay at least one block
        # (1/8 of each dimension) away from the border
        h, w = image.shape
        w_block = math.floor(w / 8)
        h_block = math.floor(h / 8)
        for j in range(patch_number):
            x = math.floor((w - 2 * w_block) * random.random() + w_block)
            y = math.floor((h - 2 * h_block) * random.random() + h_block)
            image_sample = image[y - h_block:y + h_block, x - w_block:x + w_block]
            image_density_sample = image_density[y - h_block:y + h_block, x - w_block:x + w_block]
            img_idx = "{0}_{1}".format(index, j)
            np.save(os.path.join(preprocessed_mode_density, "{0}.npy".format(img_idx)), image_density_sample)
            skimage.io.imsave(os.path.join(preprocessed_mode, "{0}.jpg".format(img_idx)), image_sample)
def extract_test_data(part="A"):
    """Convert the ShanghaiTech test images of `part` into density maps.

    Each image's head annotations are rendered via `gaussian_kernel` and the
    resulting map is saved as IMG_<n>.npy under
    ../data/preprocessed/test_density.

    Args:
        part: dataset part, "A" or "B".
    """
    # num_images is one past the last index: the loop runs 1..num_images-1
    # (182 images for part A, 316 for part B).
    num_images = 183 if part == "A" else 317
    test_data_path = "../data/original/part_{part}_final/test_data/images".format(part=part)
    test_ground_path = "../data/original/part_{part}_final/test_data/ground_truth".format(part=part)
    test_density_path = "../data/preprocessed/test_density"
    print("create directory........")
    # makedirs(exist_ok=True) also creates missing parent directories; the
    # previous exists()+mkdir() failed when ../data/preprocessed was absent.
    os.makedirs(test_density_path, exist_ok=True)
    print("begin to preprocess test data........")
    for index in range(1, num_images):
        if index % 10 == 0:
            print("{num} images are done".format(num=index))
        image_path = os.path.join(test_data_path, "IMG_{0}.jpg".format(index))
        ground_truth_path = os.path.join(test_ground_path, "GT_IMG_{0}.mat".format(index))
        # load mat and image
        image = skimage.io.imread(image_path)
        if image.shape[-1] == 3:
            image = rgb2gray(image)
        mat = loadmat(ground_truth_path)
        # image_info[...][0] holds the (x, y) head annotations; the people
        # count and image size stored alongside are not needed here.
        ann_points = mat["image_info"][0][0][0][0][0]
        # convert image annotations to a density map
        image_density = gaussian_kernel(image, ann_points)
        np.save(os.path.join(test_density_path, "IMG_{0}.npy".format(index)), image_density)
if __name__ == "__main__":
    # Run the preprocessing only when executed as a script; the bare call
    # previously ran on every import of this module.
    extract_test_data()
| 2.390625 | 2 |
for1.py | satyamraj123/set-of-python-programs | 0 | 4875 | fruit='banana'
x=len(fruit)
print(x) | 2.734375 | 3 |
Django_Intershala/recruiter/migrations/0004_auto_20210305_1551.py | samir321-pixel/Django_Intershala | 7 | 4876 | <reponame>samir321-pixel/Django_Intershala
# Generated by Django 3.1.7 on 2021-03-05 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recruiter', '0003_auto_20210304_2132'),
]
operations = [
migrations.RemoveField(
model_name='recruiter',
name='middle_Name',
),
migrations.AlterField(
model_name='recruiter',
name='first_Name',
field=models.CharField(max_length=200, null=True),
),
migrations.AlterField(
model_name='recruiter',
name='last_Name',
field=models.CharField(max_length=200, null=True),
),
]
| 1.601563 | 2 |
src/promnesia/sources/telegram.py | halhenke/promnesia | 1,327 | 4877 | <reponame>halhenke/promnesia<filename>src/promnesia/sources/telegram.py
'''
Uses [[https://github.com/fabianonline/telegram_backup#readme][telegram_backup]] database for messages data
'''
from pathlib import Path
from textwrap import dedent
from typing import Optional, Union, TypeVar
from urllib.parse import unquote # TODO mm, make it easier to rememember to use...
from ..common import PathIsh, Visit, get_logger, Loc, extract_urls, from_epoch, Results, echain
# TODO potentially, belongs to my. package
# TODO kython?
T = TypeVar("T")


def unwrap(res: Union[T, Exception]) -> T:
    """Return *res* unchanged — unless it is an Exception instance, in which
    case raise it instead of returning it."""
    if not isinstance(res, Exception):
        return res
    raise res
# TODO move to common?
def dataset_readonly(db: Path):
    """Open *db* through the ``dataset`` library in read-only mode.

    SQLite's ``immutable=1`` URI flag guarantees the file is never written;
    a custom ``creator`` is required because SQLAlchemy cannot express the
    URI form directly (see pudo/dataset issue #136).
    """
    import sqlite3

    import dataset  # type: ignore

    def _connect():
        return sqlite3.connect(f'file:{db}?immutable=1', uri=True)

    return dataset.connect('sqlite:///' , engine_kwargs={'creator': _connect})
def index(database: PathIsh, *, http_only: Optional[bool] = None) -> Results:
    """
    Yield one Visit per URL found in the telegram_backup message database.

    :param database:
        the path of the sqlite generated by the _telegram_backup_ java program
    :param http_only:
        when true, do not collect IP-addresses and `python.py` strings
    """
    logger = get_logger()
    path = Path(database)
    assert path.is_file(), path # TODO could check is_file inside `dataset_readonly()`
    # Builds one big query joining messages with a normalized view of users
    # and chats; text_query selects which column supplies the message text.
    def make_query(text_query: str):
        extra_criteria = "AND (M.has_media == 1 OR text LIKE '%http%')" if http_only else ""
        return dedent(
            f"""
            WITH entities AS (
            SELECT 'dialog' as type
            , id
            , coalesce(username, id) as handle
            , coalesce(first_name || " " || last_name
            , username
            , id
            ) as display_name FROM users
            UNION
            SELECT 'group' as type
            , id
            , id as handle
            , coalesce(name, id) as display_name FROM chats
            )
            SELECT src.display_name AS chatname
            , src.handle AS chat
            , snd.display_name AS sender
            , M.time AS time
            , {text_query} AS text
            , M.id AS mid
            FROM messages AS M
            /* chat types are 'dialog' (1-1), 'group' and 'supergroup' */
            /* this is abit hacky way to handle all groups in one go */
            LEFT JOIN entities AS src ON M.source_id = src.id AND src.type = (CASE M.source_type WHEN 'supergroup' THEN 'group' ELSE M.source_type END)
            LEFT JOIN entities AS snd ON M.sender_id = snd.id AND snd.type = 'dialog'
            WHERE
            M.message_type NOT IN ('service_message', 'empty_message')
            {extra_criteria}
            ORDER BY time;
            """)
    # TODO context manager?
    with dataset_readonly(path) as db:
        # Pass 1: URLs embedded in the message text itself.
        # TODO yield error if chatname or chat or smth else is null?
        for row in db.query(make_query('M.text')):
            try:
                yield from _handle_row(row)
            except Exception as ex:
                yield echain(RuntimeError(f'While handling {row}'), ex)
                # , None, sys.exc_info()[2]
                # TODO hmm. traceback isn't preserved; wonder if that's because it's too heavy to attach to every single exception object..
        # Pass 2: URLs in webpage-preview descriptions (newer schema only).
        # old (also 'stable') version doesn't have 'json' column yet...
        if 'json' in db['messages'].columns:
            for row in db.query(make_query("json_extract(json, '$.media.webpage.description')")):
                try:
                    yield from _handle_row(row)
                except Exception as ex:
                    yield echain(RuntimeError(f'While handling {row}'), ex)
def _handle_row(row) -> Results:
    """Emit one Visit per URL contained in a single message row."""
    text = row['text']
    if text is None:
        return
    urls = extract_urls(text)
    if not urls:
        return

    dt = from_epoch(row['time'])
    mid: str = unwrap(row['mid'])
    # TODO perhaps we could be defensive with null sender/chat etc and still emit the Visit
    sender: str = unwrap(row['sender'])
    chatname: str = unwrap(row['chatname'])
    chat: str = unwrap(row['chat'])

    # Deep link into the originating chat. Per reddit r/Telegram (6ufwi3)
    # these t.me/<chat>/<message> links only seem to resolve in the mobile
    # apps, but that's better than nothing.
    in_context = f'https://t.me/{chat}/{mid}'
    message_context = f"{sender}: {text}"
    for link in urls:
        yield Visit(
            url=unquote(link),
            dt=dt,
            context=message_context,
            locator=Loc.make(
                title=f"chat with {chatname}",
                href=in_context,
            ),
        )
| 2.171875 | 2 |
shellmacros/istr.py | duaneellissd/shellmacros | 0 | 4878 | '''
Created on Dec 27, 2019
@author: duane
'''
DOLLAR = ord('$')
LBRACE = ord('{')
RBRACE = ord('}')
LPAREN = ord('(')
RPAREN = ord(')')


class IStrFindResult(object):
    """Result of IStr.next_macro().

    Attributes:
        code: OK, NOTFOUND or SYNTAX (always set by next_macro before return).
        lhs/rhs: bounds of the macro, including '${'/'$(' and the closer.
        name: macro name between the delimiters (only when code == OK).
    """
    OK = 0
    NOTFOUND = 1
    SYNTAX = 2

    def __init__(self):
        # next_macro() reports its status via `code`; `result` is kept
        # (same initial value as before) for backward compatibility.
        self.code = IStrFindResult.SYNTAX
        self.result = IStrFindResult.SYNTAX
        self.lhs = 0
        self.rhs = 0
        self.name = None


class IStr(list):
    '''
    This closely models a basic ASCII string
    Note: Unicode strings are expressly not supported here.

    The problem this addresses occurs during macro processing.
    Sometimes macros are defined externally
    Other times, macros are fully defined with a package.
    Often macros need to be resolved either partially or fully

    When a macro is only external - they get in the way of resolving other macros
    To work around that, we convert the string into an array of integers
    Then for every macro byte that is 'external' we add 0x100
    This makes the byte 'non-matchable'

    Later, when we convert the resolved string into we strip the 0x100.
    '''
    # Flag OR'd onto a byte to make it non-matchable during macro scanning.
    IGNORE = 0x100

    def __init__(self, s):
        '''Store the string as a list of integer byte values.'''
        list.__init__(self, map(ord, s))

    def __str__(self):
        # return as string, stripping flags
        return ''.join(map(lambda v: chr(v & 0xff), self))

    def sslice(self, lhs, rhs):
        '''Return the substring [lhs:rhs] as text, stripping flags.'''
        return ''.join(map(lambda v: chr(v & 0xff), self[lhs:rhs]))

    def iarray(self):
        '''Return a plain list copy of the (possibly flagged) integers.'''
        return self[:]

    def mark(self, lhs, rhs, flagvalue=IGNORE):
        '''
        Apply flags to locations between left and right hand sides, ie: [lhs:rhs]
        '''
        for idx in range(lhs, rhs):
            self[idx] |= flagvalue

    def locate(self, needle, lhs, rhs):
        '''Find this needle(char) in the hay stack(list); -1 when absent.'''
        try:
            return self.index(needle, lhs, rhs)
        except ValueError:
            # not found
            return -1

    def replace(self, lhs, rhs, newcontent):
        '''replace the data between [lhs:rhs] with newcontent'''
        self[lhs: rhs] = map(ord, newcontent)

    def next_macro(self, lhs, rhs):
        '''
        Find a ${name} or $(name) macro within [lhs:rhs].

        Returns an IStrFindResult whose `code` is OK (lhs/rhs/name filled in,
        rhs includes the closing symbol), NOTFOUND, or SYNTAX.
        '''
        result = IStrFindResult()
        result.lhs = lhs
        result.rhs = rhs

        # A macro needs at least "${x}" -> 4 chars.
        if (rhs - lhs) < 4:
            result.code = result.NOTFOUND
            return result

        # We search for the CLOSING symbol first.
        # Consider nested: ${ ${foo}_${bar} } - the innermost macro ("foo")
        # must be resolved first, so anchor the scan on the first closer.
        tmp = self.locate(RBRACE, result.lhs, result.rhs)
        if tmp >= 0:
            _open_symbol = LBRACE
        else:
            tmp = self.locate(RPAREN, result.lhs, result.rhs)
            # BUG FIX: the opener matching ')' is '(' - this was previously
            # RPAREN, so "$(name)" style macros were never recognized.
            _open_symbol = LPAREN
        if tmp < 0:
            # no closing symbol at all
            result.code = result.NOTFOUND
            return result
        # End the scan at the closing symbol.
        result.rhs = tmp

        while result.lhs < result.rhs:
            # find DOLLAR
            dollar_loc = self.locate(DOLLAR, result.lhs, result.rhs)
            if dollar_loc < 0:
                # We have a close but no dollar before it.
                # We could call this a SYNTAX error but we leave it NOTFOUND.
                result.code = result.NOTFOUND
                return result

            # we have: DOLLAR + CLOSE; is the next char the matching OPEN?
            ch = self[dollar_loc + 1]
            if ch != _open_symbol:
                # Nope... try again after this dollar
                result.lhs = dollar_loc + 1
                continue
            result.lhs = dollar_loc

            # Do we have a nested macro, ie: ${${x}}?
            tmp = self.locate(DOLLAR, dollar_loc + 1, result.rhs)
            if tmp >= 0:
                # restart the scan at the inner macro
                result.lhs = tmp
                continue

            # Everything between LHS and RHS is a macro name.
            result.code = result.OK
            result.name = self.sslice(result.lhs + 2, result.rhs)
            # the RHS should include the closing symbol
            result.rhs += 1
            return result
        # stray dollar or brace
        result.code = result.SYNTAX
        return result
def test_istr():
    """Regression harness for IStr macro scanning, marking and replacement."""
    def check2(l, r, text, dut):
        """Run next_macro on *dut*; assert the macro spans (l, r) and is named
        *text* (when given), then mark the span as resolved."""
        print("----")
        print("Check (%d,%d)" % (l, r))
        print("s = %s" % str(dut))
        print("i = %s" % dut.iarray())
        result = dut.next_macro(0, len(dut))
        if (result.lhs != l) or (result.rhs != r):
            print("str = %s" % str(dut))
            print("int = %s" % dut.iarray())
            print("Error: (%d,%d) != (%d,%d)" % (l, r, result.lhs, result.rhs))
            assert (False)
        if text is not None:
            assert( result.name == text )
        dut.mark(l, r)
        return dut
    def check(l, r, s):
        """Build an IStr from *s*, expect a macro at (l, r) — or none when
        l < 0 — and verify the flag-stripped round trip equals *s*."""
        if l >= 0:
            expected = s[l + 2:r - 1]
        else:
            expected = None
        dut = IStr(s)
        check2(l, r, expected, dut)
        st = str(dut)
        assert (st == s)
        return dut
    # strings too short to hold a macro
    check(-1, -1, "")
    check(-1, -1, "a")
    check(-1, -1, "ab")
    check(-1, -1, "abc")
    check(-1, -1, "abcd")
    check(-1, -1, "abcde")
    check(-1, -1, "abcdef")
    # macros of increasing name length
    check(0, 4, "${a}")
    check(0, 5, "${ab}")
    check(0, 6, "${abc}")
    check(0, 7, "${abcd}")
    # macros preceded by text
    check(1, 5, "a${a}")
    check(2, 6, "ab${a}")
    check(3, 7, "abc${a}")
    check(4, 8, "abcd${a}")
    check(5, 9, "abcde${a}")
    # macros followed by text
    check(0, 4, "${a}a")
    check(0, 4, "${a}ab")
    check(0, 4, "${a}abc")
    check(0, 4, "${a}abcd")
    check(0, 4, "${a}abcde")
    # replacement in the middle of a string
    dut = check(4, 8, "abcd${a}xyz")
    dut.replace(4, 8, "X")
    check2(-1, -1, None, dut)
    r = str(dut)
    print("Got: %s" % r)
    assert ("abcdXxyz" == str(dut))
    # now nested tests
    dut = check(5, 9, "abc${${Y}}xyz")
    dut.replace(5, 9, "X")
    r = str(dut)
    assert (r == "abc${X}xyz")
    dut = check2(3, 7, "${X}", dut)
    dut.replace(3, 7, "ABC")
    s = str(dut)
    r = "abcABCxyz"
    assert (s == r)
    print("Success")
# run the self-test when executed directly
if __name__ == '__main__':
    test_istr()
| 2.84375 | 3 |
cli.py | checktheroads/deenis | 4 | 4879 | <filename>cli.py
#!/usr/bin/env python3
"""
CLI for Accessing Deenis
"""
# Standard Imports
import sys
from pathlib import Path
# Module Imports
import click
# Path Fixes
working_dir = Path(__file__).resolve().parent
sys.path.append(str(working_dir))
# Project Imports
from deenis import Deenis
@click.group(
    help=(
        "Deenis can be used to group and automate boring DNS tasks. For example, "
        "`host` can take a hostname, IPv4 Address, and IPv6 Address, and create "
        "forward A & AAAA, and reverse PTR records (4 actions) with a single command."
    )
)
def add_records():
    """Click command group; subcommands are attached via @add_records.command."""
    # pylint: disable=unnecessary-pass
    # Click requires a (possibly empty) callable as the group body.
    pass
@add_records.command("host", help="Add a Host Record")
@click.option("-c", "--config-file", "config_file", help="Path to YAML Config File")
@click.option("-4", "--ipv4-address", "ipv4", default=None, help="IPv4 Address")
@click.option("-6", "--ipv6-address", "ipv6", default=None, help="IPv6 Address")
@click.option("-f", "--fqdn", "fqdn", required=True, help="FQDN")
def host(**click_input):
    """Add forward (A/AAAA) and reverse (PTR) records for a single host.

    Locates the YAML config (explicit path or ./deenis.yaml), validates that
    at least one IP address was supplied, then delegates to Deenis.AddHost
    and pretty-prints the per-record results.
    """
    if not click_input["config_file"]:
        config_path = Path.cwd().joinpath("deenis.yaml")
        if not config_path.exists():
            raise click.UsageError(
                click.style(
                    (
                        f"Config file not specified and not found at {config_path}. "
                        "Please specify a config file path."
                    ),
                    fg="red",
                    bold=True,
                )
            )
    elif click_input["config_file"]:
        # BUG FIX: Path().resolve(<str>) resolved the *current directory* and
        # passed the file name as Path.resolve()'s `strict` flag.
        config_path = Path(click_input["config_file"]).resolve()
    if not click_input["ipv4"] and not click_input["ipv6"]:
        raise click.UsageError(
            click.style("At least one IP Address is required", fg="red", bold=True)
        )
    try:
        responses = Deenis(str(config_path)).AddHost(
            {
                "hostname": click_input["fqdn"],
                "ipv4": click_input["ipv4"],
                "ipv6": click_input["ipv6"],
            }
        )
        if responses:
            # Each response is (status, record type, record, target, errors).
            for res in responses:
                status, record_record, record, target, errors = res
                if status == "Success":
                    click.echo(
                        "Added "
                        + click.style(record_record, fg="green", bold=True)
                        + " Record for "
                        + click.style(record, fg="yellow", bold=True)
                        + " Pointing to "
                        + click.style(target, fg="blue", bold=True)
                    )
                elif status == "Failure":
                    click.echo(
                        "Error Adding "
                        + click.style(record_record, fg="magenta", bold=True)
                        + " Record for "
                        + click.style(record, fg="cyan", bold=True)
                        + " Pointing to "
                        + click.style(target, fg="red", bold=True)
                        + f"\nErrors:\n"
                    )
                    for err in errors:
                        click.secho(err, fg="red")
        if not responses:
            click.secho("\nNo records were added", fg="magenta", bold=True)
    except (RuntimeError, AttributeError) as error_exception:
        raise click.UsageError(click.style(str(error_exception), fg="red", bold=True))
@add_records.command("tenant", help="Bulk Add PTR Records for a Tenant/Customer")
@click.option("-c", "--config-file", "config_file", help="Path to YAML Config File")
@click.option(
    "-i", "--crm-id", "crm_id", default=None, help="Unique Tenant Indentifier"
)
@click.option(
    "-4", "--ipv4-prefix", "prefix4", default=None, help="IPv4 Prefix Assignment"
)
@click.option(
    "-6", "--ipv6-prefix", "prefix6", default=None, help="IPv6 Prefix Assignment"
)
@click.option(
    "-f4", "--ipv4-fqdn", "host4", default=None, help="FQDN for IPv4 PTR Target"
)
@click.option(
    "-f6", "--ipv6-fqdn", "host6", default=None, help="FQDN for IPv6 PTR Target"
)
def tenant_reverse(**click_input):
    """Bulk-add PTR records for a tenant's assigned prefixes.

    Locates the YAML config (explicit path or ./deenis.yaml), requires at
    least one prefix, delegates to Deenis.TenantReverse, and pretty-prints
    each (status, type, name, target, errors) response tuple.
    """
    if not click_input["config_file"]:
        config_path = Path.cwd().joinpath("deenis.yaml")
        if not config_path.exists():
            raise click.UsageError(
                click.style(
                    (
                        f"Config file not specified and not found at {config_path}. "
                        "Please specify a config file path."
                    ),
                    fg="red",
                    bold=True,
                )
            )
    elif click_input["config_file"]:
        # BUG FIX: Path().resolve(<str>) resolved the *current directory* and
        # passed the file name as Path.resolve()'s `strict` flag.
        config_path = Path(click_input["config_file"]).resolve()
    if not click_input["prefix4"] and not click_input["prefix6"]:
        raise click.UsageError(
            click.style("At least one prefix is required", fg="red", bold=True)
        )
    try:
        responses = Deenis(str(config_path)).TenantReverse(
            {
                "crm_id": click_input["crm_id"],
                "host4": click_input["host4"],
                "host6": click_input["host6"],
                "prefix4": click_input["prefix4"],
                "prefix6": click_input["prefix6"],
            }
        )
        """
        Response format:
        [
            (
                'Success',
                'A',
                'test011.omnificent.io',
                '172.16.31.10',
                []
            ),
            (
                'Success',
                'PTR',
                '250',
                'test011.omnificent.io',
                []
            )
        ]
        """
        nl = "\n"
        tab = "  "
        # Style presets for the various output fragments.
        _text = {"fg": "white", "bold": True}
        _stat_suc = {"fg": "green", "bold": True}
        _stat_fail = {"fg": "red", "bold": True}
        _rec_type = {"fg": "yellow", "bold": True}
        _rec_name = {"fg": "magenta", "bold": True}
        _rec_trgt = {"fg": "cyan", "bold": True}
        _error = {"fg": "red"}
        click.secho(nl + "Records:" + nl, **_text)
        for res in responses:
            status, rec_type, rec_name, rec_trgt, errors = res
            if status == "Success":
                _status = ("⚡ " + status, _stat_suc)
            elif status == "Failure":
                _status = ("☝ " + status, _stat_fail)
            else:
                # ROBUSTNESS FIX: an unexpected status used to leave _status
                # unbound and crash with UnboundLocalError; print it plainly.
                _status = (status, _text)
            click.echo(
                tab
                + click.style(_status[0], **_status[1])
                + nl
                + tab * 4
                + click.style(rec_type, **_rec_type)
                + click.style(" ⟫ ", **_text)
                + click.style(rec_name, **_rec_name)
                + click.style(" ⟩ ", **_text)
                + click.style(rec_trgt, **_rec_trgt)
            )
            if errors:
                click.echo(tab * 4 + click.style("Errors: ", **_stat_fail))
                for err in errors:
                    if isinstance(err, dict):
                        for ename in err.keys():
                            click.echo(
                                tab * 6
                                + click.style(str(ename) + ":", **_error)
                                + tab
                                + click.style(str(err[ename]), **_error)
                            )
                    elif isinstance(err, str):
                        click.echo(tab * 4 + click.style(err, **_error))
    except (AttributeError, RuntimeError) as tenant_error:
        raise click.ClickException(tenant_error)
# Entry point: expose the click command group when run as a script.
if __name__ == "__main__":
    add_records()
| 2.921875 | 3 |
main_cl.py | spiolynn/pybo | 0 | 4880 | # coding: utf-8
from bigone import BigOneDog
from common import gen_logger
import logging
import time
import json
def strategy_eth_big_bnc_eth(dog):
    """
    Triangular arbitrage check across BIG/ETH, BIG/BNC and ETH/BNC.

    Forward:  buy BIG/ETH -> sell BIG/BNC -> buy ETH/BNC
    Backward: sell ETH/BNC -> buy BIG/BNC -> sell BIG/ETH

    :param dog: implementation of BigOneDog (exchange API client)
    :return: True when a forward arbitrage opportunity (>1%, enough depth)
        was found and logged/printed for execution, otherwise False
    """
    big_eth_data = dog.get_order_book('BIG-ETH')
    big_bnc_data = dog.get_order_book('BIG-BNC')
    eth_bnc_data = dog.get_order_book('ETH-BNC')
    print('BIG-ETH')
    print('卖一', big_eth_data['asks'][0]['price'], big_eth_data['asks'][0]['amount'])
    print('买一', big_eth_data['bids'][0]['price'], big_eth_data['bids'][0]['amount'])
    print('BIG-BNC')
    print('卖一', big_bnc_data['asks'][0]['price'], big_bnc_data['asks'][0]['amount'])
    print('买一', big_bnc_data['bids'][0]['price'], big_bnc_data['bids'][0]['amount'])
    print('ETH-BNC')
    print('卖一', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount'])
    print('买一', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount'])
    # positive transaction
    # each 0.999 factor models one leg's 0.1% trading fee
    pos_anc = 0.999*0.999*0.999*\
              ((1 / (float(big_eth_data['asks'][0]['price'])))
               * float(big_bnc_data['bids'][0]['price']) )
    pos_anc = pos_anc / float(eth_bnc_data['asks'][0]['price']) - 1
    # negative transaction
    # NOTE(review): the last leg sells BIG/ETH but uses the *ask* price;
    # a sell normally fills at the bid — confirm this is intentional.
    neg_anc = 0.999 * 0.999 * 0.999 * \
              (float(eth_bnc_data['bids'][0]['price'])
               / float(big_bnc_data['asks'][0]['price'])
               * float(big_eth_data['asks'][0]['price']))
    neg_anc = neg_anc / 1 - 1
    # depth check: all three legs must have enough size for `amt` BIG
    flag = False
    amt = 2.0
    if float(big_eth_data['asks'][0]['amount']) >= amt:
        if float(big_bnc_data['bids'][0]['amount']) >= amt:
            if float(eth_bnc_data['asks'][0]['amount']) >= amt * float(big_eth_data['asks'][0]['price']):
                flag = True
    msg = "预期本次[正向套利:买BIG/ETH -> 卖BIG/BNC -> 买ETH/BNC]利润:"
    if pos_anc < 0.01:
        # profit below the 1% threshold: skip
        result = "利润空间小于1%, 放弃本次套利 0"
        logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
    else:
        result = "利润空间大于1%"
        if flag is False:
            # profitable but not enough depth
            result = "{},{}".format(result,"量不足, 放弃本次套利 0")
            logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
        else:
            result = "{},{}".format(result,"执行本次套利 1")
            logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
            print("{} {} {} {}".format('BIG-ETH','BID', big_eth_data['asks'][0]['price'], str(amt)))
            print("{} {} {} {}".format('BIG-BNC','ASK', big_bnc_data['bids'][0]['price'], str(amt)))
            print("{} {} {} {}".format('ETH-BNC','BID', eth_bnc_data['asks'][0]['price'],
                                       str(amt * float(big_eth_data['asks'][0]['price']))))
            # actual order placement is intentionally disabled (dry run):
            # dog.create_order('BIG-ETH','ASK', big_eth_data['asks'][0]['price'], '2.0')
            # dog.create_order('BIG-BNC','BID', big_bnc_data['bids'][0]['price'], '2.0')
            # dog.create_order('ETH-BNC','ASK', eth_bnc_data['asks'][0]['price'],
            #                  str(2.0 * float(big_eth_data['asks'][0]['price'])))
            return True
    if neg_anc < 0.01:
        result = "利润空间小于1%, 放弃本次套利 0"
    else:
        result = "利润空间大于1%, 执行本次套利 1"
    logger.info("预期本次[反向套利:卖ETH/BNC -> 买BIG/BNC -> 卖BIG/ETH]利润: {0:.2f}%, {1}".format(neg_anc*100,result))
    return False
    # return pos_anc, neg_anc
def strategy_eth_bnc(dog):
    """Spread strategy on ETH/BNC: when the ask/bid spread exceeds 2%, place a
    small buy just above the best bid and a small sell just below the best ask.

    :param dog: BigOneDog API client
    :return: (spread, spread) when orders were placed; implicitly None otherwise
    """
    eth_bnc_data = dog.get_order_book('ETH-BNC')
    print('ETH-BNC')
    print('卖一', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount'])
    print('买一', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount'])
    # relative spread: ask/bid - 1
    anc = float(eth_bnc_data['asks'][0]['price']) / float(eth_bnc_data['bids'][0]['price']) - 1
    print(anc)
    if anc > 0.02:
        r = dog.create_order('ETH-BNC', 'BID', str(float(eth_bnc_data['bids'][0]['price'])+0.01), '0.01' )
        bid_order_id = r['order_id']
        r = dog.create_order('ETH-BNC', 'ASK', str(float(eth_bnc_data['asks'][0]['price'])-0.01), '0.01' )
        # NOTE(review): both order ids are captured but never used -- presumably
        # intended for later cancellation/tracking; confirm before removing.
        ask_order_id = r['order_id']
        return anc, anc
# Entry point: load the API key, then poll the triangular-arbitrage strategy
# every 10 seconds until it reports an executed (forward) opportunity.
if __name__ == '__main__':
    gen_logger('bigonetest')
    logger = logging.getLogger("bigone")
    # The BigONE private key is expected under the "key" field of this file.
    with open("PRIVATE_KEY.json",'r') as f:
        private_key = json.load(f)["key"]
    dog = BigOneDog(private_key)
    # (previously-commented experiments with strategy_eth_bnc / order queries
    # were removed here; see version control history if needed)
    while True:
        flag = strategy_eth_big_bnc_eth(dog)
        if flag is True:
            break
        else:
            print("休眠10秒")
            print("")
            time.sleep(10)
run_experiments.py | gahaalt/cifar-vs-tensorflow2 | 6 | 4881 | import os
import yaml
import logging
import importlib
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.getLogger('tensorflow').disabled = True
from cifar_training_tools import cifar_training, cifar_error_test
def print_dict(d, tabs=0):
    """Pretty-print a (possibly nested) dict, one "key: value" per line,
    indenting nested dicts by one tab per nesting level.

    :param d: dictionary to print
    :param tabs: current indentation depth (used by the recursive calls)
    """
    tab = '\t'
    for key in d:
        # isinstance (rather than `type(...) == dict`) also handles
        # dict subclasses such as OrderedDict.
        if isinstance(d[key], dict):
            print(f"{tab*tabs}{key}:")
            print_dict(d[key], tabs + 1)
        else:
            print(f"{tab*tabs}{key}: {d[key]}")
# Phase 1: instantiate every model once and run a quick error test so that
# obviously-broken experiment definitions fail before any training starts.
print('\n' + '#' * 19)
print("TESTING FOR ERRORS!")
print('#' * 19)
# NOTE(review): the stream is opened twice and never closed — a `with`
# block per phase would be tidier; confirm no other reader depends on it.
stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
    # experiments may opt out of the dry-run check
    if 'skip_error_test' in exp and exp['skip_error_test']:
        continue
    # resolve the model class named in the YAML document
    model = getattr(importlib.import_module(exp['module']), exp['model'])
    cifar_error_test(model(**exp['model_parameters']))
print("OK!")
# Phase 2: actually train each experiment in order.
print('\n' + '#' * 22)
print("MODEL TRAINING BEGINS!")
print('#' * 22)
stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
    print(); print_dict(exp); print();
    model = getattr(importlib.import_module(exp['module']), exp['model'])
    cifar_training(model(**exp['model_parameters']), **exp['train_parameters'])
json2yaml.py | cristicalin/tools | 0 | 4882 | <filename>json2yaml.py<gh_stars>0
#!/usr/bin/python
# Python 2 filter script: read JSON from stdin, emit it as YAML on stdout.
# (Uses the Python 2 `print` statement; not runnable under Python 3.)
import sys
import yaml
import json
if __name__ == '__main__':
  content = json.load(sys.stdin)
  print yaml.dump(content, indent=2, default_flow_style=False)
| 2.078125 | 2 |
histdata/mt5db/script_DownloadAndStoreToMongodb.py | UpSea/midProjects | 1 | 4883 | # -*- coding: utf-8 -*-
import os,sys
from PyQt4 import QtGui,QtCore
dataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'histdata'))
sys.path.append(dataRoot)
import dataCenter as dataCenter
from data.mongodb.DataSourceMongodb import Mongodb
import datetime as dt
def getSymbols():
    """Return the list of instrument codes to download.

    Codes are pasted one per line (optionally with an exchange suffix such
    as ``.SZ``) into the triple-quoted block below. ``str.split()`` discards
    all surrounding whitespace/newlines, and any suffix is stripped at the
    first dot, e.g. ``'000021.SZ' -> '000021'``.
    """
    raw = """
    XAGUSD
    """
    return [entry.split('.')[0] for entry in raw.split()]
def subMain():
    """Download daily history for the configured symbols from MT5 and store
    it into the local MongoDB via the shared dataCenter facade."""
    DC = dataCenter.dataCenter()
    remoteDataSourceType = 'mt5'
    localStorageType = 'mongodb'
    periodType = 'D'  # daily bars
    timeStart = dt.datetime(2000,10,20)
    timeEnd = dt.datetime.now()
    # 1) collect the symbol codes to fetch
    codeList = getSymbols()
    # 2) download history data and persist it
    dataDict = DC.downloadHistData(providerType=remoteDataSourceType,storageType=localStorageType,periodType=periodType,
                                      codeList=codeList,timeFrom = timeStart,timeTo = timeEnd)
# Entry point; the Qt application wrapper is intentionally disabled since
# this script needs no GUI event loop.
if __name__ == '__main__':
    #app = QtGui.QApplication(sys.argv)
    #mid-----------------------------------------------------------------------------------------------------------------------------
    subMain()
    #mid-----------------------------------------------------------------------------------------------------------------------------
    #sys.exit(app.exec_())
daproli/transformer.py | ermshaua/daproli | 0 | 4884 | <filename>daproli/transformer.py
from joblib import Parallel, delayed
from tqdm import tqdm
from .processing import map, filter, split, expand, combine, join
from .manipulation import windowed, flatten
class BaseTransformer:
    """Common interface for all daproli transformers.

    Subclasses override :meth:`transform`, which consumes a collection of
    data items and returns the transformed result.
    """
    def transform(self, data, *args, **kwargs):
        """Transform *data*; must be implemented by subclasses."""
        raise NotImplementedError()
class Mapper(BaseTransformer):
    # Wraps daproli's `map` (imported from .processing — not the builtin).
    def __init__(self, func, ret_type=None, expand_args=True, n_jobs=1, verbose=0, **kwargs):
        '''
        dp.Mapper is the respective transformer for dp.map.
        Parameters
        -----------
        :param func: the mapping function
        :param ret_type: if provided the used return type, otherwise ret_type(data)
        :param expand_args: true if args should be expanded, false otherwise
        :param n_jobs: amount of used threads/processes
        :param verbose: verbosity level for tqdm / joblib
        :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky'
        '''
        self.func = func
        self.ret_type = ret_type
        self.expand_args = expand_args
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs
    def transform(self, data, *args, **kwargs):
        # Note: extra *args/**kwargs passed to transform() are accepted for
        # interface compatibility but are not forwarded to map().
        return map(self.func, data, self.ret_type, expand_args=self.expand_args, n_jobs=self.n_jobs,
                   verbose=self.verbose, **self.kwargs)
class Filter(BaseTransformer):
    # Wraps daproli's `filter` (imported from .processing — not the builtin).
    def __init__(self, pred, ret_type=None, expand_args=True, n_jobs=1, verbose=0, **kwargs):
        '''
        dp.Filter is the respective transformer for dp.filter.
        Parameters
        -----------
        :param pred: the filter predicate
        :param ret_type: if provided the used return type, otherwise ret_type(data)
        :param expand_args: true if args should be expanded, false otherwise
        :param n_jobs: amount of used threads/processes
        :param verbose: verbosity level for tqdm / joblib
        :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky'
        '''
        self.pred = pred
        self.ret_type = ret_type
        self.expand_args = expand_args
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs
    def transform(self, data, *args, **kwargs):
        # Extra *args/**kwargs are accepted for interface compatibility only.
        return filter(self.pred, data, ret_type=self.ret_type, expand_args=self.expand_args, n_jobs=self.n_jobs,
                      verbose=self.verbose, **self.kwargs)
class Splitter(BaseTransformer):
    # Partitions a collection into groups according to a discriminator function.
    def __init__(self, func, ret_type=None, return_labels=False, expand_args=True, n_jobs=1, verbose=0, **kwargs):
        '''
        dp.Splitter is the respective transformer for dp.split.
        Parameters
        -----------
        :param func: the discriminator function
        :param ret_type: if provided the used return type, otherwise ret_type(data)
        :param return_labels: true if the associated labels should be returned, false otherwise
        :param expand_args: true if args should be expanded, false otherwise
        :param n_jobs: amount of used threads/processes
        :param verbose: verbosity level for tqdm / joblib
        :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky'
        '''
        self.func = func
        self.ret_type = ret_type
        self.return_labels = return_labels
        self.expand_args = expand_args
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs
    def transform(self, data, *args, **kwargs):
        # Extra *args/**kwargs are accepted for interface compatibility only.
        return split(self.func, data, ret_type=self.ret_type, return_labels=self.return_labels,
                     expand_args=self.expand_args, n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs)
class Expander(BaseTransformer):
    # Maps each item to several items via an expansion function.
    def __init__(self, func, ret_type=None, expand_args=True, n_jobs=1, verbose=0, **kwargs):
        '''
        dp.Expander is the respective transformer for dp.expand.
        Parameters
        -----------
        :param func: the expansion function
        :param ret_type: if provided the used return type, otherwise ret_type(data)
        :param expand_args: true if args should be expanded, false otherwise
        :param n_jobs: amount of used threads/processes
        :param verbose: verbosity level for tqdm / joblib
        :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky'
        '''
        self.func = func
        self.ret_type = ret_type
        self.expand_args = expand_args
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs
    def transform(self, data, *args, **kwargs):
        # BUG FIX: this previously passed the misspelled keyword `n_jons`,
        # which fell into **kwargs and was forwarded to joblib.Parallel
        # (crashing there), while expand() always ran with its default n_jobs.
        return expand(self.func, data, ret_type=self.ret_type, expand_args=self.expand_args, n_jobs=self.n_jobs,
                      verbose=self.verbose, **self.kwargs)
class Combiner(BaseTransformer):
    # Combines several parallel collections element-wise with `func`.
    def __init__(self, func, expand_args=True, n_jobs=1, verbose=0, **kwargs):
        '''
        dp.Combiner is the respective transformer for dp.combine.
        Parameters
        -----------
        :param func: the combination function
        :param expand_args: true if args should be expanded, false otherwise
        :param n_jobs: amount of used threads/processes
        :param verbose: verbosity level for tqdm / joblib
        :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky'
        '''
        self.func = func
        self.expand_args = expand_args
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs
    def transform(self, data, *args, **kwargs):
        # `data` must itself be a sequence of collections; it is unpacked
        # into combine() as separate positional arguments.
        return combine(self.func, *data, expand_args=self.expand_args, n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs)
class Joiner(BaseTransformer):
    # Joins several collections pairwise according to a join predicate/function.
    def __init__(self, func, expand_args=True, n_jobs=1, verbose=0, **kwargs):
        '''
        dp.Joiner is the respective transformer for dp.join.
        Parameters
        -----------
        :param func: the join function
        :param expand_args: true if args should be expanded, false otherwise
        :param n_jobs: amount of used threads/processes
        :param verbose: verbosity level for tqdm / joblib
        :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky'
        '''
        self.func = func
        self.expand_args = expand_args
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs
    def transform(self, data, *args, **kwargs):
        # `data` must itself be a sequence of collections; it is unpacked
        # into join() as separate positional arguments.
        return join(self.func, *data, expand_args=self.expand_args, n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs)
class Manipulator(BaseTransformer):
    """Applies one function to the entire collection at once (not per item)."""
    def __init__(self, func, void=False, *args, **kwargs):
        '''
        :param func: the manipulation function
        :param void: if true the result is discarded and the input passes through
        :param args: additional args for func
        :param kwargs: additional kwargs for func
        '''
        self.func = func
        self.void = void
        self.args = args
        self.kwargs = kwargs
    def transform(self, data, *args, **kwargs):
        # Always invoke func (it may have side effects); forward its result
        # only when void is exactly False, matching the original `is False`
        # identity check.
        outcome = self.func(data, *self.args, **self.kwargs)
        if self.void is False:
            return outcome
        return data
class Window(BaseTransformer):
    # Produces sliding windows over a collection via dp.windowed.
    def __init__(self, size, step=1, ret_type=None):
        '''
        dp.Window is the respective transformer for dp.windowed.
        Parameters
        -----------
        :param size: the window size
        :param step: the window step
        :param ret_type: if provided the used return type, otherwise ret_type(data)
        '''
        self.size = size
        self.step = step
        self.ret_type = ret_type
    def transform(self, data, *args, **kwargs):
        # Extra *args/**kwargs are accepted for interface compatibility only.
        return windowed(data, self.size, step=self.step, ret_type=self.ret_type)
class Flat(BaseTransformer):
    """Transformer counterpart of dp.flatten: collapses one nesting level of
    the given collection."""
    def __init__(self, ret_type=None):
        """
        :param ret_type: if provided the used return type, otherwise ret_type(data)
        """
        self.ret_type = ret_type
    def transform(self, data, *args, **kwargs):
        # Delegate straight to the functional API; extra arguments are
        # accepted for interface compatibility but not used.
        return flatten(data, ret_type=self.ret_type)
class Union(BaseTransformer):
    # Applies the i-th transformer to the i-th sub-collection of the input.
    def __init__(self, *transformers, n_jobs=1, verbose=0, **kwargs):
        '''
        dp.Union is a construct to manipulate multi-collections of data items.
        Parameters
        -----------
        :param transformers: the transformers for the respective collections of data items
        :param n_jobs: amount of used threads/processes
        :param verbose: verbosity level for tqdm / joblib
        :param kwargs: additional arguments for joblib.Parallel, e.g. backend='loky'
        '''
        self.transformers = transformers
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.kwargs = kwargs
    def transform(self, data, *args, **kwargs):
        # Sequential path (with tqdm progress) vs. joblib-parallel path.
        if self.n_jobs == 1:
            return [transformer.transform(items, *args, **kwargs)
                    for transformer, items in tqdm(zip(self.transformers, data), disable=self.verbose < 1)]
        return Parallel(n_jobs=self.n_jobs, verbose=self.verbose, **self.kwargs)(delayed(transformer.transform)
                (items, *args, **kwargs) for transformer, items in zip(self.transformers, data))
class Pipeline(BaseTransformer):
    """Chains several transformers: the output of each stage feeds the next."""
    def __init__(self, *transformers, verbose=0):
        """
        :param transformers: the transformer sequence to apply
        :param verbose: verbosity level for tqdm
        """
        self.transformers = list(transformers)
        self.verbose = verbose
    def transform(self, data, *args, **kwargs):
        # Thread the data through every stage in order; the tqdm progress bar
        # is shown only when verbose is at least 1.
        acc = data
        for stage in tqdm(self.transformers, disable=self.verbose < 1):
            acc = stage.transform(acc, *args, **kwargs)
        return acc
| 2.59375 | 3 |
Ad-Hoc/2454.py | LorranSutter/URI-Online-Judge | 0 | 4885 | <gh_stars>0
# URI Online Judge 2454: read the two flags P and R from one input line and
# print the category: 'C' when P is zero, 'B' when only R is zero, else 'A'.
P, R = input().split()
if P == '0': print('C')
elif R == '0': print('B')
else: print('A')
main.py | brunotoshio/castella | 2 | 4886 | <filename>main.py
import pymongo
import yaml
import sched
import time
import json
from castella import TweetCrawler
class Castella(object):
    def __init__(self):
        """Load database, search and scheduling settings from ./settings.yml."""
        # Get connection parameters
        with open("settings.yml", "r") as stream:
            try:
                settings = yaml.safe_load(stream)["settings"]
                # Database
                self.server_url = settings["output"]["database"]["url"]
                self.server_port = settings["output"]["database"]["port"]
                self.database_name = settings["output"]["database"]["database"]
                self.collection_name = settings["output"]["database"]["collection"]
                # Search
                self.query = settings["search"]["query"]
                self.search_params = settings["search"]["params"]
                # Schedule
                self.interval_type = settings["interval"]["each"]
                self.interval_amount = settings["interval"]["amount"]
                self.total_executions = 0
            except yaml.YAMLError as exc:
                # NOTE(review): only YAMLError is handled here — a missing file
                # raises before this block and a missing key raises KeyError,
                # neither of which matches the printed message; confirm intent.
                print("ERROR: No settings.yml found or it could not be read")
def execute_search(self):
# Mongo connection
client = pymongo.MongoClient(self.server_url, self.server_port)
db = client[self.database_name]
self.tweets = db[self.collection_name]
self._create_scheduled_executions()
def _save_tweet(self, tweet):
print("Saving: ", tweet._json["id_str"])
try:
bson = tweet._json
bson["query_str"] = self.query
self.tweets.insert_one(bson)
except:
print("Error occurred when trying to save")
def _search(self):
# Continue from last id
try:
self.tweets.create_index([("id", pymongo.DESCENDING)])
last_tweet = self.tweets.find({}).sort([("id", pymongo.DESCENDING)]).next()
except StopIteration:
last_tweet = None
# Searching
tc = TweetCrawler()
params = dict(result_type="recent", include_entities=True, count=100)
if isinstance(self.search_params, dict):
params.update(self.search_params)
if last_tweet is not None:
print("============================================================")
print("Resuming from tweet id:", last_tweet['id_str'])
print("============================================================")
params["since_id"] = last_tweet.get("id_str")
tc.search(self.query, self._save_tweet, params)
self.total_executions += 1
print("============================================================")
print("Finished for today...")
print(self.total_executions, "out of", self.interval_amount, "scheduled executions")
print("============================================================")
if self.total_executions < self.interval_amount:
print("Keep this process running until the execution of the last scheduled iteration, or stop this process to cancel further executions.")
print("============================================================")
# Preparing functions for scheduler
def _days(self):
return time.time() / (60 * 60 * 24)
def _weeks(self):
return time.time() / (60 * 60 * 24 * 7)
# Scheduling events
def _create_scheduled_executions(self):
if self.interval_type == "day":
handler = self._days
else:
handler = self._weeks
scheduler = sched.scheduler(handler, time.sleep)
for i in range(self.interval_amount):
scheduler.enter(i, 1, self._search)
scheduler.run()
if __name__ == "__main__":
    # Entry point: load settings.yml, connect to MongoDB and start crawling.
    searcher = Castella()
    searcher.execute_search()
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py | monroid/openvino | 2,406 | 4887 | <reponame>monroid/openvino
#
# slice paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import sys
data_type = 'float32'

# NOTE(review): this helper shadows the builtin `slice`; kept as-is because
# the script is self-contained.
def slice(name : str, x, axes : list, start : list, end : list):
    """Build a static paddle program applying fluid.layers.slice to *x*,
    run it on CPU, save the model via saveModel, and return the output."""
    pdpd.enable_static()

    with pdpd.static.program_guard(pdpd.static.Program(), pdpd.static.Program()):
        node_x = pdpd.static.data(name='x', shape=x.shape, dtype = data_type)
        out = pdpd.fluid.layers.slice(node_x, axes = axes, starts = start, ends = end)

        cpu = pdpd.static.cpu_places(1)
        exe = pdpd.static.Executor(cpu[0])
        # startup program will call initializer to initialize the parameters.
        exe.run(pdpd.static.default_startup_program())

        outs = exe.run(
            feed={'x': x},
            fetch_list=[out])

        # target_dir comes from the command line (sys.argv[1]).
        saveModel(name, exe, feedkeys=['x'], fetchlist=[out], inputs=[x], outputs=[outs[0]], target_dir=sys.argv[1])

    return outs[0]
def main():
    """Generate the reference slice models used by the frontend tests."""
    # 3-D input: slice along axes 1 and 2.
    x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(4, 3, 5).astype(data_type)
    slice("slice", x, axes=[1, 2], start=(0, 1), end=(-1, 3))

    # 2-D input: slice along axis 0 only.
    x = np.linspace(1, 60, num = 60, dtype=np.int32).reshape(2, 30).astype(data_type)
    slice("slice_1d", x, axes=[0], start=[0], end=[1])

if __name__ == "__main__":
    main()
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored import objects
LOG = logging.getLogger(__name__) # not used at the moment
def get_inst(context, inst_id):
    """Return the VnfInstanceV2 identified by *inst_id*.

    Raises VnfInstanceNotFound when no such instance exists.
    """
    instance = objects.VnfInstanceV2.get_by_id(context, inst_id)
    if instance is None:
        raise sol_ex.VnfInstanceNotFound(inst_id=inst_id)
    return instance
def get_inst_all(context):
    """Return all VnfInstanceV2 objects."""
    return objects.VnfInstanceV2.get_all(context)
def inst_href(inst_id, endpoint):
    """Return the resource URI of a VNF instance under *endpoint*."""
    return f"{endpoint}/v2/vnflcm/vnf_instances/{inst_id}"
def make_inst_links(inst, endpoint):
    """Build the _links structure for a VNF instance representation.

    Always sets `self`; adds `instantiate` or `terminate` depending on the
    instance's instantiationState.
    """
    links = objects.VnfInstanceV2_Links()
    self_href = inst_href(inst.id, endpoint)
    links.self = objects.Link(href=self_href)
    if inst.instantiationState == 'NOT_INSTANTIATED':
        links.instantiate = objects.Link(href=self_href + "/instantiate")
    else:  # 'INSTANTIATED'
        links.terminate = objects.Link(href=self_href + "/terminate")
    # TODO(oda-g): add when the operation supported
    # links.scale = objects.Link(href = self_href + "/scale")
    # etc.

    return links
# see IETF RFC 7396
def json_merge_patch(target, patch):
    """Apply an RFC 7396 JSON Merge Patch to *target* and return the result.

    A non-dict patch replaces the target wholesale; a None value in the
    patch removes the corresponding key. Dict targets are mutated in place.
    """
    if not isinstance(patch, dict):
        return patch
    result = target if isinstance(target, dict) else {}
    for key, value in patch.items():
        if value is None:
            result.pop(key, None)
        else:
            result[key] = json_merge_patch(result.get(key), value)
    return result
def select_vim_info(vim_connection_info):
    """Return the first vim_info value, or None when the mapping is empty.

    NOTE: It is assumed that vimConnectionInfo has only one item at the
    moment. If there are multiple items, it is uncertain which item is
    selected.
    """
    return next(iter(vim_connection_info.values()), None)
| 1.835938 | 2 |
# compatibility module for different python versions
import sys

# Aliases that paper over Python 2 / Python 3 naming differences so the rest
# of the package can stay version-agnostic.
# NOTE(review): the check is `> (3, 0)`, so Python 3.0 itself would take the
# Python 2 branch — presumably never encountered in practice.
if sys.version_info[:2] > (3, 0):

    PY2 = False
    PY3 = True

    Bytes = bytes
    Unicode = str
    basestring = str

    class_type_name = 'class'
    ClassType = type
    exception_module = 'builtins'
    new_class = type
    self_name = '__self__'
    from io import StringIO
    xrange = range

else:

    PY2 = True
    PY3 = False

    Bytes = str
    Unicode = unicode
    basestring = basestring

    class_type_name = 'type'
    from types import ClassType
    exception_module = 'exceptions'
    from new import classobj as new_class
    self_name = 'im_self'
    from cStringIO import StringIO
    xrange = xrange
| 2.390625 | 2 |
old_py2/tests/models_tests/notifications/test_match_score.py | ofekashery/the-blue-alliance | 266 | 4890 | <reponame>ofekashery/the-blue-alliance<filename>old_py2/tests/models_tests/notifications/test_match_score.py<gh_stars>100-1000
import re
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.notification_type import NotificationType
from helpers.event.event_test_creator import EventTestCreator
from models.team import Team
from models.notifications.match_score import MatchScoreNotification
class TestMatchScoreNotification(unittest2.TestCase):
    """Unit tests for MatchScoreNotification FCM/webhook payloads."""

    def setUp(self):
        # Stand up GAE service stubs so datastore/memcache/taskqueue calls work.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()
        ndb.get_context().clear_cache()  # Prevent data from leaking between tests

        self.testbed.init_taskqueue_stub(root_path=".")

        for team_number in range(6):
            Team(id="frc%s" % team_number,
                 team_number=team_number).put()

        self.event = EventTestCreator.createPresentEvent()
        self.match = self.event.matches[0]

        self.notification = MatchScoreNotification(self.match)

    def tearDown(self):
        self.testbed.deactivate()

    def test_type(self):
        self.assertEqual(MatchScoreNotification._type(), NotificationType.MATCH_SCORE)

    def test_fcm_notification(self):
        # Body is score-dependent, so match it against a pattern.
        self.assertIsNotNone(self.notification.fcm_notification)
        self.assertEqual(self.notification.fcm_notification.title, 'TESTPRESENT Q1 Results')
        match_regex = re.compile(r'^\d+, \d+, \d+ beat \d+, \d+, \d+ scoring \d+-\d+.$')
        match = re.match(match_regex, self.notification.fcm_notification.body)
        self.assertIsNotNone(match)

    def test_fcm_notification_tied(self):
        # Force a tie by copying red's score onto blue.
        score = self.notification.match.alliances['red']['score']
        self.notification.match.alliances['blue']['score'] = score
        self.assertIsNotNone(self.notification.fcm_notification)
        self.assertEqual(self.notification.fcm_notification.title, 'TESTPRESENT Q1 Results')
        match_regex = re.compile(r'^\d+, \d+, \d+ tied with \d+, \d+, \d+ scoring \d+-\d+.$')
        match = re.match(match_regex, self.notification.fcm_notification.body)
        self.assertIsNotNone(match)

    def test_fcm_notification_team(self):
        # A team-scoped notification prefixes the title with the team number.
        team = Team.get_by_id('frc1')
        notification = MatchScoreNotification(self.match, team)
        self.assertEqual(notification.fcm_notification.title, 'Team 1 TESTPRESENT Q1 Results')

    def test_data_payload(self):
        payload = self.notification.data_payload
        self.assertEqual(len(payload), 2)
        self.assertEqual(payload['event_key'], self.event.key_name)
        self.assertEqual(payload['match_key'], '{}_qm1'.format(self.event.key_name))

    def test_data_payload_team(self):
        team = Team.get_by_id('frc1')
        notification = MatchScoreNotification(self.match, team)
        payload = notification.data_payload
        self.assertEqual(len(payload), 3)
        self.assertEqual(payload['event_key'], self.event.key_name)
        self.assertEqual(payload['match_key'], '{}_qm1'.format(self.event.key_name))
        self.assertEqual(payload['team_key'], 'frc1')

    def test_webhook_message_data(self):
        # Has `event_name`
        payload = self.notification.webhook_message_data
        self.assertEqual(len(payload), 3)
        self.assertEqual(payload['event_key'], self.event.key_name)
        self.assertEqual(payload['event_name'], 'Present Test Event')
        self.assertIsNotNone(payload['match'])

    def test_webhook_message_data_team(self):
        team = Team.get_by_id('frc1')
        notification = MatchScoreNotification(self.match, team)
        payload = notification.webhook_message_data
        self.assertEqual(len(payload), 4)
        self.assertEqual(payload['event_key'], self.event.key_name)
        self.assertEqual(payload['event_name'], 'Present Test Event')
        self.assertEqual(payload['team_key'], 'frc1')
        self.assertIsNotNone(payload['match'])
| 2.296875 | 2 |
md_template_d144 = """verbosity=0
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=160
ny=80
nz=80
[Domain]
ox=0.
oy=0.
oz=0.
lx=42.4813
ly=21.2406
lz=21.2406
[Potentials]
pseudopotential=pseudo.D_tm_pbe
[Poisson]
solver=@
max_steps_initial=@50
max_steps=@50
reset=@
bcx=periodic
bcy=periodic
bcz=periodic
[Run]
type=MD
[MD]
type=@
num_steps=@
dt=@15.
[XLBOMD]
dissipation=@5
align=@
[Quench]
max_steps=@5
max_steps_tight=@
atol=1.e-@10
num_lin_iterations=3
ortho_freq=100
[SpreadPenalty]
type=@energy
damping=@
[email protected]
[email protected]
[Orbitals]
initial_type=Gaussian
initial_width=1.5
overallocate_factor=@2.
[ProjectedMatrices]
solver=@short_sighted
[LocalizationRegions]
radius=@8.
auxiliary_radius=@
[email protected]
[Restart]
input_filename=wave.out
input_level=3
interval=@
"""
# MGmol input template for the 64-molecule heavy-water MD run; '@' marks
# values that the submission tooling substitutes.
md_template_H2O_64 = """verbosity=1
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=128
ny=128
nz=128
[Domain]
ox=0.
oy=0.
oz=0.
lx=23.4884
ly=23.4884
lz=23.4884
[Potentials]
pseudopotential=pseudo.O_ONCV_PBE_SG15
pseudopotential=pseudo.D_ONCV_PBE_SG15
[Poisson]
solver=@
max_steps=@
[Run]
type=MD
[Quench]
max_steps=1000
atol=1.e-@
[MD]
type=@
num_steps=@
dt=10.
print_interval=5
[XLBOMD]
dissipation=@
align=@
[Restart]
input_filename=wave.out
input_level=4
output_level=4
interval=@
"""
# MGmol input template for the initial quench of the H2O_64 system.
quench_template_H2O_64 = """verbosity=1
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=128
ny=128
nz=128
[Domain]
ox=0.
oy=0.
oz=0.
lx=23.4884
ly=23.4884
lz=23.4884
[Potentials]
pseudopotential=pseudo.O_ONCV_PBE_SG15
pseudopotential=pseudo.D_ONCV_PBE_SG15
[Run]
type=QUENCH
[Quench]
max_steps=1000
atol=1.e-8
[Orbitals]
initial_type=Fourier
[Restart]
output_level=4
"""
# MGmol input template for the initial quench of the d144 system; '@' marks
# values that the submission tooling substitutes.
quench_template_d144 = """verbosity=1
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=160
ny=80
nz=80
[Domain]
ox=0.
oy=0.
oz=0.
lx=42.4813
ly=21.2406
lz=21.2406
[Potentials]
pseudopotential=pseudo.D_tm_pbe
[Poisson]
solver=@
max_steps_initial=@50
max_steps=@50
bcx=periodic
bcy=periodic
bcz=periodic
[Run]
type=QUENCH
[Quench]
max_steps=200
atol=1.e-7
num_lin_iterations=3
ortho_freq=100
[SpreadPenalty]
type=@energy
damping=@
[email protected]
[email protected]
[Orbitals]
initial_type=Gaussian
initial_width=1.5
[ProjectedMatrices]
solver=@short_sighted
[LocalizationRegions]
radius=@8.
[Restart]
output_type=distributed
"""
# Batch-job parameters for the H2O_64 benchmark.
# NOTE(review): `omp_num_threads` is referenced here but not defined in the
# visible scope of this module — presumably injected by the calling script;
# verify before use.
H2O_64_params={
    'nodes': '32',
    'ntasks': '256',
    'omp_num_threads': 8 if omp_num_threads == 4 else omp_num_threads,
    'cores_per_task': '2',
    'potentials': 'ln -s $maindir/potentials/pseudo.O_ONCV_PBE_SG15\nln -s $maindir/potentials/pseudo.D_ONCV_PBE_SG15',
    'lrs': '',
    'jobname': 'H2O_64',
}
# Batch-job parameters for the d144 benchmark.
d144_params={
    'nodes': '8',
    'walltime': '01:30:00',
    'ntasks': '125',
    'omp_num_threads': omp_num_threads,
    'cores_per_task': '1',
    'potentials': 'ln -s $maindir/potentials/pseudo.D_tm_pbe',
    'lrs': '-l lrs.in',
    'jobname': 'd144',
}
# Machine-specific parameters for the Vulcan (BG/Q) cluster.
vulcan_params={
    'queue': 'psmall',
    'scratch_path': '/p/lscratchv/mgmolu/dunn27/mgmol/',
    'gres': 'lscratchv',
    'exe': 'mgmol-bgq',
}
# Machine-specific parameters for the Cab cluster.
cab_params={
    'queue': 'pbatch',
    'scratch_path': '/p/lscratchd/dunn27/mgmol/',
    'gres': 'lscratchd',
    'omp_num_threads': '1',
    'exe': 'mgmol-pel',
    'walltime': '01:30:00',
}
# MSUB/tcsh batch-script template for the quench run; '{...}' placeholders
# are filled via str.format with the *_params dicts above ('{{ }}' escapes
# the awk braces).
runfile_quench_template="""#!/bin/tcsh
#MSUB -l nodes={nodes},walltime={walltime}
#MSUB -o mgmol.out
#MSUB -q {queue}
#MSUB -A comp
#MSUB -l gres={gres}
#MSUB -N {jobname}
rm -f queued
echo ' ' > running
use boost-nompi-1.55.0
export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0
export Boost_NO_SYSTEM_PATHS=ON
setenv OMP_NUM_THREADS {omp_num_threads}
set ntasks = {ntasks}
set maindir = $home/mgmol
set exe = $maindir/bin/{exe}
set datadir = `pwd`
set scratchdir = {scratch_path}`basename $datadir`
mkdir $scratchdir
cd $scratchdir
echo ' ' > running
set cfg_quench = mgmol_quench.cfg
cp $datadir/$cfg_quench .
cp $datadir/coords.in .
cp $datadir/lrs.in .
{potentials}
#1st run
srun -n $ntasks -c {cores_per_task} $exe -c $cfg_quench -i coords.in {lrs}
#restart
rm -f wave.out
set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1`
ln -s -f $restart_file wave.out
rm -f running
echo ' ' > queued
"""
# MSUB/tcsh batch-script template for the MD run; resumes from the latest
# snapshot produced by the quench job.
runfile_md_template="""#!/bin/tcsh
#MSUB -l nodes={nodes},walltime={walltime}
#MSUB -o mgmol.out
#MSUB -q {queue}
#MSUB -A comp
#MSUB -l gres={gres}
#MSUB -N {jobname}
rm -f queued
echo ' ' > running
use boost-nompi-1.55.0
export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0
export Boost_NO_SYSTEM_PATHS=ON
setenv OMP_NUM_THREADS {omp_num_threads}
set ntasks = {ntasks}
set maindir = $home/mgmol
set exe = $maindir/bin/{exe}
set datadir = `pwd`
set scratchdir = {scratch_path}`basename $datadir`
mkdir $scratchdir
cd $scratchdir
echo ' ' > running
set cfg_md = mgmol_md.cfg
cp $datadir/$cfg_md .
#restart
rm -f wave.out
set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1`
ln -s -f $restart_file wave.out
#MD run
srun -n $ntasks -c {cores_per_task} $exe -c $cfg_md
#restart
rm -f wave.out
set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1`
ln -s -f $restart_file wave.out
rm -f running
echo ' ' > queued
"""
| 1.078125 | 1 |
#! /usr/bin/env python
import copy
from copy import deepcopy
import rospy
import threading
import quaternion
import numpy as np
from geometry_msgs.msg import Point
from visualization_msgs.msg import *
from franka_interface import ArmInterface
from panda_robot import PandaArm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation
np.set_printoptions(precision=2)
"""
This is a FORCE-BASED VARIABLE IMPEDANCE CONTROLLER based on [Huang1992: Compliant Motion Control of Robots by Using Variable Impedance]
To achieve force tracking, the apparent stiffness (K) and damping (B) is dynamically adjusted through functions dependent on the error in position, velocity and force
About the code/controller:
1] Only stiffness and damping in the 'z'-direction is adaptive, the rest are static
2] Due to the faulted joint velocities (read from rostopics), the more noisy,
numerically derived derivatives of the joint position are prefered to be
used in the controller { get_x_dot(..., numerically = True) }
3] You can now choose between perform_torque_Huang1992() and perform_torque_DeSchutter()
- DeSchutter's control-law offers geometrically consistent stiffness and is more computationally expensive
4] The default desired motion- and force-trajectories are now made in a time-consistent matter, so that the PUBLISH RATE can be altered without messing up the desired behaviour.
The number of iterations is calculated as a function of the controller's control-cycle, T: (max_num_it = duration(=15 s) / T)
"""
# --------- Constants -----------------------------
#print(robot.joint_ordered_angles()) #Read the robot's joint-angles
#new_start = {'panda_joint1': 1.938963389436404, 'panda_joint2': 0.6757504724282993, 'panda_joint3': -0.43399745125475564, 'panda_joint4': -2.0375275954865573, 'panda_joint5': -0.05233040021194351, 'panda_joint6': 3.133254153457202, 'panda_joint7': 1.283328743909796}
# Stiffness
Kp = 30
Kpz = 30 #initial value (adaptive)
Ko = 900
K = np.array([[Kp, 0, 0, 0, 0, 0],
[0, Kp, 0, 0, 0, 0],
[0, 0, Kpz, 0, 0, 0],
[0, 0, 0, Ko, 0, 0],
[0, 0, 0, 0, Ko, 0],
[0, 0, 0, 0, 0, Ko]])
# Damping
Bp = Kp/7
Bpz = Bp # #initial value (adaptive)
Bo = 50
B = np.array([[Bp, 0, 0, 0, 0, 0],
[0, Bp, 0, 0, 0, 0],
[0, 0, Bpz, 0, 0, 0],
[0, 0, 0, Bo, 0, 0],
[0, 0, 0, 0, Bo, 0],
[0, 0, 0, 0, 0, Bo]])
# Apparent inertia
Mp = 10
Mo = 10
M_diag = np.array([Mp,Mp,Mp,Mo,Mo,Mo])
M = np.diagflat(M_diag)
# Constant matrices appearing in equation (50) of [Huang1992]
K_v = np.identity(6)
P = np.identity(6)
gamma = np.identity(18)
#gamma_M = 12
gamma_B = 0.001 #2 # The damping's rate of adaptivity (high value = slow changes)
gamma_K = 0.0005 #1 # The stiffness' rate of adaptivity (high value = slow changes)
#gamma[2,2] = gamma_M
gamma[8,8] = gamma_B
gamma[14,14] = gamma_K
duration = 15 #seconds SHOULD NOT BE ALTERED
"""Functions for generating desired MOTION trajectories"""
#1 Generate a desired trajectory for the manipulator to follow
def generate_desired_trajectory(iterations, T):
    """Generate a desired motion trajectory.

    Args:
        iterations: number of control-loop samples to generate.
        T: control-loop period [s].

    Returns:
        (a, v, p): 6xN acceleration, 6xN velocity and 3xN position arrays;
        the position starts at the robot's current position.
    """
    a = np.zeros((6, iterations))
    v = np.zeros((6, iterations))
    p = np.zeros((3, iterations))
    p[:, 0] = get_p()

    if iterations > 300:
        a[2, 0:100] = -0.00001 / T**2
        a[2, 250:350] = 0.00001 / T**2
    if iterations > 6500:
        a[0, 4500:4510] = 0.00001 / T**2
        a[0, 6490:6500] = -0.00001 / T**2
    # Euler-integrate acceleration twice to get velocity and position.
    # BUG FIX: the loop previously ran over the global `max_num_it` instead
    # of the `iterations` argument, failing whenever they differed.
    for i in range(1, iterations):
        v[:, i] = v[:, i - 1] + a[:, i - 1] * T
        p[:, i] = p[:, i - 1] + v[:3, i - 1] * T
    return a, v, p
#2 Generate a desired trajectory for the manipulator to follow
def generate_desired_trajectory_express(iterations, T):
    """Generate a faster variant of the desired motion trajectory.

    Args:
        iterations: number of control-loop samples to generate.
        T: control-loop period [s].

    Returns:
        (a, v, p): 6xN acceleration, 6xN velocity and 3xN position arrays.
    """
    a = np.zeros((6, iterations))
    v = np.zeros((6, iterations))
    p = np.zeros((3, iterations))
    p[:, 0] = get_p()

    if iterations > 175:
        a[2, 0:50] = -0.00002 / T**2
        a[2, 125:175] = 0.00002 / T**2
    if iterations > 3250:
        a[0, 2250:2255] = 0.00002 / T**2
        a[0, 3245:3250] = -0.00002 / T**2
    # BUG FIX: the loop previously ran over the global `max_num_it` instead
    # of the `iterations` argument.
    for i in range(1, iterations):
        v[:, i] = v[:, i - 1] + a[:, i - 1] * T
        p[:, i] = p[:, i - 1] + v[:3, i - 1] * T
    return a, v, p
#3 Generate a (time-consistent) desired motion trajectory
def generate_desired_trajectory_tc(iterations, T, move_in_x=False):
    """Generate a time-consistent desired motion trajectory.

    Phase boundaries scale with *iterations*, so the motion shape is
    preserved when the publish rate (and hence T) changes.

    Args:
        iterations: number of control-loop samples to generate.
        T: control-loop period [s].
        move_in_x: also add a movement phase along the x-axis.

    Returns:
        (a, v, p): 6xN acceleration, 6xN velocity and 3xN position arrays.
    """
    a = np.zeros((6, iterations))
    v = np.zeros((6, iterations))
    p = np.zeros((3, iterations))
    p[:, 0] = get_p()

    a[2, 0:int(iterations / 75)] = -1.25
    a[2, int(iterations * 2 / 75):int(iterations / 25)] = 1.25
    if move_in_x:
        a[0, int(iterations * 3 / 5):int(iterations * 451 / 750)] = 1.25
        a[0, int(iterations * 649 / 750):int(iterations * 13 / 15)] = -1.25
    # BUG FIX: the loop previously ran over the global `max_num_it` instead
    # of the `iterations` argument.
    for i in range(1, iterations):
        v[:, i] = v[:, i - 1] + a[:, i - 1] * T
        p[:, i] = p[:, i - 1] + v[:3, i - 1] * T
    return a, v, p
"""Functions for generating desired FORCE trajectories"""
#1 Generate a desired force trajectory
def generate_F_d(max_num_it, T):
    """Generate the desired z-force trajectory (6xN, only row 2 nonzero)."""
    acc = np.zeros((6, max_num_it))
    vel = np.zeros((6, max_num_it))
    traj = np.zeros((6, max_num_it))

    # Force ramp-up / ramp-down pulses in the acceleration profile.
    acc[2, 0:100] = 0.0005 / T**2
    acc[2, 100:200] = -0.0005 / T**2
    if max_num_it > 1100:
        acc[2, 500:550] = 0.0002 / T**2
    if max_num_it > 4001:
        acc[2, 1500:1550] = -0.0002 / T**2
        # Sinusoidal force variation between samples 2000 and 4000.
        for it in range(2000, 4001):
            acc[2, it] = (-9 * (np.pi**2) * (T / 4)**2
                          * np.sin(it * T / 4 * 2 * np.pi + np.pi / 2)) / T**2
        acc[2, 4001] = 0.0001 / T**2
    # Double Euler integration yields the desired force trajectory.
    for i in range(1, max_num_it):
        vel[2, i] = vel[2, i - 1] + acc[2, i - 1] * T
        traj[2, i] = traj[2, i - 1] + vel[2, i - 1] * T
    return traj
#2 Generate an efficient desired force trajectory
def generate_F_d_express(max_num_it, T):
    """Generate a faster desired z-force trajectory (6xN, only row 2 nonzero)."""
    acc = np.zeros((6, max_num_it))
    vel = np.zeros((6, max_num_it))
    traj = np.zeros((6, max_num_it))

    acc[2, 0:50] = 0.0010 / T**2
    acc[2, 100:150] = -0.0010 / T**2
    if max_num_it > 275:
        acc[2, 250:275] = 0.0008 / T**2
    if max_num_it > 2001:
        acc[2, 750:775] = -0.0008 / T**2
        # Sinusoidal force variation between samples 1000 and 2000.
        for it in range(1000, 2001):
            acc[2, it] = (-9 * (np.pi**2) * (T / 4)**2
                          * np.sin(2 * it * T / 4 * 2 * np.pi + np.pi / 2)) / T**2
        acc[2, 2001] = 0.0001 / T**2
    # Double Euler integration yields the desired force trajectory.
    for i in range(1, max_num_it):
        vel[2, i] = vel[2, i - 1] + acc[2, i - 1] * T
        traj[2, i] = traj[2, i - 1] + vel[2, i - 1] * T
    return traj
#3 Generate a (time-consistent) desired force trajectory
def generate_F_d_tc(max_num_it, T):
    """Generate a time-consistent desired z-force trajectory.

    Phase boundaries scale with *max_num_it*, so the force profile keeps
    its shape when the publish rate changes. Returns a 6xN array with only
    row 2 (the z-axis) nonzero.
    """
    acc = np.zeros((6, max_num_it))
    vel = np.zeros((6, max_num_it))
    traj = np.zeros((6, max_num_it))

    acc[2, 0:int(max_num_it / 75)] = 62.5
    acc[2, int(max_num_it / 37.5):int(max_num_it / 25)] = -62.5
    if max_num_it > 275:
        acc[2, int(max_num_it / 15):int(max_num_it * 11 / 150)] = 50
    if max_num_it > 2001:
        acc[2, int(max_num_it / 5):int(max_num_it * 31 / 150)] = -50
    # Sinusoidal force variation over the middle of the run.
    for it in range(int(max_num_it * 4 / 15), int(max_num_it * 8 / 15) + 1):
        acc[2, it] = (-9 * (np.pi**2) * (T / 4)**2
                      * np.sin(2 * it * T / 4 * 2 * np.pi + np.pi / 2)) / T**2
    acc[2, int(max_num_it * 8 / 15 + 1)] = 6.25
    # Double Euler integration yields the desired force trajectory.
    for i in range(1, max_num_it):
        vel[2, i] = vel[2, i - 1] + acc[2, i - 1] * T
        traj[2, i] = traj[2, i - 1] + vel[2, i - 1] * T
    return traj
# ------------ Helper functions --------------------------------
# Calculate the numerical derivative of a each row in a vector
def get_derivative_of_vector(history, iteration, T):
    """Backward-difference derivative of each row of *history* at *iteration*.

    Returns a zero vector at iteration 0, where no previous sample exists.
    """
    if iteration <= 0:
        return np.zeros(history.shape[0])
    return (history[:, iteration] - history[:, iteration - 1]) / T
# Saturation-function
def ensure_limits(lower, upper, matrix):
    """Clamp the first six diagonal entries of *matrix* into [lower, upper],
    modifying the matrix in place (saturation)."""
    for idx in range(6):
        matrix[idx, idx] = min(max(matrix[idx, idx], lower), upper)
# Return the cartesian (task-space) inertia of the manipulator [alternatively the inverse of it]
def get_W(inv = False):
W = np.linalg.multi_dot([robot.jacobian(),np.linalg.inv(robot.joint_inertia_matrix()),robot.jacobian().T])
if inv == True:
return np.linalg.inv(W)
else:
return W
# Return the external forces (everything except for z-force is set to 0 due to offsets)
def get_F_ext(two_dim=False):
    """External wrench; only the measured z-force is kept (the other axes
    are zeroed due to sensor offsets). 6x1 column when two_dim is True."""
    wrench = np.array([0, 0, robot.endpoint_effort()['force'][2], 0, 0, 0])
    return wrench.reshape([6, 1]) if two_dim == True else wrench
# Return the position and (relative) orientation
def get_x(goal_ori):
    """Current state: position plus orientation error (vs goal_ori) in
    Euler-angle radians, as one 6-vector."""
    position = robot.endpoint_pose()['position']
    ori_err = quatdiff_in_euler_radians(goal_ori, np.asarray(robot.endpoint_pose()['orientation']))
    return np.append(position, ori_err)
# Return the linear and angular velocities
# Numerically = True -> return the derivarive of the state-vector
# Numerically = False -> read values from rostopic (faulty in sim when interacting with the environment)
def get_x_dot(x_hist, i, T, numerically=False):
    """End-effector twist (linear + angular velocity).

    With numerically=True the state history is differentiated instead of
    reading the rostopic velocities (which are faulty in sim on contact).
    """
    if numerically == True:
        return get_derivative_of_vector(x_hist, i, T)
    return np.append(robot.endpoint_velocity()['linear'],
                     robot.endpoint_velocity()['angular'])
# Return the error in position and orientation
def get_delta_x(goal_ori, p_d, two_dim=False):
    """Pose error: desired-minus-current position plus orientation error in
    Euler radians. 6x1 column when two_dim is True."""
    delta_pos = p_d - robot.endpoint_pose()['position']
    delta_ori = quatdiff_in_euler_radians(np.asarray(robot.endpoint_pose()['orientation']), goal_ori)
    err = np.append(delta_pos, delta_ori)
    return np.array([err]).reshape([6, 1]) if two_dim == True else err
# Return the error in linear and angular velocities
def get_x_dot_delta(x_d_dot, x_dot, two_dim=True):
    """Velocity error (desired minus actual twist).

    Returned as a 6x1 column when two_dim is True, otherwise flat.
    """
    error = x_d_dot - x_dot
    return error.reshape([6, 1]) if two_dim == True else error
# Return the error in linear and angular acceleration
def get_x_ddot_delta(x_d_ddot, v_history, i, T):
    """Acceleration error: desired minus numerically derived actual."""
    actual_acc = get_derivative_of_vector(v_history, i, T)
    return x_d_ddot - actual_acc
# Return the cartesian (task-space) position
def get_p(two_dim=False):
    """Current end-effector position (3-vector, or 3x1 column if two_dim)."""
    position = robot.endpoint_pose()['position']
    return position.reshape([3, 1]) if two_dim == True else position
# Compute difference between quaternions and return Euler angle in radians as difference
def quatdiff_in_euler_radians(quat_curr, quat_des):
    """Compute the difference between two quaternions and return it as a
    rotation vector in radians."""
    curr_mat = quaternion.as_rotation_matrix(quat_curr)
    des_mat = quaternion.as_rotation_matrix(quat_des)
    # Relative rotation of the current frame expressed in the desired frame.
    rel_mat = des_mat.T.dot(curr_mat)
    rel_quat = quaternion.from_rotation_matrix(rel_mat)
    vec = quaternion.as_float_array(rel_quat)[1:]
    if rel_quat.w < 0.0:
        # Quaternion double-cover: flip to represent the shorter rotation.
        vec = -vec

    # NOTE(review): presumably rotates the error vector back to the base
    # frame with a sign flip — confirm against the controller convention.
    return -des_mat.dot(vec)
# -------------- Main functions --------------------
# Get xi as it is described in equation (44) in [Huang1992]
def get_xi(goal_ori, p_d, x_dot, x_d_dot, x_d_ddot, v_history, i, T):
    """Assemble xi = [diag(E_ddot), diag(E_dot), diag(E)] as in equation
    (44) of [Huang1992], from the negated pose/velocity/acceleration errors."""
    pos_err = -get_delta_x(goal_ori, p_d)
    vel_err = -get_x_dot_delta(x_d_dot, x_dot, two_dim=False)
    acc_err = -get_x_ddot_delta(x_d_ddot, v_history, i, T)
    return np.block([np.diagflat(acc_err), np.diagflat(vel_err), np.diagflat(pos_err)])
# Calculate lambda_dot as in equation (50) in [Huang1992]
def get_lambda_dot(gamma, xi, K_v, P, F_d):
    """Adaptation law, equation (50) of [Huang1992]:
    lambda_dot = -gamma^-1 xi^T K_v^-1 P (F_ext - F_d)."""
    force_error = get_F_ext(two_dim=True) - F_d.reshape([6, 1])
    return np.linalg.multi_dot([-np.linalg.inv(gamma), xi.T,
                                np.linalg.inv(K_v), P, force_error])
# Return the updated (adapted) Inertia, Damping and Stiffness matrices.
def update_MBK_hat(lam,M,B,K):
    """Return the adapted inertia, damping and stiffness matrices.

    *lam* holds 18 adaptation parameters: [0:6] inertia, [6:12] damping,
    [12:18] stiffness corrections (one per Cartesian axis).
    """
    M_hat = M # + np.diagflat(lam[0:6]) M is chosen to be constant
    B_hat = B + np.diagflat(lam[6:12])
    K_hat = K + np.diagflat(lam[12:18])
    #ensure_limits(1,5000,M_hat)
    # Saturate the adapted gains to keep the impedance well-conditioned.
    ensure_limits(1,5000,B_hat)
    ensure_limits(1,5000,K_hat)
    return M_hat, B_hat, K_hat
# Calculate and perform the torque as in equation (10) in [Huang1992]
def perform_torque_Huang1992(M, B, K, x_d_ddot, x_d_dot,x_dot, p_d, goal_ori):
    """Compute and apply joint torques per equation (10) of [Huang1992]."""
    # a: maps the desired task-space dynamics into joint torques.
    a = np.linalg.multi_dot([robot.jacobian().T,get_W(inv=True),np.linalg.inv(M)])
    # b: impedance target M*x_dd_desired + B*(velocity error) + K*(pose error).
    b = np.array([np.dot(M,x_d_ddot)]).reshape([6,1]) + np.array([np.dot(B,get_x_dot_delta(x_d_dot,x_dot))]).reshape([6,1]) + np.array([np.dot(K,get_delta_x(goal_ori,p_d,two_dim = True))]).reshape([6,1])
    c = robot.coriolis_comp().reshape([7,1])
    # d: external-force feed-through term.
    d = (np.identity(6)-np.dot(get_W(inv=True),np.linalg.inv(M))).reshape([6,6])
    total_torque = np.array([np.dot(a,b)]).reshape([7,1]) + c + np.array([np.linalg.multi_dot([robot.jacobian().T,d,get_F_ext()])]).reshape([7,1])
    robot.set_joint_torques(dict(list(zip(robot.joint_names(),total_torque))))
"""
TESTING AREA (Functions needed to run an adaptive version of DeSchutter's impedance controller)
[with geometrically consistent stiffness]
"""
def skew(vector):
    """Return the 3x3 skew-symmetric (cross-product) matrix of a 3-vector."""
    x, y, z = vector[0], vector[1], vector[2]
    return np.array([[0, -z, y],
                     [z, 0, -x],
                     [-y, x, 0]])
def from_three_to_six_dim(matrix):
    """Embed a 3x3 matrix block-diagonally into a 6x6 matrix."""
    pad = np.zeros((3, 3))
    return np.block([[matrix, pad],
                     [pad, matrix]])
def get_K_Pt_dot(R_d, K_pt, R_e):
    """Average of the translational stiffness expressed in the desired
    (R_d) and actual (R_e) end-effector frames; returned with shape (1,3,3)."""
    desired_term = np.linalg.multi_dot([R_d, K_pt, R_d.T])
    actual_term = np.linalg.multi_dot([R_e, K_pt, R_e.T])
    return np.array([0.5 * desired_term + 0.5 * actual_term])
def get_K_Pt_ddot(p_d, R_d, K_pt):
    """Translational stiffness coupling term built from the position error
    (desired minus current); returned with shape (1,3,3)."""
    offset = p_d - robot.endpoint_pose()['position']
    return np.array([0.5 * np.linalg.multi_dot([skew(offset), R_d, K_pt, R_d.T])])
def E_quat(quat_n, quat_e):
    """Quaternion propagation matrix E(eta, eps) = eta*I - S(eps)."""
    return quat_n * np.identity(3) - skew(quat_e)
def get_K_Po_dot(quat_n, quat_e, R_e, K_po):
    """Orientational stiffness term 2 E^T R_e K_po R_e^T; shape (1,3,3)."""
    E = E_quat(quat_n, quat_e)
    return np.array([2 * np.linalg.multi_dot([E.T, R_e, K_po, R_e.T])])
def get_h_delta(K_pt_dot,K_pt_ddot,p_delta,K_po_dot,quat_e):
    """Elastic wrench h_delta = [f_delta; m_delta] assembled from the
    stiffness terms: translational force/moment plus orientational moment."""
    f_delta_t = np.array([np.dot(K_pt_dot,p_delta)])
    m_delta_t = np.array([np.dot(K_pt_ddot,p_delta)])
    null = np.zeros((3,1))
    m_delta_o = np.array([np.dot(K_po_dot,quat_e)])

    # Stack into a 6x1 wrench: [f_t; m_t] + [0; m_o].
    return np.array([np.append(f_delta_t.T,m_delta_t.T)]).T + np.array([np.append(null.T,m_delta_o.T)]).T
def perform_torque_DeSchutter(M, B, K, x_d_ddot, x_d_dot,x_dot, p_d, Rot_d): # must include Rot_d
    """Impedance control with geometrically consistent stiffness
    [DeSchutter]; computes the commanded acceleration in the end-effector
    frame and applies the resulting joint torques."""
    J = robot.jacobian()
    Rot_e = robot.endpoint_pose()['orientation_R']
    Rot_e_bigdim = from_three_to_six_dim(Rot_e)
    Rot_e_dot = np.dot(skew(robot.endpoint_velocity()['angular']),Rot_e) #not a 100 % sure about this one
    Rot_e_dot_bigdim = from_three_to_six_dim(Rot_e_dot)

    quat = quaternion.from_rotation_matrix(np.dot(Rot_e.T,Rot_d)) #orientational displacement represented as a unit quaternion
    #quat = robot.endpoint_pose()['orientation']
    quat_e_e = np.array([quat.x,quat.y,quat.z]) # vector part of the unit quaternion in the frame of the end effector
    quat_e = np.dot(Rot_e.T,quat_e_e) # ... in the base frame
    quat_n = quat.w

    p_delta = p_d-robot.endpoint_pose()['position']

    # Stiffness terms (translational, coupling and orientational).
    K_Pt_dot = get_K_Pt_dot(Rot_d,K[:3,:3],Rot_e)
    K_Pt_ddot = get_K_Pt_ddot(p_d,Rot_d,K[:3,:3])
    K_Po_dot = get_K_Po_dot(quat_n,quat_e,Rot_e,K[3:,3:])

    # Elastic wrench and measured wrench in the end-effector frame.
    h_delta_e = np.array(np.dot(Rot_e_bigdim,get_h_delta(K_Pt_dot,K_Pt_ddot,p_delta,K_Po_dot,quat_e))).reshape([6,1])
    h_e = get_F_ext(two_dim=True)
    h_e_e = np.array(np.dot(Rot_e_bigdim,h_e))

    # Desired acceleration/velocity expressed in the end-effector frame.
    a_d_e = np.dot(Rot_e_bigdim,x_d_ddot).reshape([6,1])
    v_d_e = np.dot(Rot_e_bigdim,x_d_dot).reshape([6,1])
    alpha_e = a_d_e + np.dot(np.linalg.inv(M),(np.dot(B,v_d_e.reshape([6,1])-np.dot(Rot_e_bigdim,x_dot).reshape([6,1]))+h_delta_e-h_e_e)).reshape([6,1])
    alpha = np.dot(Rot_e_bigdim.T,alpha_e).reshape([6,1])+np.dot(Rot_e_dot_bigdim.T,np.dot(Rot_e_bigdim,x_dot)).reshape([6,1])

    # Inverse dynamics: task-space inertia shaping + Coriolis + measured wrench.
    torque = np.linalg.multi_dot([J.T,get_W(inv=True),alpha]).reshape((7,1)) + np.array(robot.coriolis_comp().reshape((7,1))) + np.dot(J.T,h_e).reshape((7,1))
    robot.set_joint_torques(dict(list(zip(robot.joint_names(),torque))))
"""
TESTING AREA
"""
# -------------- Plotting ------------------------
def plot_result(v_num, v,p,p_d, delta_x, F_ext,F_d, z_dynamics,M,B,K, T):
    """Plot measured vs. desired z-force and Cartesian positions after a
    run; further subplots are kept disabled in the string literal below."""
    time_array = np.arange(len(p[0]))*T

    plt.subplot(211)
    plt.title("External force")
    plt.plot(time_array, F_ext[2], label="force z [N]")
    plt.plot(time_array, F_d[2], label="desired force z [N]", color='b',linestyle='dashed')
    plt.xlabel("Real time [s]")
    plt.legend()

    plt.subplot(212)
    plt.title("Position")
    plt.plot(time_array, p[0,:], label = "true x [m]")
    plt.plot(time_array, p[1,:], label = "true y [m]")
    plt.plot(time_array, p[2,:], label = "true z [m]")
    plt.plot(time_array, p_d[0,:], label = "desired x [m]", color='b',linestyle='dashed')
    plt.plot(time_array, p_d[1,:], label = "desired y [m]", color='C1',linestyle='dashed')
    plt.plot(time_array, p_d[2,:], label = "desired z [m]", color='g',linestyle='dashed')
    plt.xlabel("Real time [s]")
    plt.legend()

    # The block below is a no-op string literal, kept from the original
    # author for quickly re-enabling the extra diagnostic subplots.
    """
    plt.subplot(233)
    plt.title("Orientation error in Euler")
    plt.plot(time_array, delta_x[3]*(180/np.pi), label = "error Ori_x [degrees]")
    plt.plot(time_array, delta_x[4]*(180/np.pi), label = "error Ori_y [degrees]")
    plt.plot(time_array, delta_x[5]*(180/np.pi), label = "error Ori_z [degrees]")
    plt.xlabel("Real time [s]")
    plt.legend()
    plt.subplot(234)
    plt.title("Adaptive dynamics along the z-axis")
    plt.plot(time_array, z_dynamics[0], label = "inertia (M_z)")
    plt.plot(time_array, z_dynamics[1], label = "damping (B_z)")
    plt.plot(time_array, z_dynamics[2], label = "stiffness (K_z)")
    plt.axhline(y=M[2][2], label = "initial inertia (M_z)", color='b',linestyle='dashed')
    plt.axhline(y=B[2][2], label = "initial damping (B_z)", color='C1',linestyle='dashed')
    plt.axhline(y=K[2][2], label = "initial stiffness (K_z)", color='g',linestyle='dashed')
    plt.xlabel("Real time [s]")
    plt.legend()
    plt.subplot(235)
    plt.title("velocity read from rostopic")
    plt.plot(time_array, v[0], label = "vel x")
    plt.plot(time_array, v[1], label = "vel y")
    plt.plot(time_array, v[2], label = "vel z")
    plt.plot(time_array, v[3], label = "ang x")
    plt.plot(time_array, v[4], label = "ang y")
    plt.plot(time_array, v[5], label = "ang z")
    plt.xlabel("Real time [s]")
    plt.legend()
    plt.subplot(236)
    plt.title("numerically calculated velocity")
    plt.plot(time_array, v_num[0], label = "vel x")
    plt.plot(time_array, v_num[1], label = "vel y")
    plt.plot(time_array, v_num[2], label = "vel z")
    plt.plot(time_array, v_num[3], label = "ang x")
    plt.plot(time_array, v_num[4], label = "ang y")
    plt.plot(time_array, v_num[5], label = "ang z")
    plt.xlabel("Real time [s]")
    plt.legend()
    """
    plt.show()
if __name__ == "__main__":
    # ---------- Initialization -------------------
    rospy.init_node("impedance_control")
    robot = PandaArm()
    publish_rate = 250                    # control-loop frequency [Hz]
    rate = rospy.Rate(publish_rate)
    T = 0.001*(1000/publish_rate)         # sample period [s] (= 1/publish_rate)
    max_num_it = int(duration /T)         # total number of control iterations
    #robot.move_to_joint_positions(new_start)
    robot.move_to_neutral()

    # List used to contain data needed for calculation of the torque output
    lam = np.zeros(18)                    # adaptation state (lambda), reshaped to 18x1 below
    v_history = np.zeros((6,max_num_it))

    # Lists providing data for plotting
    p_history = np.zeros((3,max_num_it))
    v_history_num = np.zeros((6,max_num_it))
    x_history = np.zeros((6,max_num_it))
    delta_x_history = np.zeros((6,max_num_it))
    F_ext_history = np.zeros((6,max_num_it))
    z_dynamics_history = np.zeros((3,max_num_it))

    # Specify the desired behaviour of the robot
    x_d_ddot, x_d_dot, p_d = generate_desired_trajectory_tc(max_num_it,T,move_in_x = True)
    goal_ori = np.asarray(robot.endpoint_pose()['orientation'])  # goal orientation = current (initial) orientation [remains the same the entire duration of the run]
    Rot_d = robot.endpoint_pose()['orientation_R']  # used by the DeSchutter implementation
    F_d = generate_F_d_tc(max_num_it,T)

    # ----------- The control loop -----------
    for i in range(max_num_it):
        # update state-lists
        p_history[:,i] = get_p()
        x_history[:,i] = get_x(goal_ori)
        delta_x_history[:,i] = get_delta_x(goal_ori,p_d[:,i])
        F_ext_history[:,i] = get_F_ext()
        x_dot = get_x_dot(x_history,i,T, numerically=False) #chose 'numerically' either 'True' or 'False'
        v_history_num[:,i] = get_x_dot(x_history,i,T, numerically=True) # only for plotting
        v_history[:,i] = get_x_dot(x_history,i,T) # for calculating error in acceleration

        # adapt M,B and K
        xi = get_xi(goal_ori, p_d[:,i],x_dot, x_d_dot[:,i], x_d_ddot[:,i], v_history, i, T)
        # Euler-integrate the adaptation law: lambda += lambda_dot * T
        lam = lam.reshape([18,1]) + get_lambda_dot(gamma,xi,K_v,P,F_d[:,i]).reshape([18,1])*T
        M_hat,B_hat,K_hat = update_MBK_hat(lam,M,B,K)

        # Apply the resulting torque to the robot
        """CHOOSE ONE OF THE TWO CONTROLLERS BELOW"""
        perform_torque_Huang1992(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], goal_ori)
        #perform_torque_DeSchutter(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], Rot_d)
        rate.sleep()

        # plotting and printing: record the adaptive z-axis gains each step
        z_dynamics_history[0][i]=M_hat[2][2]
        z_dynamics_history[1][i]=B_hat[2][2]
        z_dynamics_history[2][i]=K_hat[2][2]

        # Live printing to screen when the controller is running
        if i%100 == 0:
            print(i,'/',max_num_it,' = ',T*i,' [s] ) Force in z: ',F_ext_history[2,i])
            print(K_hat[2][2])
            print('')

    #Uncomment the block below to save plotting-data
    """
    np.save('VIC_p_d.npy',p_d)
    np.save('VIC_p.npy',p_history)
    np.save('VIC_Fz_d.npy',F_d)
    np.save('VIC_Fz.npy',F_ext_history[2])
    np.save('VIC_delta_x.npy',delta_x_history) #orientation error in radians
    np.save('VIC_adaptive_gains.npy',z_dynamics_history)
    """

    plot_result(v_history_num,v_history, p_history, p_d, delta_x_history, F_ext_history, F_d, z_dynamics_history,M,B,K, T)
| 2.625 | 3 |
tests/migrations/0010_modeltest_datetime_field1.py | intellineers/django-bridger | 2 | 4893 | # Generated by Django 2.2.9 on 2020-01-28 14:50
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the non-nullable ``datetime_field1`` column to ``ModelTest``.

    ``preserve_default=False`` means ``timezone.now`` is used only to fill
    pre-existing rows during this migration (standard Django semantics).
    """

    dependencies = [
        ("tests", "0009_auto_20200113_1239"),
    ]

    operations = [
        migrations.AddField(
            model_name="modeltest",
            name="datetime_field1",
            field=models.DateTimeField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
| 1.75 | 2 |
pycmap/common.py | mdashkezari/pycmap | 4 | 4894 | <reponame>mdashkezari/pycmap
"""
Author: <NAME> <<EMAIL>>
Date: 2019-06-28
Function: Host a collection of shared multi-purpose helper functions.
"""
import os
import sys
from tqdm import tqdm
from colorama import Fore, Back, Style, init
import numpy as np
import pandas as pd
import webbrowser
import IPython
# Project-wide hard caps.  Presumably the maximum number of rows a single
# query may return and the maximum rows sampled from a data source — confirm
# at the call sites; the values are project-chosen limits.
MAX_ROWS = 2000000
MAX_SAMPLE_SOURCE = 500000
def halt(msg):
    """Print *msg* in red on stderr and terminate the process with exit code 1."""
    init(convert=True)
    print(Fore.RED + '\n' + msg, file=sys.stderr)
    print(Style.RESET_ALL, end='')
    sys.exit(1)
def print_tqdm(msg, err=False):
    """Print helper compatible with a tqdm progress bar; red text when err."""
    # init()
    text = '\n' + msg
    tqdm.write(Fore.RED + text if err else text)
    tqdm.write(Style.RESET_ALL, end='')
    return
def get_base_url():
    """Return the API root endpoint (CMAP_API_BASE_URL env var, else default),
    with any trailing slash removed."""
    base = os.environ.get('CMAP_API_BASE_URL', 'https://simonscmap.com')
    return base.rstrip('/')
def jupytered():
    """Return True when running inside Jupyter (i.e. __main__ has no __file__)."""
    import __main__ as main
    return not hasattr(main, '__file__')
def inline():
    """Whether results should be prepared for an "inline" context.

    Currently this is simply an alias for :func:`jupytered`.
    """
    return jupytered()
def make_filename_by_table_var(table, variable, prefix=''):
    """Compose an extension-less file name '<prefix>_<variable>_<table>'
    (the prefix part is omitted when *prefix* is empty)."""
    stem = variable + '_' + table
    if prefix != '':
        stem = prefix + '_' + stem
    return stem
def canvas_rect(dw, dh):
    """Scale canvas dimensions (dw, dh) to fit a client browser.

    Wide canvases (aspect ratio > 3) get a shorter height; the width is
    clamped to [300, 1000] pixels. Returns (width, height).
    """
    aspect = dw / dh
    height = 400 if aspect > 3 else 500
    width = min(max(int(aspect * height), 300), 1000)
    return width, height
def get_data_limits(data, quant=0.05):
    """Return the (low, high) quantile cut-offs of *data*, ignoring NaNs."""
    flat = np.array(data).ravel()
    return np.nanquantile(flat, quant), np.nanquantile(flat, 1 - quant)
# def get_token(token=None):
# token = token or os.environ.get('CMAP_API_KEY')
# if token in [None, '']:
# halt('API Key must be specified to access CMAP API')
# return token
def config_path():
    """Absolute path of the config spreadsheet shipped next to this module."""
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(here, 'config.csv')
def initiate_config_file(token, vizEngine, exportDir, exportFormat, figureDir):
    """Create the config .csv, substituting project defaults for None values."""
    settings = {
        'token': token,
        'vizEngine': vizEngine if vizEngine is not None else 'plotly',
        'exportDir': exportDir if exportDir is not None else './export/',
        'exportFormat': exportFormat if exportFormat is not None else '.csv',
        'figureDir': figureDir if figureDir is not None else './figure/',
    }
    frame = pd.DataFrame({key: [value] for key, value in settings.items()})
    frame.to_csv(config_path(), index=False)
    return
def remove_angle_brackets(token):
    """Strip a single leading '<' and trailing '>' from *token*, if present.

    Returns *token* unchanged when it is None or empty.  The original
    implementation raised IndexError for '' (token[0]) and for '<'
    (token[-1] after stripping); both are now handled.
    """
    if token:  # also guards the empty string
        if token[0] == '<':
            token = token[1:]
        if token and token[-1] == '>':
            token = token[:-1]
    return token
def save_config(token=None, vizEngine=None, exportDir=None, exportFormat=None, figureDir=None):
    """Persist any non-None settings into the config spreadsheet,
    creating it first (with defaults) when it does not exist."""
    path = config_path()
    if not os.path.isfile(path):
        initiate_config_file(token, vizEngine, exportDir, exportFormat, figureDir)
    df = pd.read_csv(path)
    if token is not None:
        df['token'] = remove_angle_brackets(token)
    if vizEngine is not None:
        if vizEngine not in ['bokeh', 'plotly']:
            halt('%s is not a supported visualization library' % vizEngine)
        df['vizEngine'] = vizEngine
    # Remaining settings are plain string columns with no validation.
    for column, value in (('exportDir', exportDir),
                          ('exportFormat', exportFormat),
                          ('figureDir', figureDir)):
        if value is not None:
            df[column] = value
    df.to_csv(path, index=False)
    return
def load_config():
    """Load the config spreadsheet as a DataFrame; halt with help text if absent."""
    path = config_path()
    if not os.path.isfile(path):
        halt(
            '\nAPI key not found!\n'
            'Please pass the API key using the following code:\n'
            'import pycmap\n'
            'pycmap.API(<api_key>)\n'
        )
    return pd.read_csv(path)
def get_token():
    """Return the stored API key, stripped of optional angle brackets."""
    raw = load_config()['token'][0]
    return remove_angle_brackets(raw)
def get_vizEngine():
    """Name of the visualization backend recorded in the config file."""
    config = load_config()
    return config['vizEngine'][0]
def get_export_dir():
    """Path of the export directory recorded in the config file."""
    config = load_config()
    return config['exportDir'][0]
def get_export_format():
    """File format (extension) used for exported files, per the config file."""
    config = load_config()
    return config['exportFormat'][0]
def get_figure_dir():
    """Path of the figure directory recorded in the config file."""
    config = load_config()
    return config['figureDir'][0]
def get_bokeh_tools():
    """Toolbar specification attached to bokeh figures."""
    tools = 'crosshair,pan,zoom_in,wheel_zoom,zoom_out,box_zoom,reset,save'
    return tools
def normalize(vals, min_max=False):
    """Rescale *vals*: to [0, 1] when *min_max* is True, otherwise z-score
    standardize (subtract mean, divide by std). NaN-aware via np.nan* ops."""
    if min_max:
        low, high = np.nanmin(vals), np.nanmax(vals)
        return (vals - low) / (high - low)
    return (vals - np.nanmean(vals)) / np.nanstd(vals)
def open_HTML(path):
    """Render an HTML file: inline IFrame under Jupyter, default browser otherwise."""
    if jupytered():
        frame = IPython.display.IFrame(path, width=800, height=400)
        IPython.display.display(frame)
    else:
        webbrowser.open('file://' + os.path.realpath(path), new=2)
    return
| 2.4375 | 2 |
tests/test_device.py | michaelwoods/home-assistant-cli | 0 | 4895 | """Testing Device operations."""
import json
import unittest.mock as mock
from click.testing import CliRunner
import homeassistant_cli.cli as cli
def test_device_list(default_devices) -> None:
    """`device list` must emit every known device as JSON."""
    patcher = mock.patch(
        'homeassistant_cli.remote.get_devices', return_value=default_devices
    )
    with patcher:
        result = CliRunner().invoke(
            cli.cli,
            ["--output=json", "device", "list"],
            catch_exceptions=False,
        )

    assert result.exit_code == 0
    data = json.loads(result.output)
    assert len(data) == 23
def test_device_list_filter(default_devices) -> None:
    """`device list <filter>` must return only the matching devices."""
    patcher = mock.patch(
        'homeassistant_cli.remote.get_devices', return_value=default_devices
    )
    with patcher:
        result = CliRunner().invoke(
            cli.cli,
            ["--output=json", "device", "list", "table"],
            catch_exceptions=False,
        )

    assert result.exit_code == 0
    data = json.loads(result.output)
    assert len(data) == 2
    assert data[0]['name'] == "Kitchen table left"
    assert data[1]['name'] == "Kitchen table right"
def test_device_assign(default_areas, default_devices) -> None:
    """Assigning an area to a device should report success on stdout."""
    with mock.patch(
        'homeassistant_cli.remote.get_devices', return_value=default_devices
    ), mock.patch(
        'homeassistant_cli.remote.get_areas', return_value=default_areas
    ), mock.patch(
        'homeassistant_cli.remote.assign_area',
        return_value={'success': True},
    ):
        result = CliRunner().invoke(
            cli.cli,
            ["device", "assign", "Kitchen", "Kitchen table left"],
            catch_exceptions=False,
        )
        print(result.output)  # kept: aids debugging on failure
        assert result.exit_code == 0

        expected = (
            "Successfully assigned 'Kitchen'"
            " to 'Kitchen table left'\n"
        )
        assert result.output == expected
| 2.796875 | 3 |
widgets/tree_item.py | tarsa129/j3d-animation-editor | 6 | 4896 | <filename>widgets/tree_item.py<gh_stars>1-10
from PyQt5.QtWidgets import QAction, QTreeWidget, QTreeWidgetItem, QFileDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
import animations.general_animation as j3d
from widgets.yaz0 import compress, compress_slow, compress_fast
from io import BytesIO
class tree_item(QTreeWidgetItem):
    """Tree-widget entry wrapping one j3d animation file and its edit state."""

    def __init__(self, parent):
        # 1000 = custom item type id (above Qt's UserType threshold).
        QTreeWidgetItem.__init__(self, parent, 1000)
        self.display_info = []    # table data describing the animation, used when saving
        self.filepath = ""        # path of the backing file ('|' normalized to '.')
        self.compressed = 1       # yaz0 level: 1 = none, 2 = fast, 3 = normal, 4 = slow
        self.bmd_file = None      # optional companion model used by export_anim
        self.sound_data = None    # optional sound section (speaker icon when present)
        self.changed = False      # dirty flag consulted by save_all

    def set_values(self, display_info, filepath, compressed):
        """Store display info, path and compression level; label with the basename."""
        self.display_info = display_info
        # NOTE(review): '|' appears to stand in for '.' in archive paths — confirm.
        self.filepath = filepath.replace("|", ".")
        self.compressed = compressed

        # Show only the basename (handles both '/' and '\\' separators).
        forward_i = filepath.rfind("/") + 1
        backwad_i = filepath.rfind("\\") + 1
        self.setText(0, self.filepath[max(forward_i, backwad_i):])

    def set_sound(self, sound_data):
        """Attach (or clear) sound data and toggle the speaker icon accordingly."""
        self.sound_data = sound_data
        if sound_data is not None:
            icon = QIcon("icons/sound.png")
            self.setIcon(0, icon)
        else:
            self.setIcon(0, QIcon())

    def save_animation(self, other_filepath = "", compress_dis = 1, save_all = False):
        """Write the animation to disk, optionally yaz0-compressing the result.

        other_filepath overrides the stored path; save_all skips unchanged items.
        NOTE(review): with the default compress_dis=1 the stored self.compressed
        is always overridden (compression disabled); callers must pass
        compress_dis=0 to honor the item's own setting — confirm this default
        is intended.
        """
        if save_all and not self.changed:
            print("skipping " + self.filepath + " because nothing has changed")
            return
        if other_filepath != "":
            working_filepath = other_filepath
        else:
            working_filepath = self.filepath

        # Paths ending in a bare 'a' (e.g. .bca/.bla, but not .bva) are written
        # through the "convert to all-frames" path; everything else via sort_filepath.
        if (working_filepath.endswith("a") and not working_filepath.endswith(".bva") ):
            info = j3d.fix_array( self.display_info)
            self.convert_to_a(info)
        else:
            info = j3d.fix_array( self.display_info)
            j3d.sort_filepath(working_filepath, info, self.sound_data)

        compress_status = self.compressed
        if compress_dis != 0:
            compress_status = compress_dis
        print(compress_status)

        # Levels 2-4 re-read the just-written file and overwrite it compressed.
        if compress_status > 1:
            out = BytesIO()
            with open(working_filepath, "rb") as f:
                if compress_status == 2:
                    out = compress_fast(f)
                elif compress_status == 3:
                    out = compress(f)
                elif compress_status == 4:
                    out = compress_slow(f)
            with open(working_filepath, "wb") as f:
                f.write(out.getbuffer())
        self.changed = False

    def convert_to_k(self):
        """Write a keyframed (.bck/.blk) copy of a .bca/.bla next to the original."""
        filepath = self.filepath[:-1] + "k"
        info = j3d.fix_array(self.display_info)
        if self.filepath.endswith(".bca"):
            bck = j3d.sort_filepath(filepath, info)
        elif filepath.endswith(".bla"):
            blk = j3d.sort_filepath(filepath, info)

    def convert_to_a(self, info):
        """Write an all-frames (.bca/.bla) copy converted from the current data."""
        info = j3d.fix_array( info )
        if self.filepath.endswith(".bck") or self.filepath.endswith(".bca"):
            bca = j3d.convert_to_a(self.filepath, info) #this is a pure bck, no saving
            filepath = self.filepath[:-1] + "a"
            with open(filepath, "wb") as f:
                bca.write_bca(f)
                f.close()
        elif self.filepath.endswith(".blk") or self.filepath.endswith(".bla"):
            bla = j3d.convert_to_a(self.filepath, info) #this is a pure bck, no saving
            filepath = self.filepath[:-1] + "a"
            with open(filepath, "wb") as f:
                bla.write_bla(f)
                f.close()

    def export_anim(self):
        """Export to .anim, prompting for a .bmd/.bdl model when none is stored."""
        info = j3d.fix_array(self.display_info)
        filepath = self.filepath[0:-4] + ".anim"
        if self.bmd_file is None:
            bmd_file, choosentype = QFileDialog.getOpenFileName( None, "Open File","" , "Model files (*.bmd *.bdl)")
            if bmd_file:
                bck = j3d.export_anim(filepath, info, bmd_file)
        else:
            bck = j3d.export_anim(filepath, info, self.bmd_file)

    def add_children(self, strings):
        """Replace the current children with one disabled row per name in *strings*."""
        self.takeChildren()
        for name in strings:
            child = QTreeWidgetItem(self)
            child.setText(0, name)
            child.setDisabled(True)
Wheels.py | edhosken/WheelsSong | 0 | 4897 | <gh_stars>0
#Create the pre-defined song values...Correct names not used so each starting letter would be unique.
#Trailing spaces inside the string entries are part of the lyric spacing.
numbers = tuple(range(1, 19))
letters = [chr(c) + ' ' for c in range(ord('a'), ord('r') + 1)]
roman = ['I ', 'II ', 'III ', 'IV ', 'V ', 'VI ', 'VII ', 'VIII ', 'IX ', 'X ', 'XI ', 'XII ', 'XIII ', 'XIV ', 'XV ', 'XVI ', 'XVII ', 'XVIII']
military = ['alpha ', 'bravo ', 'charlie ', 'delta ', 'echo ', 'foxtrot ', 'golf ', 'hotel ', 'india ', 'juliet ', 'kilo ', 'lima ', 'mike ', 'november ', 'oscar ', 'papa ', 'quebec ', 'romeo ']
german = ['eins', 'zwei', 'drei', 'vier', 'fünf', 'sechs', 'sieben', 'acht', 'neun', 'zehn', 'elf', 'zwölf', 'dreizehn', 'vierzehn', 'fünfzehn', 'sechzehn', 'siebzehn', 'achtzehn']
pi = ['3 ','point ','1 ','4 ','1 ','5 ','9 ','2 ','6 ','5 ','3 ','5 ','8 ','9 ','7 ','9 ','3 ','2 ']

##Build morse code sequences programmatically instead of spelling out m1..m0.
def _morse_digit(d):
    """Five-symbol morse pattern for decimal digit *d*, as 'dot'/'dash' words."""
    if d == 0:
        return ('dash',) * 5
    if d <= 5:
        return ('dot',) * d + ('dash',) * (5 - d)
    return ('dash',) * (d - 5) + ('dot',) * (10 - d)

# 1-9 use the single-digit pattern; 10-18 concatenate the patterns for '1'
# and the second digit (matching the original m1 + mX tuples).
code = [_morse_digit(n) if n < 10 else _morse_digit(1) + _morse_digit(n % 10)
        for n in range(1, 19)]
##Other ideas: piglatin, japanese, spanish, prime, tau, e, ...
##NEED TO ADD INVALID ENTRY CATCHES

print("Hello, let's sing a song that everybody loves!\n")

sing = 'y'
while sing == 'y':
    user = []
    variation = input ("Please input what variation you wish to perform be entering 'numbers', 'letters', 'roman', 'military', 'pi', 'german', 'code', or 'user' to make your own song: \n").lower().strip()

    ##Seeming silly switching of strings to list types
    if variation == "numbers" or variation == "n":
        variation = numbers
    elif variation == "letters" or variation == "l":
        variation = letters
    elif variation == "roman" or variation == "r":
        variation = roman
    elif variation == "military" or variation == "m":
        variation = military
    elif variation == "pi" or variation == "p":
        variation = pi
    elif variation == "german" or variation == "g":
        variation = german
    elif variation == "code" or variation == "c":
        variation = code
    elif variation == "user" or variation == "u":
        while len(user) < 18:
            user.append(input ("Enter a word: "))
        # BUG FIX: the collected words were never used before — `variation`
        # stayed the string 'user'/'u', so the song sang its characters.
        variation = user

    #User input to select the song pattern (normalized like the variation
    #prompt; previously this was case/whitespace sensitive)
    pattern = input ("\nNow please tell me what pattern to use by entering 'forward', 'backward', 'even', or 'odd':\n").lower().strip()
    print ("\nHere we go: \n\n")

    #Asemble the song...IMPROVE FORMAT SO OUTPUT IS EASIER TO READ
    song1 = "Oh, there are "
    song2 = " wheels on a big rig truck!"
    a = song1, variation[::], song2
    b = song1, variation[::-1], song2
    c = song1, variation[::2], song2
    d = song1, variation[1::2], song2

    ##Use pattern.startswith()?...Also, might be better to seperate forward/backward and even/odd choices.
    if pattern == 'forward' or pattern == 'f':
        print (a)
    elif pattern == 'backward' or pattern == 'b':
        print (b)
    elif pattern == 'odd' or pattern == 'o':
        print (c)
    elif pattern == 'even' or pattern == 'e':
        print (d)

    sing = input('\n\nWould you like to sing it again? (y/n) ').lower()
## This is the end of the while loop
else:
    print ("\nOK, Goodbye!")
| 2.640625 | 3 |
tests/test_config.py | dfroger/conda | 0 | 4898 | # (c) 2012-2014 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
import os
from os.path import dirname, join, exists
import unittest
import pytest
import conda.config as config
from conda.utils import get_yaml
from conda.compat import iterkeys
from tests.helpers import run_conda_command
yaml = get_yaml()

# use condarc from source tree to run these tests against
config.rc_path = join(dirname(__file__), 'condarc')


def _get_default_urls():
    """Stand-in for conda.config.get_default_urls so the tests use fixed repos."""
    return ['http://repo.continuum.io/pkgs/free',
            'http://repo.continuum.io/pkgs/pro']
config.get_default_urls = _get_default_urls

# unset CIO_TEST. This is a Continuum-internal variable that draws packages
# from an internal server instead of repo.continuum.io.
# (os.environ.pop with a default replaces the original try/del/except KeyError.)
os.environ.pop('CIO_TEST', None)
class TestConfig(unittest.TestCase):
    """Smoke tests pinning the public surface of conda.config.

    These tests are mostly to ensure API stability rather than deep behaviour.
    """

    def __init__(self, *args, **kwargs):
        # Reload the rc from the source-tree condarc for every test instance.
        config.rc = config.load_condarc(config.rc_path)
        # Otherwise normalization tests will fail if the user is logged into
        # binstar.
        config.rc['add_binstar_token'] = False
        super(TestConfig, self).__init__(*args, **kwargs)

    def test_globals(self):
        """All public config globals must be populated with truthy values."""
        self.assertTrue(config.root_dir)
        self.assertTrue(config.pkgs_dirs)
        self.assertTrue(config.envs_dirs)
        self.assertTrue(config.default_prefix)
        self.assertTrue(config.platform)
        self.assertTrue(config.subdir)
        self.assertTrue(config.arch_name)
        self.assertTrue(config.bits in (32, 64))

    def test_pkgs_dir_from_envs_dir(self):
        """pkgs dir is <root>/pkgs for the root envs dir, <envs>/.pkgs otherwise."""
        root_dir = config.root_dir
        root_pkgs = join(root_dir, 'pkgs')
        for pi, po in [
            (join(root_dir, 'envs'), root_pkgs),
            ('/usr/local/foo/envs' if config.platform != 'win' else 'C:\envs',
             '/usr/local/foo/envs/.pkgs' if config.platform != 'win' else 'C:\envs\.pkgs'),
        ]:
            self.assertEqual(config.pkgs_dir_from_envs_dir(pi), po)

    def test_proxy_settings(self):
        """Proxy servers from the test condarc parse into a scheme -> url map."""
        self.assertEqual(config.get_proxy_servers(),
                         {'http': 'http://user:[email protected]:8080',
                          'https': 'https://user:[email protected]:8080'})

    def test_normalize_urls(self):
        """normalize_urls expands channel names to per-platform + noarch URLs.

        The expected mapping is url -> (source channel, priority).
        """
        current_platform = config.subdir
        assert config.DEFAULT_CHANNEL_ALIAS == 'https://conda.anaconda.org/'
        assert config.rc.get('channel_alias') == 'https://your.repo/'

        for channel in iterkeys(config.normalize_urls(['defaults', 'system',
                'https://anaconda.org/username', 'file:///Users/username/repo',
                'username'])):
            assert (channel.endswith('/%s/' % current_platform) or
                    channel.endswith('/noarch/'))
        self.assertEqual(config.normalize_urls([
            'defaults', 'system', 'https://conda.anaconda.org/username',
            'file:///Users/username/repo', 'username'
            ], 'osx-64'),
            {'file:///Users/username/repo/noarch/': ('file:///Users/username/repo', 6),
             'file:///Users/username/repo/osx-64/': ('file:///Users/username/repo', 6),
             'http://repo.continuum.io/pkgs/free/noarch/': (None, 1),
             'http://repo.continuum.io/pkgs/free/osx-64/': (None, 1),
             'http://repo.continuum.io/pkgs/pro/noarch/': (None, 1),
             'http://repo.continuum.io/pkgs/pro/osx-64/': (None, 1),
             'http://some.custom/channel/noarch/': ('http://some.custom/channel', 3),
             'http://some.custom/channel/osx-64/': ('http://some.custom/channel', 3),
             'https://conda.anaconda.org/username/noarch/': ('https://conda.anaconda.org/username', 5),
             'https://conda.anaconda.org/username/osx-64/': ('https://conda.anaconda.org/username', 5),
             'https://your.repo/binstar_username/noarch/': ('binstar_username', 2),
             'https://your.repo/binstar_username/osx-64/': ('binstar_username', 2),
             'https://your.repo/username/noarch/': ('username', 7),
             'https://your.repo/username/osx-64/': ('username', 7)})
# Scratch rc file used (and cleaned up) by the command-level tests below.
test_condarc = os.path.join(os.path.dirname(__file__), 'test_condarc')


def _read_test_condarc():
    """Return the current contents of the scratch .condarc file."""
    with open(test_condarc) as f:
        return f.read()
# Tests for the conda config command
# FIXME This should be multiple individual tests
@pytest.mark.slow
def test_config_command_basics():
    """`conda config --add/--set` basics: file creation, defaults handling, dedup.

    NOTE(review): leading whitespace inside the expected YAML literals may have
    been lost in transport; verify the exact indentation against upstream.
    """
    try:
        # Test that creating the file adds the defaults channel
        assert not os.path.exists('test_condarc')
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--add', 'channels', 'test')
        assert stdout == stderr == ''
        assert _read_test_condarc() == """\
channels:
- test
- defaults
"""
        os.unlink(test_condarc)

        # When defaults is explicitly given, it should not be added
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--add', 'channels', 'test',
                                           '--add', 'channels', 'defaults')
        assert stdout == stderr == ''
        assert _read_test_condarc() == """\
channels:
- defaults
- test
"""
        os.unlink(test_condarc)

        # Duplicate keys should not be added twice
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--add', 'channels', 'test')
        assert stdout == stderr == ''
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--add', 'channels', 'test')
        assert stdout == ''
        assert stderr == "Skipping channels: test, item already exists"
        assert _read_test_condarc() == """\
channels:
- test
- defaults
"""
        os.unlink(test_condarc)

        # Test creating a new file with --set
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'always_yes', 'true')
        assert stdout == stderr == ''
        assert _read_test_condarc() == """\
always_yes: true
"""
        os.unlink(test_condarc)

    finally:
        try:
            pass  # NOTE(review): stray no-op kept from original
            os.unlink(test_condarc)
        except OSError:
            pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_get():
    """--get output for the whole file, individual keys, unset keys, bad keys."""
    try:
        # Test --get
        with open(test_condarc, 'w') as f:
            f.write("""\
channels:
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: no
always_yes: true
invalid_key: yes
channel_alias: http://alpha.conda.anaconda.org
""")

        stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get')
        assert stdout == """\
--set always_yes True
--set changeps1 no
--set channel_alias http://alpha.conda.anaconda.org
--add channels 'defaults'
--add channels 'test'
--add create_default_packages 'numpy'
--add create_default_packages 'ipython'\
"""
        assert stderr == "unknown key invalid_key"

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--get', 'channels')
        assert stdout == """\
--add channels 'defaults'
--add channels 'test'\
"""
        assert stderr == ""

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--get', 'changeps1')
        assert stdout == """\
--set changeps1 no\
"""
        assert stderr == ""

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--get', 'changeps1', 'channels')
        assert stdout == """\
--set changeps1 no
--add channels 'defaults'
--add channels 'test'\
"""
        assert stderr == ""

        # Valid but unset keys produce no output at all
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--get', 'allow_softlinks')
        assert stdout == ""
        assert stderr == ""

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--get', 'track_features')
        assert stdout == ""
        assert stderr == ""

        # Unknown keys are rejected by the argument parser
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--get', 'invalid_key')
        assert stdout == ""
        assert "invalid choice: 'invalid_key'" in stderr

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--get', 'not_valid_key')
        assert stdout == ""
        assert "invalid choice: 'not_valid_key'" in stderr

        os.unlink(test_condarc)

    finally:
        try:
            pass  # NOTE(review): stray no-op kept from original
            os.unlink(test_condarc)
        except OSError:
            pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_parser():
    """Round-trip behaviour of the YAML 'parser': comments/structure survive edits.

    NOTE(review): the comment below says create_default_packages items carry
    extra spaces; leading whitespace inside these literals may have been lost
    in transport — verify against upstream.
    """
    try:
        # Now test the YAML "parser"
        # Channels is normal content.
        # create_default_packages has extra spaces in list items
        condarc = """\
channels:
- test
- defaults
create_default_packages :
- ipython
- numpy
changeps1: false
# Here is a comment
always_yes: yes
"""
        # First verify that this itself is valid YAML
        assert yaml.load(condarc, Loader=yaml.RoundTripLoader) == {'channels': ['test', 'defaults'],
            'create_default_packages': ['ipython', 'numpy'], 'changeps1':
            False, 'always_yes': 'yes'}

        with open(test_condarc, 'w') as f:
            f.write(condarc)

        stdout, stderr = run_conda_command('config', '--file', test_condarc, '--get')
        assert stdout == """\
--set always_yes yes
--set changeps1 False
--add channels 'defaults'
--add channels 'test'
--add create_default_packages 'numpy'
--add create_default_packages 'ipython'\
"""
        stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
                                           'channels', 'mychannel')
        assert stdout == stderr == ''
        assert _read_test_condarc() == """\
channels:
- mychannel
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: false
# Here is a comment
always_yes: 'yes'
"""
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'changeps1', 'true')
        assert stdout == stderr == ''
        assert _read_test_condarc() == """\
channels:
- mychannel
- test
- defaults
create_default_packages:
- ipython
- numpy
changeps1: true
# Here is a comment
always_yes: 'yes'
"""
        os.unlink(test_condarc)

        # Test adding a new list key. We couldn't test this above because it
        # doesn't work yet with odd whitespace
        condarc = """\
channels:
- test
- defaults
always_yes: true
"""
        with open(test_condarc, 'w') as f:
            f.write(condarc)

        stdout, stderr = run_conda_command('config', '--file', test_condarc, '--add',
                                           'disallow', 'perl')
        assert stdout == stderr == ''
        assert _read_test_condarc() == condarc + """\
disallow:
- perl
"""
        os.unlink(test_condarc)

    finally:
        try:
            pass  # NOTE(review): stray no-op kept from original
            os.unlink(test_condarc)
        except OSError:
            pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_remove_force():
    """--remove / --remove-key behaviour, including the --force error messages."""
    try:
        # Finally, test --remove, --remove-key
        run_conda_command('config', '--file', test_condarc, '--add',
                          'channels', 'test')
        run_conda_command('config', '--file', test_condarc, '--set',
                          'always_yes', 'true')
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--remove', 'channels', 'test')
        assert stdout == stderr == ''
        assert yaml.load(_read_test_condarc(), Loader=yaml.RoundTripLoader) == {'channels': ['defaults'],
            'always_yes': True}

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--remove', 'channels', 'test', '--force')
        assert stdout == ''
        assert stderr == "Error: 'test' is not in the 'channels' key of the config file"

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--remove', 'disallow', 'python', '--force')
        assert stdout == ''
        assert stderr == "Error: key 'disallow' is not in the config file"

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--remove-key', 'always_yes', '--force')
        assert stdout == stderr == ''
        assert yaml.load(_read_test_condarc(), Loader=yaml.RoundTripLoader) == {'channels': ['defaults']}

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--remove-key', 'always_yes', '--force')
        assert stdout == ''
        assert stderr == "Error: key 'always_yes' is not in the config file"

        os.unlink(test_condarc)

    finally:
        try:
            pass  # NOTE(review): stray no-op kept from original
            os.unlink(test_condarc)
        except OSError:
            pass
# FIXME Break into multiple tests
@pytest.mark.slow
def test_config_command_bad_args():
    """Unknown keys must be rejected without creating the rc file."""
    try:
        for flag, value in (('--add', 'test'), ('--set', 'yes')):
            stdout, stderr = run_conda_command(
                'config', '--file', test_condarc, flag, 'notarealkey', value)
            assert stdout == ''
            assert not exists(test_condarc)
    finally:
        try:
            os.unlink(test_condarc)
        except OSError:
            pass
def test_invalid_rc():
    """A malformed .condarc must yield a helpful error and stay untouched."""
    # Some tests for unexpected input in the condarc, like keys that are the
    # wrong type
    try:
        condarc = """\
channels:
"""
        with open(test_condarc, 'w') as f:
            f.write(condarc)

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--add', 'channels', 'test')
        assert stdout == ''
        assert stderr == """\
Error: Could not parse the yaml file. Use -f to use the
yaml parser (this will remove any structure or comments from the existing
.condarc file). Reason: key 'channels' should be a list, not NoneType."""
        # The broken file must not have been rewritten.
        assert _read_test_condarc() == condarc

        os.unlink(test_condarc)
    finally:
        try:
            pass  # NOTE(review): stray no-op kept from original
            os.unlink(test_condarc)
        except OSError:
            pass
def test_config_set():
    """`config --set` must reject non-boolean values for boolean keys."""
    expected_err = 'Error: Key: always_yes; yep is not a YAML boolean.'
    try:
        stdout, stderr = run_conda_command(
            'config', '--file', test_condarc, '--set', 'always_yes', 'yep')
        assert stdout == ''
        assert stderr == expected_err
    finally:
        try:
            os.unlink(test_condarc)
        except OSError:
            pass
def test_set_rc_string():
    """`config --set` on string keys: ssl_verify accepts booleans and paths."""
    # Test setting string keys in .condarc
    # We specifically test ssl_verify since it can be either a boolean or a string
    try:
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'ssl_verify', 'yes')
        assert stdout == ''
        assert stderr == ''
        # Use a context manager so the handle is closed deterministically
        # (the original leaked the handle from `open(...)` inside yaml.load).
        with open(test_condarc, 'r') as fh:
            verify = yaml.load(fh, Loader=yaml.RoundTripLoader)['ssl_verify']
        assert verify == 'yes'

        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'ssl_verify', 'test_string.crt')
        assert stdout == ''
        assert stderr == ''
        with open(test_condarc, 'r') as fh:
            verify = yaml.load(fh, Loader=yaml.RoundTripLoader)['ssl_verify']
        assert verify == 'test_string.crt'

        os.unlink(test_condarc)
    finally:
        try:
            os.unlink(test_condarc)
        except OSError:
            pass
| 1.882813 | 2 |
malaya/transformers/babble.py | ahmed3991/malaya | 1 | 4899 | <filename>malaya/transformers/babble.py
# Bert has a Mouth, and It Must Speak: BERT as a Markov Random Field Language Model,
# by <NAME>, <NAME>, NeuralGen 2019
# https://colab.research.google.com/drive/1MxKZGtQ9SSBjTK5ArsZ5LKhkztzg52RV
# https://arxiv.org/abs/1902.04094
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import math
from malaya.text.bpe import merge_sentencepiece_tokens, merge_wordpiece_tokens
# BERT special tokens used when building masked input sequences.
CLS = '[CLS]'
SEP = '[SEP]'
MASK = '[MASK]'
def topk_distributions(logits, top_k):
    """Sample one token id per row from the *top_k* highest logits (TF1 graph mode).

    NOTE(review): a new InteractiveSession is created on every call and never
    explicitly closed — looks like a resource leak under repeated calls; confirm.
    """
    with tf.InteractiveSession().as_default():
        logits = tf.convert_to_tensor(logits)
        kth_vals, kth_idx = tf.nn.top_k(logits, k = top_k)
        # Categorical over the k best logits, then map the sampled rank back
        # to the original vocabulary index via gather.
        dist = tfp.distributions.categorical.Categorical(logits = kth_vals)
        idx = tf.gather(
            kth_idx, tf.expand_dims(dist.sample(), -1), batch_dims = 1
        )
        idx = tf.squeeze(idx, axis = -1)
        return idx.eval()
def distributions(logits):
    """Sample one token id per row from the full categorical distribution.

    NOTE(review): like topk_distributions, this opens a new InteractiveSession
    per call without closing it — confirm whether that is acceptable here.
    """
    with tf.InteractiveSession().as_default():
        logits = tf.convert_to_tensor(logits)
        dist = tfp.distributions.categorical.Categorical(logits = logits)
        return dist.sample().eval()
def generate_step(
    logits,
    gen_idx,
    top_k = 0,
    temperature = 1.0,
    sample = False,
    return_list = True,
):
    """Choose token ids at position *gen_idx*: top-k sampled, fully sampled,
    or greedy argmax, after temperature scaling."""
    scores = logits[:, gen_idx] / temperature
    if top_k > 0:
        idx = topk_distributions(scores, top_k)
    elif sample:
        idx = distributions(scores)
    else:
        idx = np.argmax(scores, axis = -1)
    return idx.tolist() if return_list else idx
def tokenize_batch(batch, tokenizer):
    """Map every token sentence in *batch* to its vocabulary-id sequence."""
    return list(map(tokenizer.convert_tokens_to_ids, batch))
def untokenize_batch(batch, tokenizer):
    """Map every id sequence in *batch* back to its token sequence."""
    return list(map(tokenizer.convert_ids_to_tokens, batch))
def get_init_text(seed_text, max_len, tokenizer, batch_size = 1):
    """Build *batch_size* copies of [seed tokens][MASK]*max_len[SEP], as ids."""
    template = seed_text + [MASK] * max_len + [SEP]
    return tokenize_batch([list(template) for _ in range(batch_size)], tokenizer)
def sequential_generation(
    seed_text,
    model,
    batch_size = 5,
    max_len = 15,
    leed_out_len = 1,
    temperature = 1.0,
    top_k = 100,
    burnin = 20,
):
    """Generate *max_len* tokens after *seed_text*, left to right (BERT-as-MRF).

    One masked position is filled per step: the first *burnin* steps sample
    from the full distribution, later steps use top-k sampling. Returns
    *batch_size* generated strings.

    NOTE(review): mask_id, leed_out_len and inp are computed but never used —
    possibly leftovers from the original notebook; confirm before removing.
    """
    mask_id = model._tokenizer.vocab['[MASK]']
    sep_id = model._tokenizer.vocab['[SEP]']
    seed_text = model._tokenizer.tokenize(seed_text)
    seed_len = len(seed_text)
    # Batch of [seed tokens][MASK]*max_len[SEP], as vocabulary ids.
    batch = get_init_text(
        seed_text, max_len, model._tokenizer, batch_size = batch_size
    )
    for ii in range(max_len):
        inp = [sent[: seed_len + ii] + [sep_id] for sent in batch]
        batch = np.array(batch)
        masks = np.ones(batch.shape)
        segments = np.zeros(batch.shape)
        # Forward pass over the whole batch; logits for every position.
        out = model._sess.run(
            model._logits,
            feed_dict = {
                model.X: batch,
                model.MASK: masks,
                model.segment_ids: segments,
            },
        )
        # During burn-in: pure sampling; afterwards: top-k sampling.
        topk = top_k if (ii >= burnin) else 0
        idxs = generate_step(
            out,
            gen_idx = seed_len + ii,
            top_k = topk,
            temperature = temperature,
            sample = (ii < burnin),
        )
        # Write the chosen id into the current masked slot of every sentence.
        for jj in range(batch_size):
            batch[jj][seed_len + ii] = idxs[jj]

    results = untokenize_batch(batch.tolist(), model._tokenizer)
    # Re-join subword pieces; the tokenizer type decides the merge strategy.
    if hasattr(model._tokenizer, 'sp_model'):
        merge_function = merge_sentencepiece_tokens
    else:
        merge_function = merge_wordpiece_tokens
    outputs = []
    for r in results:
        # merge_* expects (token, weight) pairs; a dummy weight of 0 is used.
        r = [(t, 0) for t in r]
        r = merge_function(r)
        r = [t[0] for t in r]
        outputs.append(' '.join(r))
    return outputs
| 2.46875 | 2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.