code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
---|---|---|---|---|---|
# Shorten `long_url` with the bitly service and say the short URL in chat.
# bitly_api is imported lazily so a missing optional dependency yields
# actionable install instructions instead of a plugin load failure.
try:
import bitly_api # pip install bitly_api
except ImportError:
raise ImportError(
"Can't load BitlyPlugin, since the bitly_api python module isn't installed.\n"
"To install it, run:\n"
" pip install bitly_api"
)
# use oauth2 endpoints
c = bitly_api.Connection(access_token=settings.BITLY_ACCESS_TOKEN)
response = c.shorten(uri=long_url)
short_url = response['url']
self.say("Shorten URL: %s" % short_url, message=message) | def say_bitly_short_url(self, message, long_url=None) | bitly ___: Shorten long_url using bitly service. | 3.786613 | 3.745586 | 1.010953 |
# Parse a natural-language time, schedule a reminder message for then,
# and acknowledge immediately with the parsed date/time.
parsed_time = self.parse_natural_time(remind_time)
natural_datetime = self.to_natural_day_and_time(parsed_time)
if to_string:
formatted_to_string = to_string
else:
formatted_to_string = ""
# NOTE(review): "from_handle" is supplied but unused by the format string.
formatted_reminder_text = "%(mention_handle)s, you asked me to remind you%(to_string)s %(reminder_text)s" % {
"mention_handle": message.sender.mention_handle,
"from_handle": message.sender.handle,
"reminder_text": reminder_text,
"to_string": formatted_to_string,
}
self.schedule_say(formatted_reminder_text, parsed_time, message=message, notify=True)
self.say("%(reminder_text)s %(natural_datetime)s. Got it." % locals(), message=message) | def remind_me_at(self, message, reminder_text=None, remind_time=None, to_string="") | remind me to ___ at ___: Set a reminder for a thing, at a time. | 3.415346 | 3.410506 | 1.001419 |
# Wrap `obj` in an Event tagged with `topic`, carry over the sender and
# the originating incoming-event hash (probed from several possible
# attribute locations on `reference_message`), then encrypt the event
# and hand it to the backend under the localized topic name.
logging.debug("Publishing topic (%s): \n%s" % (topic, obj))
e = Event(
data=obj,
type=topic,
)
if hasattr(obj, "sender"):
e.sender = obj.sender
if reference_message:
original_incoming_event_hash = None
# The hash may live on the message itself or on its source, depending
# on how the message was constructed; check each location in turn.
if hasattr(reference_message, "original_incoming_event_hash"):
original_incoming_event_hash = reference_message.original_incoming_event_hash
elif hasattr(reference_message, "source") and hasattr(reference_message.source, "hash"):
original_incoming_event_hash = reference_message.source.hash
elif hasattr(reference_message, "source") and hasattr(reference_message.source, "original_incoming_event_hash"):
original_incoming_event_hash = reference_message.source.original_incoming_event_hash
elif hasattr(reference_message, "hash"):
original_incoming_event_hash = reference_message.hash
if original_incoming_event_hash:
e.original_incoming_event_hash = original_incoming_event_hash
return self.publish_to_backend(
self._localize_topic(topic),
self.encrypt(e)
) | def publish(self, topic, obj, reference_message=None) | Sends an object out over the pubsub connection, properly formatted,
and conforming to the protocol. Handles pickling for the wire, etc.
This method should *not* be subclassed. | 2.258269 | 2.253966 | 1.001909 |
try:
m = self.get_from_backend()
if m and m["type"] not in SKIP_TYPES:
return self.decrypt(m["data"])
except AttributeError:
raise Exception("Tried to call get message without having subscribed first!")
except (KeyboardInterrupt, SystemExit):
pass
except:
logging.critical("Error in watching pubsub get message: \n%s" % traceback.format_exc())
return None | def get_message(self) | Gets the latest object from the backend, and handles unpickling
and validation. | 6.65688 | 6.352087 | 1.047983 |
# Pick a random letter and 10 distinct topics, then render the game board.
# NOTE(review): the while loop never terminates if WORD_GAME_TOPICS has
# fewer than 10 distinct entries — confirm the constant is large enough.
"play a word game: Play a game where you think of words that start with a letter and fit a topic."
letter = random.choice(string.ascii_uppercase)
topics = []
while len(topics) < 10:
new_topic = random.choice(WORD_GAME_TOPICS)
if new_topic not in topics:
topics.append({
"index": len(topics) + 1,
"topic": new_topic
})
context = {
"letter": letter,
"topics": topics
}
self.say(rendered_template("word_game.html", context), message=message) | def word_game_round(self, message) | play a word game: Play a game where you think of words that start with a letter and fit a topic. | 4.402431 | 2.779119 | 1.58411 |
# Store the sender's contact info keyed by their chat handle, then ack.
contacts = self.load("contact_info", {})
contacts[message.sender.handle] = {
"info": contact_info,
"name": message.sender.name,
}
self.save("contact_info", contacts)
self.say("Got it.", message=message) | def set_my_info(self, message, contact_info="") | set my contact info to ____: Set your emergency contact info. | 4.102886 | 4.05104 | 1.012798 |
# Render the saved contact-info table and say it.
contacts = self.load("contact_info", {})
context = {
"contacts": contacts,
}
contact_html = rendered_template("contact_info.html", context)
self.say(contact_html, message=message) | def respond_to_contact_info(self, message) | contact info: Show everyone's emergency contact info. | 5.655221 | 5.335521 | 1.059919 |
# Fetch a random conversation starter and set it as the room topic.
# NOTE(review): no error handling around the HTTP call or r.json().
r = requests.get("http://www.chatoms.com/chatom.json?Normal=1&Fun=2&Philosophy=3&Out+There=4")
data = r.json()
self.set_topic(data["text"], message=message) | def give_us_somethin_to_talk_about(self, message) | new topic: set the room topic to a random conversation starter. | 12.796915 | 11.377954 | 1.124711 |
# Render the list of known rooms as HTML.
context = {"rooms": self.available_rooms.values(), }
self.say(rendered_template("rooms.html", context), message=message, html=True) | def list_rooms(self, message) | what are the rooms?: List all the rooms I know about. | 9.0138 | 8.948852 | 1.007258 |
# List the participants of the room this message came from.
room = self.get_room_from_message(message)
context = {"participants": room.participants, }
self.say(rendered_template("participants.html", context), message=message, html=True) | def participants_in_room(self, message) | who is in this room?: List all the participants of this room. | 5.675966 | 5.752838 | 0.986637 |
# Render a help listing from the stored "help_modules" data, optionally
# filtered down to a single plugin's help.
# NOTE(review): the first say() omits message= unlike the rest of this
# file; and the sort key lambda x: x[0] orders module names by their
# first character only — confirm both are intended.
# help_data = self.load("help_files")
selected_modules = help_modules = self.load("help_modules")
self.say("Sure thing, %s." % message.sender.handle)
help_text = "Here's what I know how to do:"
if plugin and plugin in help_modules:
help_text = "Here's what I know how to do about %s:" % plugin
selected_modules = dict()
selected_modules[plugin] = help_modules[plugin]
for k in sorted(selected_modules, key=lambda x: x[0]):
help_data = selected_modules[k]
if help_data:
help_text += "<br/><br/><b>%s</b>:" % k
for line in help_data:
if line:
if ":" in line:
# Bold the command portion before the first colon.
line = " <b>%s</b>%s" % (line[:line.find(":")], line[line.find(":"):])
help_text += "<br/> %s" % line
self.say(help_text, html=True) | def help(self, message, plugin=None) | help: the normal help you're reading. | 3.405241 | 3.387643 | 1.005195 |
# Dump every registered listener regex, one per line.
all_regexes = self.load("all_listener_regexes")
help_text = "Here's everything I know how to listen to:"
for r in all_regexes:
help_text += "\n%s" % r
self.say(help_text, message=message) | def help(self, message) | programmer help: Advanced programmer-y help. | 6.259197 | 6.366673 | 0.983119 |
location = get_location(place)
if location is not None:
tz = get_timezone(location.lat, location.long)
if tz is not None:
ct = datetime.datetime.now(tz=pytz.timezone(tz))
self.say("It's %(time)s in %(place)s." % {'time': self.to_natural_day_and_time(ct),
'place': location.name}, message=message)
else:
self.say("I couldn't find timezone for %(place)s." % {'place': location.name}, message=message)
else:
self.say("I couldn't find anywhere named %(place)s." % {'place': location.name}, message=message) | def what_time_is_it_in(self, message, place) | what time is it in ___: Say the time in almost any city on earth. | 2.51202 | 2.463709 | 1.019609 |
# Say the current local time, including the timezone.
now = datetime.datetime.now()
self.say("It's %s." % self.to_natural_day_and_time(now, with_timezone=True), message=message) | def what_time_is_it(self, message) | what time is it: Say the time where I am. | 5.711086 | 5.763324 | 0.990936 |
# Build a four-line "poem" from Google autocomplete suggestions for topic.
# NOTE(review): topic is interpolated into the URL without escaping.
r = requests.get("http://www.google.com/complete/search?output=toolbar&q=" + topic + "%20")
xmldoc = minidom.parseString(r.text)
item_list = xmldoc.getElementsByTagName("suggestion")
context = {"topic": topic, "lines": [x.attributes["data"].value for x in item_list[:4]]}
self.say(rendered_template("gpoem.html", context), message, html=True) | def google_poem(self, message, topic) | make a poem about __: show a google poem about __ | 4.396176 | 4.326595 | 1.016082 |
# Build an EdgeGridAuth from an EdgeRc instance or a path to an edgerc file.
from .edgerc import EdgeRc
if isinstance(rcinput, EdgeRc):
rc = rcinput
else:
rc = EdgeRc(rcinput)
return EdgeGridAuth(
client_token=rc.get(section, 'client_token'),
client_secret=rc.get(section, 'client_secret'),
access_token=rc.get(section, 'access_token'),
headers_to_sign=rc.getlist(section, 'headers_to_sign'),
max_body=rc.getint(section, 'max_body')
) | def from_edgerc(rcinput, section='default') | Returns an EdgeGridAuth object from the configuration from the given section of the
given edgerc file.
:param filename: path to the edgerc file
:param section: the section to use (this is the [bracketed] part of the edgerc,
default is 'default') | 2.364875 | 2.213333 | 1.068468 |
# Split the option's value on commas; None when the value is empty/falsy.
value = self.get(section, option)
if value:
return value.split(',')
else:
return None | def getlist(self, section, option) | returns the named option as a list, splitting the original value
by ',' | 3.001525 | 2.669424 | 1.124409 |
# Convert a numeric string to int, falling back to float.
try:
result = int(strings)
except ValueError:
result = float(strings)
return result | def covstr(strings) | convert string to int or float. | 4.792839 | 3.224223 | 1.48651 |
# Build a realtime-quote dict from the raw pipe-delimited feed fields.
# Returns False when the raw row is malformed (IndexError/ValueError).
try:
# unch: average of limit-up and limit-down prices = previous close.
unch = sum([covstr(self.__raw[3]), covstr(self.__raw[4])]) / 2
result = {
'name': unicode(self.__raw[36].replace(' ', ''), 'cp950'),
'no': self.__raw[0],
'range': self.__raw[1], # price change
'time': self.__raw[2], # quote time
'max': self.__raw[3], # limit-up price
'min': self.__raw[4], # limit-down price
'unch': '%.2f' % unch, # previous close
'pp': '%.2f' % ((covstr(self.__raw[8]) - unch) / unch * 100),
# change percentage %
'o': self.__raw[5], # open
'h': self.__raw[6], # day high
'l': self.__raw[7], # day low
'c': self.__raw[8], # last / close price
'value': self.__raw[9], # cumulative volume
'pvalue': self.__raw[10], # volume of this tick
'top5buy': [
(self.__raw[11], self.__raw[12]),
(self.__raw[13], self.__raw[14]),
(self.__raw[15], self.__raw[16]),
(self.__raw[17], self.__raw[18]),
(self.__raw[19], self.__raw[20])
],
'top5sell': [
(self.__raw[21], self.__raw[22]),
(self.__raw[23], self.__raw[24]),
(self.__raw[25], self.__raw[26]),
(self.__raw[27], self.__raw[28]),
(self.__raw[29], self.__raw[30])
]
}
if '-' in self.__raw[1]: # up/down flag: True = up, False = down
result['ranges'] = False # price down
else:
result['ranges'] = True # price up
result['crosspic'] = ("http://chart.apis.google.com/chart?" +
"chf=bg,s,ffffff&chs=20x50&cht=ls" +
"&chd=t1:0,0,0|0,%(h)s,0|0,%(c)s,0|0,%(o)s,0|0,%(l)s,0" +
"&chds=%(l)s,%(h)s&chm=F,,1,1:4,20") % result
result['top5buy'].sort()
result['top5sell'].sort()
return result
except (IndexError, ValueError):
return False | def real(self) | Real time data
:rtype: dict
:returns:
:name: 股票名稱 Unicode
:no: 股票代碼
:range: 漲跌價
:ranges: 漲跌判斷 True, False
:time: 取得時間
:max: 漲停價
:min: 跌停價
:unch: 昨日收盤價
:pp: 漲跌幅 %
:o: 開盤價
:h: 當日最高價
:l: 當日最低價
:c: 成交價/收盤價
:value: 累計成交量
:pvalue: 該盤成交量
:top5buy: 最佳五檔買進價量資訊
:top5sell: 最佳五檔賣出價量資訊
:crosspic: K線圖 by Google Chart | 2.793689 | 1.976004 | 1.413807 |
''' Get realtime data
:rtype: dict
:returns: field codes documented at http://goristock.appspot.com/API#apiweight
'''
result = self.__raw['1'].copy()
result['c'] = self.__raw['1']['value']
result['value'] = self.__raw['200']['v2']
result['date'] = self.__raw['0']['time']
return result | def real(self) | Get realtime data
:rtype: dict
:returns: 代碼可以參考:http://goristock.appspot.com/API#apiweight | 14.048004 | 4.271156 | 3.28904 |
''' Display Taiwan time now (UTC shifted by self.time_zone hours). '''
utcnow = datetime.utcnow()
return utcnow + timedelta(hours=self.time_zone) | def now(self) | Display Taiwan Time now
顯示台灣此刻時間 | 15.07344 | 4.67554 | 3.223892 |
''' Display Taiwan date now (UTC shifted by self.time_zone hours). '''
utcnow = datetime.utcnow()
return (utcnow + timedelta(hours=self.time_zone)).date() | def date(self) | Display Taiwan date now
顯示台灣此刻日期 | 16.669323 | 5.076414 | 3.283681 |
''' Next day at midnight.
:rtype: datetime
:returns: the day after self.__zero, combined with time 00:00
'''
nextday = self.__zero.date() + timedelta(days=1)
return datetime.combine(nextday, time()) | def nextday(self) | nextday: 下一個日期
:rtype: datetime
:returns: 下一個預設時間日期 | 8.206224 | 4.661338 | 1.760487 |
''' Next scheduled datetime: nextday shifted by __hour (minus 8, i.e.
a UTC offset) hours and __minutes minutes.
'''
return self.nextday + timedelta(hours=self.__hour - 8,
minutes=self.__minutes) | def exptime(self) | exptime: 下一個日期時間
:returns: 下一個預設時間 | 17.755081 | 7.494107 | 2.369206 |
# Turning-point check on the 3/6-day moving-average bias-ratio series.
return self.data.check_moving_average_bias_ratio(
self.data.moving_average_bias_ratio(3, 6)[0],
positive_or_negative=positive_or_negative)[0] | def bias_ratio(self, positive_or_negative=False) | 判斷乖離
:param bool positive_or_negative: 正乖離 為 True,負乖離 為 False | 6.46772 | 7.695741 | 0.840428 |
# Volume up versus yesterday and close above open (bullish candle).
result = self.data.value[-1] > self.data.value[-2] and \
self.data.price[-1] > self.data.openprice[-1]
return result | def best_buy_1(self) | 量大收紅
:rtype: bool | 5.979632 | 6.111675 | 0.978395 |
# Volume down versus yesterday while price rose versus yesterday.
result = self.data.value[-1] < self.data.value[-2] and \
self.data.price[-1] > self.data.price[-2]
return result | def best_buy_2(self) | 量縮價不跌
:rtype: bool | 4.566998 | 4.193806 | 1.088986 |
# Volume up versus yesterday and close below open (bearish candle).
result = self.data.value[-1] > self.data.value[-2] and \
self.data.price[-1] < self.data.openprice[-1]
return result | def best_sell_1(self) | 量大收黑
:rtype: bool | 5.865978 | 6.37634 | 0.91996 |
# Volume down versus yesterday and price down versus yesterday.
result = self.data.value[-1] < self.data.value[-2] and \
self.data.price[-1] < self.data.price[-2]
return result | def best_sell_2(self) | 量縮價跌
:rtype: bool | 4.349423 | 4.216473 | 1.031531 |
# Combine the four "best buy" signals: when the bias-ratio guard passes
# and any signal fires, return a comma-joined string of the docstrings
# of the signals that fired; otherwise return False.
# NOTE(review): each best_buy_N() is evaluated twice (guard + body).
result = []
if self.check_mins_bias_ratio() and \
(self.best_buy_1() or self.best_buy_2() or self.best_buy_3() or \
self.best_buy_4()):
if self.best_buy_1():
result.append(self.best_buy_1.__doc__.strip().decode('utf-8'))
if self.best_buy_2():
result.append(self.best_buy_2.__doc__.strip().decode('utf-8'))
if self.best_buy_3():
result.append(self.best_buy_3.__doc__.strip().decode('utf-8'))
if self.best_buy_4():
result.append(self.best_buy_4.__doc__.strip().decode('utf-8'))
result = ', '.join(result)
else:
result = False
return result | def best_four_point_to_buy(self) | 判斷是否為四大買點
:rtype: str or False | 1.971616 | 1.892079 | 1.042037 |
''' Load the stock number -> name mapping from the CSV file. '''
csv_path = os.path.join(os.path.dirname(__file__), self.stock_no_files)
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
for i in csv_data:
try:
result[i[0]] = str(i[1]).decode('utf-8')
except ValueError:
# The 'UPDATE' row carries the dataset's last-update stamp.
if i[0] == 'UPDATE':
self.last_update = str(i[1]).decode('utf-8')
else:
pass
return result | def importcsv(self) | import data from csv | 3.235234 | 3.250178 | 0.995402 |
''' Load the industry code -> name mapping from the CSV file. '''
csv_path = os.path.join(os.path.dirname(__file__),
self.industry_code_files)
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
for i in csv_data:
result[i[0]] = i[1].decode('utf-8')
return result | def __industry_code(self) | import industry_code | 3.419703 | 3.047496 | 1.122135 |
''' Load the industry code -> list of stock numbers from the CSV file. '''
csv_path = os.path.join(os.path.dirname(__file__), self.stock_no_files)
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
# Industry codes: two or more digits, optionally a trailing word char.
check_words = re.compile(r'^[\d]{2,}[\w]?')
for i in csv_data:
if check_words.match(i[2]):
try:
result[i[2]].append(i[0].decode('utf-8'))
except (ValueError, KeyError):
try:
result[i[2]] = [i[0].decode('utf-8')]
except KeyError:
pass
return result | def __loadindcomps(self) | import industry comps | 3.674583 | 3.377038 | 1.088108 |
# Search stock names matching the given pattern; returns {no: name}.
# NOTE(review): the bare query.group() call discards its result.
pattern = re.compile(name)
result = {}
for i in self.__allstockno:
query = re.search(pattern, self.__allstockno[i])
if query:
query.group()
result[i] = self.__allstockno[i]
return result | def search(self, name) | 搜尋股票名稱 by unicode
:param str name: 欲搜尋的字串
:rtype: dict | 4.110298 | 4.008659 | 1.025355 |
# Search stock numbers matching the given number/pattern; returns {no: name}.
# NOTE(review): the bare query.group() call discards its result.
pattern = re.compile(str(no))
result = {}
for i in self.__allstockno:
query = re.search(pattern, str(i))
if query:
query.group()
result[i] = self.__allstockno[i]
return result | def searchbyno(self, no) | 搜尋股票代碼
:param str no: 欲搜尋的字串
:rtype: dict | 4.142098 | 4.257506 | 0.972893 |
# Filter industry codes down to the two-digit categories.
code_list = self.industry_code
stock_comps_list = {}
for i in code_list:
if len(i) == 2 and i.isdigit():
stock_comps_list.update({i: code_list[i]})
return stock_comps_list | def get_stock_comps_list(self) | 回傳日常交易的類別代碼與名稱
:rtype: dict
.. versionadded:: 0.5.6 | 3.67744 | 3.806528 | 0.966088 |
# Collect {stock_no: name} for every stock in the two-digit categories.
all_stock = self.all_stock
industry_comps = self.industry_comps
result = {}
for comps_no in self.get_stock_comps_list():
if comps_no in industry_comps:
for stock_no in industry_comps[comps_no]:
result.update({stock_no: all_stock[stock_no]})
return result | def get_stock_list(self) | 回傳日常交易的代碼與名稱
:rtype: dict
.. versionadded:: 0.5.6 | 3.402242 | 3.464309 | 0.982084 |
''' Check whether the market is open on the given date/datetime.
:param datetime time: date to check
:rtype: bool
:returns: True = market open, False = market closed
'''
# NOTE(review): compares type() results (isinstance would be clearer);
# self.twtime is set as a side effect but the original `time` argument
# is what gets passed to caldata().
if type(time) == type(TWTime().now):
self.twtime = TWTime().now
elif type(time) == type(TWTime().date):
self.twtime = TWTime().date
else:
pass
return self.caldata(time) | def d_day(self, time) | 指定日期
:param datetime time: 欲判斷的日期
:rtype: bool
:returns: True 為開市、False 為休市 | 5.807332 | 3.384517 | 1.715853 |
''' Load the market open/closed calendar from opendate.csv.
Data source: http://www.twse.com.tw/ch/trading/trading_days.php
'''
csv_path = os.path.join(os.path.dirname(__file__), 'opendate.csv')
with open(csv_path) as csv_file:
csv_data = csv.reader(csv_file)
result = {}
result['close'] = []
result['open'] = []
for i in csv_data:
if i[1] == '0': # 0 = market closed
result['close'].append(datetime.strptime(i[0],
'%Y/%m/%d').date())
elif i[1] == '1': # 1 = market open
result['open'].append(datetime.strptime(i[0],
'%Y/%m/%d').date())
else:
pass
return result | def __loaddate() | 載入檔案
檔案依據 http://www.twse.com.tw/ch/trading/trading_days.php | 2.738099 | 2.069422 | 1.323123 |
''' Market open or not on the given datetime.
:param datetime time: date to check
:rtype: bool
:returns: True = market open, False = market closed
'''
if time.date() in self.__ocdate['close']: # explicitly scheduled closed
return False
elif time.date() in self.__ocdate['open']: # explicitly scheduled open
return True
else:
if time.weekday() <= 4: # otherwise open on weekdays (Mon-Fri)
return True
else:
return False | def caldata(self, time) | Market open or not.
:param datetime time: 欲判斷的日期
:rtype: bool
:returns: True 為開市、False 為休市 | 5.325161 | 3.097889 | 1.718964 |
# Fetch `month` months of daily data for stock_no, concatenating the
# per-month tuples from fetch_data/to_list, oldest first.
result = ()
self.__get_mons = month
self.__get_no = stock_no
self._twse = twse
for i in range(month):
nowdatetime = datetime.today() - relativedelta(months=i)
tolist = self.to_list(self.fetch_data(stock_no, nowdatetime))
result = tolist + result
return tuple(result) | def serial_fetch(self, stock_no, month, twse=None) | 串接每月資料 舊→新
:param str stock_no: 股票代碼
:param int month: 擷取 n 個月的資料
:param bool twse: 指定是否為上市資料
:rtype: tuple | 6.096169 | 5.206575 | 1.17086 |
# Normalize one month of raw CSV rows: strip thousands separators,
# coerce the numeric columns to float, and slice off the header/footer
# rows (the layout differs between TWSE and OTC downloads).
tolist = []
for i in csv_file:
i = [value.strip().replace(',', '') for value in i]
try:
# Columns 1-6 and 8 are numeric; column 7 (price change) is
# left as-is.
for value in (1, 2, 3, 4, 5, 6, 8):
i[value] = float(i[value])
except (IndexError, ValueError):
pass
tolist.append(i)
if self._twse:
if tolist:
# Row 0 embeds "<no> <name>"; row 1 carries the column names.
_stock_info = tolist[0][0].split(' ')[1].strip()
self.__info = (_stock_info[:4],
_stock_info[4:].decode('utf-8'))
self.__raw_rows_name = tolist[1]
return tuple(tolist[2:])
return tuple([])
else:
if len(tolist) > 6:
self.__raw_rows_name = tolist[4]
self.__info = (self.__get_no, OTCNo().all_stock[self.__get_no])
if len(tolist[5:]) > 1:
return tuple(tolist[5:-1])
return tuple([]) | def to_list(self, csv_file) | 串接每日資料 舊→新
:param csv csv_file: csv files
:rtype: list | 4.269334 | 4.304681 | 0.991789 |
# Prepend `month` additional months of older data to the raw series and
# bump the fetched-month counter.
result = []
exist_mons = self.__get_mons
oldraw = list(self.__raw_data)
for i in range(month):
nowdatetime = datetime.today() - relativedelta(months=exist_mons) -\
relativedelta(months=i)
tolist = self.to_list(self.fetch_data(self.__info[0], nowdatetime))
result = list(tolist) + result
result = result + oldraw
self.__get_mons = exist_mons + month
return tuple(result) | def plus_mons(self, month) | 增加 n 個月的資料
:param int month: 增加 n 個月的資料
:rtype: tuple | 6.233823 | 5.582201 | 1.116732 |
# Download one month of OTC daily data as CSV. The year is in ROC
# calendar (Gregorian minus 1911); the random query param busts caches.
url = (
'/ch/stock/aftertrading/' +
'daily_trading_info/st43_download.php?d=%(year)d/%(mon)02d&' +
'stkno=%(stock)s&r=%(rand)s') % {
'year': nowdatetime.year - 1911,
'mon': nowdatetime.month,
'stock': stock_no,
'rand': random.randrange(1, 1000000)}
logging.info(url)
result = GRETAI_CONNECTIONS.urlopen('GET', url)
csv_files = csv.reader(StringIO(result.data))
self.__url.append(GRETAI_HOST + url)
return csv_files | def fetch_data(self, stock_no, nowdatetime) | Fetch data from gretai.org.tw(OTC)
return list.
從 gretai.org.tw 下載資料,回傳格式為 csv.reader
0. 日期
1. 成交股數
2. 成交金額
3. 開盤價
4. 最高價(續)
5. 最低價
6. 收盤價
7. 漲跌價差
8. 成交筆數
:param str stock_no: 股票代碼
:param datetime nowdatetime: 此刻時間
:rtype: list | 5.973102 | 5.017695 | 1.190407 |
# Download one month of TWSE daily data; the response is cp950-encoded
# and is re-encoded to utf-8 before CSV parsing.
result = TWSE_CONNECTIONS.request('POST',
'/ch/trading/exchange/STOCK_DAY/STOCK_DAYMAIN.php',
fields={'download': 'csv',
'query_year': nowdatetime.year,
'query_month': nowdatetime.month,
'CO_ID': stock_no})
_de = result.data.decode('cp950', 'ignore')
csv_files = csv.reader(StringIO(_de.encode('utf-8')))
return csv_files | def fetch_data(self, stock_no, nowdatetime) | Fetch data from twse.com.tw
return list.
從 twse.com.tw 下載資料,回傳格式為 csv.reader
0. 日期
1. 成交股數
2. 成交金額
3. 開盤價
4. 最高價(續)
5. 最低價
6. 收盤價
7. 漲跌價差
8. 成交筆數
:param str stock_no: 股票代碼
:param datetime nowdatetime: 此刻時間
:rtype: list | 7.274076 | 6.429587 | 1.131344 |
# Write the raw data rows out as a CSV file at fpath.
with open(fpath, 'w') as csv_file:
output = csv.writer(csv_file)
output.writerows(self.__raw_data) | def out_putfile(self, fpath) | 輸出成 CSV 檔
:param path fpath: 檔案輸出位置
.. todo:: files output using `with` syntax. | 3.439693 | 3.707994 | 0.927642 |
# Extract one column (default 6 = close) as floats, oldest -> newest.
result = (float(i[rows]) for i in self.__raw_data)
return list(result) | def __serial_price(self, rows=6) | 取出某一價格序列 *(舊→新)*
預設序列收盤價 *(self.__serial_price(6))*
:rtype: list
:returns: 預設序列收盤價 *(self.__serial_price(6))* | 8.220769 | 10.122379 | 0.812138 |
# Moving average over `date` days of column `row`, computed by repeatedly
# averaging the tail then popping; returns (series oldest->newest,
# continuity count from __cal_continue).
cal_data = self.__serial_price(row)
result = []
for dummy in range(len(cal_data) - int(date) + 1):
result.append(round(sum(cal_data[-date:]) / date, 2))
cal_data.pop()
result.reverse()
cont = self.__cal_continue(result)
return result, cont | def __calculate_moving_average(self, date, row) | 計算移動平均數
:param int row: 收盤價(6)、成交股數(1)
:rtype: tuple (序列 舊→新, 持續天數) | 5.661951 | 5.43746 | 1.041286 |
# Count how many of the most recent steps moved in the same direction.
# Positive result = rising streak, negative = falling streak.
diff_data = []
for i in range(1, len(list_data)):
if list_data[-i] > list_data[-i - 1]:
diff_data.append(1)
else:
diff_data.append(-1)
cont = 0
for value in diff_data:
if value == diff_data[0]:
cont += 1
else:
break
return cont * diff_data[0] | def __cal_continue(cls, list_data) | 計算持續天數
:rtype: int
:returns: 向量數值:正數向上、負數向下。 | 2.449049 | 2.37486 | 1.031239 |
# n-day moving average of volume, rescaled by 1000 (shares -> lots).
val, conti = self.__calculate_moving_average(date, 1)
val = (round(i / 1000, 3) for i in val)
return list(val), conti | def moving_average_value(self, date) | 計算 n 日成交股數均量與持續天數
:param int date: n 日
:rtype: tuple (序列 舊→新, 持續天數) | 6.882304 | 6.23839 | 1.103218 |
# Bias-ratio series: MA(date1) - MA(date2) element-wise over the common
# tail, plus its continuity count.
data1 = self.moving_average(date1)[0]
data2 = self.moving_average(date2)[0]
cal_list = []
for i in range(1, min(len(data1), len(data2)) + 1):
cal_list.append(data1[-i] - data2[-i])
cal_list.reverse()
cont = self.__cal_continue(cal_list)
return cal_list, cont | def moving_average_bias_ratio(self, date1, date2) | 計算乖離率(均價)
date1 - date2
:param int data1: n 日
:param int data2: m 日
:rtype: tuple (序列 舊→新, 持續天數) | 2.925115 | 2.768462 | 1.056585 |
# Volume series rescaled by 1000 (shares -> lots).
val = (round(i / 1000, 3) for i in self.__serial_price(1))
return list(val) | def value(self) | 成交量序列(張)
:rtype: list | 13.767309 | 14.011478 | 0.982574 |
# Locate the turning point within the last `sample` values of `data`.
# Returns (is_turning_point, days since the extreme, extreme value).
sample_data = data[-sample:]
if positive_or_negative: # positive bias
ckvalue = max(sample_data) # find the maximum
preckvalue = max(sample_data) > 0 # window maximum must be positive
else:
ckvalue = min(sample_data) # find the minimum
# NOTE(review): original comment said the window maximum must be
# negative; max(...) < 0 requires every value negative — confirm intent.
preckvalue = max(sample_data) < 0 # window maximum must be negative
return (sample - sample_data.index(ckvalue) < 4 and \
sample_data.index(ckvalue) != sample - 1 and preckvalue,
sample - sample_data.index(ckvalue) - 1,
ckvalue) | def __cal_ma_bias_ratio_point(cls, data, sample=5,
positive_or_negative=False) | 判斷轉折點位置
:param list data: 計算資料
:param int sample: 計算的區間樣本數量
:param bool positive_or_negative: 正乖離 為 True,負乖離 為 False
:rtype: tuple
:returns: (True or False, 第幾個轉折日, 轉折點值) | 3.741451 | 3.229313 | 1.15859 |
# Thin classmethod wrapper over __cal_ma_bias_ratio_point.
return cls.__cal_ma_bias_ratio_point(data, sample,
positive_or_negative) | def check_moving_average_bias_ratio(cls, data, sample=5,
positive_or_negative=False) | 判斷正負乖離轉折點位置
:param list data: 計算資料
:param int sample: 計算的區間樣本數量
:param bool positive_or_negative: 正乖離 為 True,負乖離 為 False
:rtype: tuple
:returns: (True or False, 第幾個轉折日, 轉折點值) | 7.013767 | 7.081176 | 0.990481 |
# Supports bare use (@flaky — then `max_runs` is actually the wrapped
# object) as well as parameterized use (@flaky(max_runs=..., ...)).
# In case @flaky is applied to a function or class without arguments
# (and without parentheses), max_runs will refer to the wrapped object.
# In this case, the default value can be used.
wrapped = None
if hasattr(max_runs, '__call__'):
wrapped, max_runs = max_runs, None
attrib = default_flaky_attributes(max_runs, min_passes, rerun_filter)
def wrapper(wrapped_object):
# Stamp the flaky attributes directly onto the test object.
for name, value in attrib.items():
setattr(wrapped_object, name, value)
return wrapped_object
return wrapper(wrapped) if wrapped is not None else wrapper | def flaky(max_runs=None, min_passes=None, rerun_filter=None) | Decorator used to mark a test as "flaky". When used in conjuction with
the flaky nosetests plugin, will cause the decorated test to be retried
until min_passes successes are achieved out of up to max_runs test runs.
:param max_runs:
The maximum number of times the decorated test will be run.
:type max_runs:
`int`
:param min_passes:
The minimum number of times the test must pass to be a success.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
Function signature is as follows:
(err, name, test, plugin) -> should_rerun
- err (`tuple` of `class`, :class:`Exception`, `traceback`):
Information about the test failure (from sys.exc_info())
- name (`unicode`):
The test name
- test (:class:`nose.case.Test` or :class:`Function`):
The test that has raised an error
- plugin (:class:`FlakyNosePlugin` or :class:`FlakyPytestPlugin`):
The flaky plugin. Has a :prop:`stream` that can be written to in
order to add to the Flaky Report.
:type rerun_filter:
`callable`
:return:
A wrapper function that includes attributes describing the flaky test.
:rtype:
`callable` | 4.389218 | 5.049659 | 0.869211 |
# Register the flaky report options and a "Force flaky" option group on
# nose's argument parser.
# pylint:disable=dangerous-default-value
super(FlakyPlugin, self).options(parser, env=env)
self.add_report_option(parser.add_option)
group = OptionGroup(
parser, "Force flaky", "Force all tests to be flaky.")
self.add_force_flaky_options(group.add_option)
parser.add_option_group(group) | def options(self, parser, env=os.environ) | Base class override.
Add options to the nose argument parser. | 4.214503 | 3.668193 | 1.148932 |
# Use a multiprocess-safe StringIO proxy when running under the
# multiprocess plugin; otherwise return the plain shared stream.
if multiprocess:
from flaky.multiprocess_string_io import MultiprocessingStringIO
return MultiprocessingStringIO()
return self._stream | def _get_stream(self, multiprocess=False) | Get the stream used to store the flaky report.
If this nose run is going to use the multiprocess plugin, then use
a multiprocess-list backed StringIO proxy; otherwise, use the default
stream.
:param multiprocess:
Whether or not this test run is configured for multiprocessing.
:type multiprocess:
`bool`
:return:
The stream to use for storing the flaky report.
:rtype:
:class:`StringIO` or :class:`MultiprocessingStringIO` | 10.049207 | 4.909749 | 2.046786 |
# Read the flaky-related options once nose has parsed them; pick a
# multiprocess-safe stream when the multiprocess plugin is active.
super(FlakyPlugin, self).configure(options, conf)
if not self.enabled:
return
is_multiprocess = int(getattr(options, 'multiprocess_workers', 0)) > 0
self._stream = self._get_stream(is_multiprocess)
self._flaky_result = TextTestResult(self._stream, [], 0)
self._flaky_report = options.flaky_report
self._flaky_success_report = options.flaky_success_report
self._force_flaky = options.force_flaky
self._max_runs = options.max_runs
self._min_passes = options.min_passes | def configure(self, options, conf) | Base class override. | 3.517383 | 3.430332 | 1.025377 |
# Let the flaky machinery decide whether to rerun; if not, and this test
# has been rerun before, forward the error to the real nose result.
# pylint:disable=invalid-name
want_error = self._handle_test_error_or_failure(test, err)
if not want_error and id(test) in self._tests_that_reran:
self._nose_result.addError(test, err)
return want_error or None | def handleError(self, test, err) | Baseclass override. Called when a test raises an exception.
If the test isn't going to be rerun again, then report the error
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool` | 6.620077 | 5.921855 | 1.117906 |
# Let the flaky machinery decide whether to rerun; if not, and this test
# has been rerun before, forward the failure to the real nose result.
# pylint:disable=invalid-name
want_failure = self._handle_test_error_or_failure(test, err)
if not want_failure and id(test) in self._tests_that_reran:
self._nose_result.addFailure(test, err)
return want_failure or None | def handleFailure(self, test, err) | Baseclass override. Called when a test fails.
If the test isn't going to be rerun again, then report the failure
to the nose test result.
:param test:
The test that has raised an error
:type test:
:class:`nose.case.Test`
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool` | 6.646254 | 6.32267 | 1.051178 |
# Track rerun/reported test ids so each success is reported exactly once,
# and only after the flaky machinery decides it should not rerun.
# pylint:disable=invalid-name
will_handle = self._handle_test_success(test)
test_id = id(test)
# If this isn't a rerun, the builtin reporter is going to report it as a success
if will_handle and test_id not in self._tests_that_reran:
self._tests_that_have_been_reported.add(test_id)
# If this test hasn't already been reported as successful, then do it now
if not will_handle and test_id in self._tests_that_reran and test_id not in self._tests_that_have_been_reported:
self._nose_result.addSuccess(test)
return will_handle or None | def addSuccess(self, test) | Baseclass override. Called when a test succeeds.
Count remaining retries and compare with number of required successes
that have not yet been achieved; retry if necessary.
Returning True from this method keeps the test runner from reporting
the test as a success; this way we can retry and only report as a
success if we have achieved the required number of successes.
:param test:
The test that has succeeded
:type test:
:class:`nose.case.Test`
:return:
True, if the test will be rerun; False, if nose should handle it.
:rtype:
`bool` | 4.454724 | 4.341053 | 1.026185 |
# Validate max_runs/min_passes and build the attribute dict stamped onto
# flaky tests. Defaults: 2 runs, 1 required pass.
if max_runs is None:
max_runs = 2
if min_passes is None:
min_passes = 1
if min_passes <= 0:
raise ValueError('min_passes must be positive')
if max_runs < min_passes:
raise ValueError('min_passes cannot be greater than max_runs!')
return {
FlakyNames.MAX_RUNS: max_runs,
FlakyNames.MIN_PASSES: min_passes,
FlakyNames.CURRENT_RUNS: 0,
FlakyNames.CURRENT_PASSES: 0,
FlakyNames.RERUN_FILTER: FilterWrapper(rerun_filter or _true),
} | def default_flaky_attributes(max_runs=None, min_passes=None, rerun_filter=None) | Returns the default flaky attributes to set on a flaky test.
:param max_runs:
The value of the FlakyNames.MAX_RUNS attribute to use.
:type max_runs:
`int`
:param min_passes:
The value of the FlakyNames.MIN_PASSES attribute to use.
:type min_passes:
`int`
:param rerun_filter:
Filter function to decide whether a test should be rerun if it fails.
:type rerun_filter:
`callable`
:return:
Default flaky attributes to set on a flaky test.
:rtype:
`dict` | 2.259625 | 2.117369 | 1.067186 |
# Best-effort unicode conversion: fall back to decoding as utf-8 with
# replacement characters when straight conversion fails (Python 2 style).
try:
return unicode_type(obj)
except UnicodeDecodeError:
if hasattr(obj, 'decode'):
return obj.decode('utf-8', 'replace')
return str(obj).decode('utf-8', 'replace') | def ensure_unicode_string(obj) | Return a unicode string representation of the given obj.
:param obj:
The obj we want to represent in unicode
:type obj:
varies
:rtype:
`unicode` | 2.159175 | 2.568435 | 0.840658 |
# Log the terminal failure: the test exhausted its runs without reaching
# min_passes successes.
min_passes = flaky[FlakyNames.MIN_PASSES]
current_passes = flaky[FlakyNames.CURRENT_PASSES]
message = self._failure_message.format(
current_passes,
min_passes,
)
self._log_test_failure(name, err, message) | def _report_final_failure(self, err, flaky, name) | Report that the test has failed too many times to pass at
least min_passes times.
By default, this means that the test has failed twice.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode` | 4.21933 | 4.796548 | 0.87966 |
# Log an intermediate failure along with how many runs remain.
max_runs = flaky[FlakyNames.MAX_RUNS]
runs_left = max_runs - flaky[FlakyNames.CURRENT_RUNS]
message = self._retry_failure_message.format(
runs_left,
max_runs,
)
self._log_test_failure(name, err, message) | def _log_intermediate_failure(self, err, flaky, name) | Report that the test has failed, but still has reruns left.
Then rerun the test.
:param err:
Information about the test failure (from sys.exc_info())
:type err:
`tuple` of `class`, :class:`Exception`, `traceback`
:param flaky:
Dictionary of flaky attributes
:type flaky:
`dict` of `unicode` to varies
:param name:
The test name
:type name:
`unicode` | 4.278793 | 5.01651 | 0.852942 |
add_option(
'--no-flaky-report',
action='store_false',
dest='flaky_report',
default=True,
help="Suppress the report at the end of the "
"run detailing flaky test results.",
)
add_option(
'--no-success-flaky-report',
action='store_false',
dest='flaky_success_report',
default=True,
help="Suppress reporting flaky test successes"
"in the report at the end of the "
"run detailing flaky test results.",
) | def add_report_option(add_option) | Add an option to the test runner to suppress the flaky report.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable` | 2.982555 | 2.725495 | 1.094317 |
add_option(
'--force-flaky',
action="store_true",
dest="force_flaky",
default=False,
help="If this option is specified, we will treat all tests as "
"flaky."
)
add_option(
'--max-runs',
action="store",
dest="max_runs",
type=int,
default=2,
help="If --force-flaky is specified, we will run each test at "
"most this many times (unless the test has its own flaky "
"decorator)."
)
add_option(
'--min-passes',
action="store",
dest="min_passes",
type=int,
default=1,
help="If --force-flaky is specified, we will run each test at "
"least this many times (unless the test has its own flaky "
"decorator)."
) | def add_force_flaky_options(add_option) | Add options to the test runner that force all tests to be flaky.
:param add_option:
A function that can add an option to the test runner.
Its argspec should equal that of argparse.add_option.
:type add_option:
`callable` | 2.046641 | 2.076722 | 0.985515 |
value = self._stream.getvalue()
# If everything succeeded and --no-success-flaky-report is specified
# don't print anything.
if not self._flaky_success_report and not value:
return
stream.write('===Flaky Test Report===\n\n')
# Python 2 will write to the stderr stream as a byte string, whereas
# Python 3 will write to the stream as text. Only encode into a byte
# string if the write tries to encode it first and raises a
# UnicodeEncodeError.
try:
stream.write(value)
except UnicodeEncodeError:
stream.write(value.encode('utf-8', 'replace'))
stream.write('\n===End Flaky Test Report===\n') | def _add_flaky_report(self, stream) | Baseclass override. Write details about flaky tests to the test report.
:param stream:
The test stream to which the report can be written.
:type stream:
`file` | 4.53799 | 4.861109 | 0.93353 |
test_callable = cls._get_test_callable(test)
if test_callable is None:
return
for attr, value in cls._get_flaky_attributes(test_class).items():
already_set = hasattr(test, attr)
if already_set:
continue
attr_on_callable = getattr(test_callable, attr, None)
if attr_on_callable is not None:
cls._set_flaky_attribute(test, attr, attr_on_callable)
elif value is not None:
cls._set_flaky_attribute(test, attr, value) | def _copy_flaky_attributes(cls, test, test_class) | Copy flaky attributes from the test callable or class to the test.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` | 2.216294 | 2.300292 | 0.963484 |
cls._set_flaky_attribute(test_item, flaky_attribute, cls._get_flaky_attribute(test_item, flaky_attribute) + 1) | def _increment_flaky_attribute(cls, test_item, flaky_attribute) | Increments the value of an attribute on a flaky test.
:param test_item:
The test callable on which to set the attribute
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:param flaky_attribute:
The name of the attribute to set
:type flaky_attribute:
`unicode` | 2.344461 | 2.950759 | 0.794528 |
current_runs = cls._get_flaky_attribute(test, FlakyNames.CURRENT_RUNS)
return current_runs is not None | def _has_flaky_attributes(cls, test) | Returns True if the test callable in question is marked as flaky.
:param test:
The test that is being prepared to run
:type test:
:class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`bool` | 7.631846 | 9.442776 | 0.808221 |
return {
attr: cls._get_flaky_attribute(
test_item,
attr,
) for attr in FlakyNames()
} | def _get_flaky_attributes(cls, test_item) | Get all the flaky related attributes from the test.
:param test_item:
The test callable from which to get the flaky related attributes.
:type test_item:
`callable` or :class:`nose.case.Test` or :class:`Function`
:return:
:rtype:
`dict` of `unicode` to varies | 6.422382 | 6.758067 | 0.950328 |
try:
urlopen(Dataset.base_url, timeout=1)
# if an index page is ever added, this will pass through
return True
except HTTPError:
# There's no index for BASE_URL so Error 404 is expected
return True
except URLError:
# This is raised if there is no internet connection
return False | def connection_ok() | Check web connection.
Returns True if web connection is OK, False otherwise. | 8.909438 | 8.723415 | 1.021325 |
def filepath(*args):
return abspath(join(dirname(__file__), '..', 'vega_datasets', *args))
dataset_listing = {}
for name in DATASETS_TO_DOWNLOAD:
data = Dataset(name)
url = data.url
filename = filepath('_data', data.filename)
print("retrieving data {0} -> {1}".format(url, filename))
urlretrieve(url, filename)
dataset_listing[name] = '_data/{0}'.format(data.filename)
with open(filepath('local_datasets.json'), 'w') as f:
json.dump(dataset_listing, f, indent=2, sort_keys=True) | def _download_datasets() | Utility to download datasets into package source | 3.523629 | 3.440773 | 1.024081 |
def load_json(path):
raw = pkgutil.get_data('vega_datasets', path)
return json.loads(bytes_decode(raw))
info = load_json('datasets.json')
descriptions = load_json('dataset_info.json')
local_datasets = load_json('local_datasets.json')
for name in info:
info[name]['is_local'] = (name in local_datasets)
for name in descriptions:
info[name].update(descriptions[name])
return info | def _load_dataset_info() | This loads dataset info from three package files:
vega_datasets/datasets.json
vega_datasets/dataset_info.json
vega_datasets/local_datasets.json
It returns a dictionary with dataset information. | 3.37284 | 2.685683 | 1.255859 |
clsdict = {subcls.name: subcls for subcls in cls.__subclasses__()
if hasattr(subcls, 'name')}
return clsdict.get(name, cls)(name) | def init(cls, name) | Return an instance of this class or an appropriate subclass | 4.574991 | 4.082142 | 1.120733 |
info = cls._dataset_info.get(name, None)
if info is None:
raise ValueError('No such dataset {0} exists, '
'use list_datasets() to get a list '
'of available datasets.'.format(name))
return info | def _infodict(cls, name) | load the info dictionary for the given name | 3.71498 | 3.593904 | 1.033689 |
if use_local and self.is_local:
return pkgutil.get_data('vega_datasets', self.pkg_filename)
else:
return urlopen(self.url).read() | def raw(self, use_local=True) | Load the raw dataset from remote URL or local file
Parameters
----------
use_local : boolean
If True (default), then attempt to load the dataset locally. If
False or if the dataset is not available locally, then load the
data from an external URL. | 4.719867 | 5.592462 | 0.843969 |
chromedriver_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'chromedriver_binary')
chromedriver_filename = find_binary_in_path(get_chromedriver_filename())
if chromedriver_filename:
print("\nChromedriver already installed at {}...\n".format(chromedriver_filename))
new_filename = os.path.join(chromedriver_dir, get_chromedriver_filename())
self.copy_file(chromedriver_filename, new_filename)
else:
chromedriver_bin = get_chromedriver_filename()
chromedriver_filename = os.path.join(chromedriver_dir, chromedriver_bin)
if not os.path.isfile(chromedriver_filename):
print("\nDownloading Chromedriver...\n")
if not os.path.isdir(chromedriver_dir):
os.mkdir(chromedriver_dir)
url = get_chromedriver_url()
try:
response = urlopen(url)
if response.getcode() != 200:
raise URLError('Not Found')
except URLError:
raise RuntimeError('Failed to download chromedriver archive: {}'.format(url))
archive = BytesIO(response.read())
with zipfile.ZipFile(archive) as zip_file:
zip_file.extract(chromedriver_bin, chromedriver_dir)
else:
print("\nChromedriver already installed at {}...\n".format(chromedriver_filename))
if not os.access(chromedriver_filename, os.X_OK):
os.chmod(chromedriver_filename, 0o744)
build_py.run(self) | def run(self) | Downloads, unzips and installs chromedriver.
If a chromedriver binary is found in PATH it will be copied, otherwise downloaded. | 1.99813 | 1.871705 | 1.067545 |
chromedriver_dir = os.path.abspath(os.path.dirname(__file__))
if 'PATH' not in os.environ:
os.environ['PATH'] = chromedriver_dir
elif chromedriver_dir not in os.environ['PATH']:
os.environ['PATH'] += utils.get_variable_separator()+chromedriver_dir | def add_chromedriver_to_path() | Appends the directory of the chromedriver binary file to PATH. | 2.161273 | 2.039767 | 1.059568 |
base_url = 'https://chromedriver.storage.googleapis.com/'
if sys.platform.startswith('linux') and sys.maxsize > 2 ** 32:
platform = 'linux'
architecture = '64'
elif sys.platform == 'darwin':
platform = 'mac'
architecture = '64'
elif sys.platform.startswith('win'):
platform = 'win'
architecture = '32'
else:
raise RuntimeError('Could not determine chromedriver download URL for this platform.')
return base_url + version + '/chromedriver_' + platform + architecture + '.zip' | def get_chromedriver_url(version='74.0.3729.6') | Generates the download URL for current platform , architecture and the given version. Default version is 74.0.3729.6.
Supports Linux, MacOS and Windows.
:param version: chromedriver version string, default '74.0.3729.6'
:return: Download URL for chromedriver | 1.921213 | 1.856651 | 1.034774 |
if 'PATH' not in os.environ:
return None
for directory in os.environ['PATH'].split(get_variable_separator()):
binary = os.path.abspath(os.path.join(directory, filename))
if os.path.isfile(binary) and os.access(binary, os.X_OK):
return binary
return None | def find_binary_in_path(filename) | Searches for a binary named `filename` in the current PATH. If an executable is found, its absolute path is returned
else None.
:param filename: Filename of the binary
:return: Absolute path or None | 2.051547 | 2.197419 | 0.933617 |
if self.state in (STATE_CLOSING, STATE_CLOSED):
return
log.info("close session: %s", self.id)
self.state = STATE_CLOSING
if exc is not None:
self.exception = exc
self.interrupted = True
try:
await self.handler(SockjsMessage(MSG_CLOSE, exc), self)
except Exception:
log.exception("Exception in close handler.") | async def _remote_close(self, exc=None) | close session from remote. | 4.211224 | 3.844002 | 1.095531 |
assert isinstance(msg, str), "String is required"
if self._debug:
log.info("outgoing message: %s, %s", self.id, str(msg)[:200])
if self.state != STATE_OPEN:
return
self._feed(FRAME_MESSAGE, msg) | def send(self, msg) | send message to client. | 6.501092 | 6.346093 | 1.024424 |
if self._debug:
log.info("outgoing message: %s, %s", self.id, frm[:200])
if self.state != STATE_OPEN:
return
self._feed(FRAME_MESSAGE_BLOB, frm) | def send_frame(self, frm) | send message frame to client. | 8.814685 | 7.963869 | 1.106834 |
if self.state in (STATE_CLOSING, STATE_CLOSED):
return
if self._debug:
log.debug("close session: %s", self.id)
self.state = STATE_CLOSING
self._feed(FRAME_CLOSE, (code, reason)) | def close(self, code=3000, reason="Go away!") | close session | 4.258619 | 4.168181 | 1.021697 |
for session in list(self.values()):
if session.state != STATE_CLOSED:
await session._remote_closed()
self.sessions.clear()
super(SessionManager, self).clear() | async def clear(self) | Manually expire all sessions in the pool. | 7.565271 | 6.326344 | 1.195836 |
timestamp = int(time.time()) * 1000
tke = cls(timestamp = timestamp,
# Alias must be lower case or it will corrupt the keystore for Java Keytool and Keytool Explorer
alias = alias.lower(),
cert = cert)
return tke | def new(cls, alias, cert) | Helper function to create a new TrustedCertEntry.
:param str alias: The alias for the Trusted Cert Entry
:param str certs: The certificate, as a byte string.
:returns: A loaded :class:`TrustedCertEntry` instance, ready
to be placed in a keystore. | 12.817807 | 13.891168 | 0.922731 |
timestamp = int(time.time()) * 1000
cert_chain = []
for cert in certs:
cert_chain.append(('X.509', cert))
pke = cls(timestamp = timestamp,
# Alias must be lower case or it will corrupt the keystore for Java Keytool and Keytool Explorer
alias = alias.lower(),
cert_chain = cert_chain)
if key_format == 'pkcs8':
private_key_info = decoder.decode(key, asn1Spec=rfc5208.PrivateKeyInfo())[0]
pke._algorithm_oid = private_key_info['privateKeyAlgorithm']['algorithm'].asTuple()
pke.pkey = private_key_info['privateKey'].asOctets()
pke.pkey_pkcs8 = key
elif key_format == 'rsa_raw':
pke._algorithm_oid = RSA_ENCRYPTION_OID
# We must encode it to pkcs8
private_key_info = rfc5208.PrivateKeyInfo()
private_key_info.setComponentByName('version','v1')
a = AlgorithmIdentifier()
a.setComponentByName('algorithm', pke._algorithm_oid)
a.setComponentByName('parameters', '\x05\x00')
private_key_info.setComponentByName('privateKeyAlgorithm', a)
private_key_info.setComponentByName('privateKey', key)
pke.pkey_pkcs8 = encoder.encode(private_key_info, ifNotEmpty=True)
pke.pkey = key
else:
raise UnsupportedKeyFormatException("Key Format '%s' is not supported" % key_format)
return pke | def new(cls, alias, certs, key, key_format='pkcs8') | Helper function to create a new PrivateKeyEntry.
:param str alias: The alias for the Private Key Entry
:param list certs: An list of certificates, as byte strings.
The first one should be the one belonging to the private key,
the others the chain (in correct order).
:param str key: A byte string containing the private key in the
format specified in the key_format parameter (default pkcs8).
:param str key_format: The format of the provided private key.
Valid options are pkcs8 or rsa_raw. Defaults to pkcs8.
:returns: A loaded :class:`PrivateKeyEntry` instance, ready
to be placed in a keystore.
:raises UnsupportedKeyFormatException: If the key format is
unsupported. | 3.225716 | 3.041208 | 1.060669 |
if self.is_decrypted():
return
encrypted_info = decoder.decode(self._encrypted, asn1Spec=rfc5208.EncryptedPrivateKeyInfo())[0]
algo_id = encrypted_info['encryptionAlgorithm']['algorithm'].asTuple()
algo_params = encrypted_info['encryptionAlgorithm']['parameters'].asOctets()
encrypted_private_key = encrypted_info['encryptedData'].asOctets()
plaintext = None
try:
if algo_id == sun_crypto.SUN_JKS_ALGO_ID:
plaintext = sun_crypto.jks_pkey_decrypt(encrypted_private_key, key_password)
elif algo_id == sun_crypto.SUN_JCE_ALGO_ID:
if self.store_type != "jceks":
raise UnexpectedAlgorithmException("Encountered JCEKS private key protection algorithm in JKS keystore")
# see RFC 2898, section A.3: PBES1 and definitions of AlgorithmIdentifier and PBEParameter
params = decoder.decode(algo_params, asn1Spec=rfc2898.PBEParameter())[0]
salt = params['salt'].asOctets()
iteration_count = int(params['iterationCount'])
plaintext = sun_crypto.jce_pbe_decrypt(encrypted_private_key, key_password, salt, iteration_count)
else:
raise UnexpectedAlgorithmException("Unknown %s private key protection algorithm: %s" % (self.store_type.upper(), algo_id))
except (BadHashCheckException, BadPaddingException):
raise DecryptionFailureException("Failed to decrypt data for private key '%s'; wrong password?" % self.alias)
# at this point, 'plaintext' is a PKCS#8 PrivateKeyInfo (see RFC 5208)
private_key_info = decoder.decode(plaintext, asn1Spec=rfc5208.PrivateKeyInfo())[0]
key = private_key_info['privateKey'].asOctets()
algorithm_oid = private_key_info['privateKeyAlgorithm']['algorithm'].asTuple()
self._encrypted = None
self._pkey = key
self._pkey_pkcs8 = plaintext
self._algorithm_oid = algorithm_oid | def decrypt(self, key_password) | Decrypts the entry using the given password. Has no effect if the entry has already been decrypted.
:param str key_password: The password to decrypt the entry with. If the entry was loaded from a JCEKS keystore,
the password must not contain any characters outside of the ASCII character set.
:raises DecryptionFailureException: If the entry could not be decrypted using the given password.
:raises UnexpectedAlgorithmException: If the entry was encrypted with an unknown or unexpected algorithm
:raise ValueError: If the entry was loaded from a JCEKS keystore and the password contains non-ASCII characters. | 3.186727 | 3.06208 | 1.040707 |
if not self.is_decrypted():
return
encrypted_private_key = sun_crypto.jks_pkey_encrypt(self.pkey_pkcs8, key_password)
a = AlgorithmIdentifier()
a.setComponentByName('algorithm', sun_crypto.SUN_JKS_ALGO_ID)
a.setComponentByName('parameters', '\x05\x00')
epki = rfc5208.EncryptedPrivateKeyInfo()
epki.setComponentByName('encryptionAlgorithm',a)
epki.setComponentByName('encryptedData', encrypted_private_key)
self._encrypted = encoder.encode(epki)
self._pkey = None
self._pkey_pkcs8 = None
self._algorithm_oid = None | def encrypt(self, key_password) | Encrypts the private key, so that it can be saved to a keystore.
This will make it necessary to decrypt it again if it is going to be used later.
Has no effect if the entry is already encrypted.
:param str key_password: The password to encrypt the entry with. | 4.022165 | 4.059634 | 0.99077 |
timestamp = int(time.time()) * 1000
raise NotImplementedError("Creating Secret Keys not implemented") | def new(cls, alias, sealed_obj, algorithm, key, key_size) | Helper function to create a new SecretKeyEntry.
:returns: A loaded :class:`SecretKeyEntry` instance, ready
to be placed in a keystore. | 11.938378 | 18.293772 | 0.652593 |
if store_type not in ['jks', 'jceks']:
raise UnsupportedKeystoreTypeException("The Keystore Type '%s' is not supported" % store_type)
entries = {}
for entry in store_entries:
if not isinstance(entry, AbstractKeystoreEntry):
raise UnsupportedKeystoreEntryTypeException("Entries must be a KeyStore Entry")
if store_type != 'jceks' and isinstance(entry, SecretKeyEntry):
raise UnsupportedKeystoreEntryTypeException('Secret Key only allowed in JCEKS keystores')
alias = entry.alias
if alias in entries:
raise DuplicateAliasException("Found duplicate alias '%s'" % alias)
entries[alias] = entry
return cls(store_type, entries) | def new(cls, store_type, store_entries) | Helper function to create a new KeyStore.
:param string store_type: What kind of keystore
the store should be. Valid options are jks or jceks.
:param list store_entries: Existing entries that
should be added to the keystore.
:returns: A loaded :class:`KeyStore` instance,
with the specified entries.
:raises DuplicateAliasException: If some of the
entries have the same alias.
:raises UnsupportedKeyStoreTypeException: If the keystore is of
an unsupported type
:raises UnsupportedKeyStoreEntryTypeException: If some
of the keystore entries are unsupported (in this keystore type) | 2.930605 | 2.947127 | 0.994394 |
if self.store_type == 'jks':
keystore = MAGIC_NUMBER_JKS
elif self.store_type == 'jceks':
raise NotImplementedError("Saving of JCEKS keystores is not implemented")
else:
raise UnsupportedKeystoreTypeException("Only JKS and JCEKS keystores are supported")
keystore += b4.pack(2) # version 2
keystore += b4.pack(len(self.entries))
for alias, item in self.entries.items():
if isinstance(item, TrustedCertEntry):
keystore += self._write_trusted_cert(alias, item)
elif isinstance(item, PrivateKeyEntry):
keystore += self._write_private_key(alias, item, store_password)
elif isinstance(item, SecretKeyEntry):
if self.store_type != 'jceks':
raise UnsupportedKeystoreEntryTypeException('Secret Key only allowed in JCEKS keystores')
raise NotImplementedError("Saving of Secret Keys not implemented")
else:
raise UnsupportedKeystoreEntryTypeException("Unknown entry type in keystore")
hash_fn = hashlib.sha1
store_password_utf16 = store_password.encode('utf-16be')
hash = hash_fn(store_password_utf16 + SIGNATURE_WHITENING + keystore).digest()
keystore += hash
return keystore | def saves(self, store_password) | Saves the keystore so that it can be read by other applications.
If any of the private keys are unencrypted, they will be encrypted
with the same password as the keystore.
:param str store_password: Password for the created keystore
(and for any unencrypted keys)
:returns: A byte string representation of the keystore.
:raises UnsupportedKeystoreTypeException: If the keystore
is of an unsupported type
:raises UnsupportedKeystoreEntryTypeException: If the keystore
contains an unsupported entry type | 3.228169 | 2.837283 | 1.137768 |
return dict([(a, e) for a, e in self.entries.items()
if isinstance(e, TrustedCertEntry)]) | def certs(self) | A subset of the :attr:`entries` dictionary, filtered down to only
those entries of type :class:`TrustedCertEntry`. | 7.778069 | 4.169896 | 1.865291 |
return dict([(a, e) for a, e in self.entries.items()
if isinstance(e, SecretKeyEntry)]) | def secret_keys(self) | A subset of the :attr:`entries` dictionary, filtered down to only
those entries of type :class:`SecretKeyEntry`. | 7.028303 | 4.337548 | 1.62034 |
return dict([(a, e) for a, e in self.entries.items()
if isinstance(e, PrivateKeyEntry)]) | def private_keys(self) | A subset of the :attr:`entries` dictionary, filtered down to only
those entries of type :class:`PrivateKeyEntry`. | 6.64757 | 4.562919 | 1.456868 |
clazz = obj.get_class()
while clazz:
if clazz.name == class_name:
return True
clazz = clazz.superclass
return False | def _java_is_subclass(cls, obj, class_name) | Given a deserialized JavaObject as returned by the javaobj library,
determine whether it's a subclass of the given class name. | 3.368201 | 3.170474 | 1.062365 |
password_bytes = (password_str.encode('utf-16be') + b"\x00\x00") if len(password_str) > 0 else b""
u = hashfn().digest_size # in bytes
v = hashfn().block_size # in bytes
_salt = bytearray(salt)
_password_bytes = bytearray(password_bytes)
D = bytearray([purpose_byte])*v
S_len = ((len(_salt) + v -1)//v)*v
S = bytearray([_salt[n % len(_salt)] for n in range(S_len)])
P_len = ((len(_password_bytes) + v -1)//v)*v
P = bytearray([_password_bytes[n % len(_password_bytes)] for n in range(P_len)])
I = S + P
c = (desired_key_size + u - 1)//u
derived_key = b""
for i in range(1,c+1):
A = hashfn(bytes(D + I)).digest()
for j in range(iteration_count - 1):
A = hashfn(A).digest()
A = bytearray(A)
B = bytearray([A[n % len(A)] for n in range(v)])
# Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
# blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
# setting I_j=(I_j+B+1) mod 2^v for each j.
for j in range(len(I)//v):
_adjust(I, j*v, B)
derived_key += bytes(A)
# truncate derived_key to the desired size
derived_key = derived_key[:desired_key_size]
return derived_key | def derive_key(hashfn, purpose_byte, password_str, salt, iteration_count, desired_key_size) | Implements PKCS#12 key derivation as specified in RFC 7292, Appendix B, "Deriving Keys and IVs from Passwords and Salt".
Ported from BC's implementation in org.bouncycastle.crypto.generators.PKCS12ParametersGenerator.
hashfn: hash function to use (expected to support the hashlib interface and attributes)
password_str: text string (not yet transformed into bytes)
salt: byte sequence
purpose: "purpose byte", signifies the purpose of the generated pseudorandom key material
desired_key_size: desired amount of bytes of key material to generate | 3.115347 | 3.185047 | 0.978116 |
x = (b[-1] & 0xFF) + (a[a_offset + len(b) - 1] & 0xFF) + 1
a[a_offset + len(b) - 1] = ctypes.c_ubyte(x).value
x >>= 8
for i in range(len(b)-2, -1, -1):
x += (b[i] & 0xFF) + (a[a_offset + i] & 0xFF)
a[a_offset + i] = ctypes.c_ubyte(x).value
x >>= 8 | def _adjust(a, a_offset, b) | a = bytearray
a_offset = int
b = bytearray | 1.99454 | 1.894156 | 1.052997 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.