patch (stringlengths, 17 to 31.2k) | y (int64, 1 to 1) | oldf (stringlengths, 0 to 2.21M) | idx (int64, 1 to 1) | id (int64, 4.29k to 68.4k) | msg (stringlengths, 8 to 843) | proj (stringclasses, 212 values) | lang (stringclasses, 9 values)
---|---|---|---|---|---|---|---|
@@ -35,7 +35,6 @@ class BZAObject(dict):
self.token = None
self.log = logging.getLogger(self.__class__.__name__)
self.http_session = requests.Session()
- self.http_request = self.http_session.request
# copy infrastructure from prototype
if isinstance(proto, BZAObject): | 1 | """
The idea for this module is to keep it separate from the bzt codebase as much as possible;
it may become a separate library in the future. Things like imports and logging should be minimal.
"""
import base64
import json
import logging
import time
from collections import OrderedDict
import requests
from bzt import TaurusNetworkError, ManualShutdown, VERSION
from bzt.six import string_types
from bzt.six import text_type
from bzt.six import urlencode
from bzt.utils import to_json, MultiPartForm
BZA_TEST_DATA_RECEIVED = 100
class BZAObject(dict):
def __init__(self, proto=None, data=None):
"""
:type proto: BZAObject
:type data: dict
"""
super(BZAObject, self).__init__()
self.update(data if data is not None else {})
self.address = "https://a.blazemeter.com"
self.data_address = "https://data.blazemeter.com"
self.timeout = 30
self.logger_limit = 256
self.token = None
self.log = logging.getLogger(self.__class__.__name__)
self.http_session = requests.Session()
self.http_request = self.http_session.request
# copy infrastructure from prototype
if isinstance(proto, BZAObject):
attrs_own = set(dir(BZAObject()))
attrs_parent = set(dir(BZAObject.__bases__[0]()))
attrs_diff = attrs_own - attrs_parent # get only BZAObject attrs
for attr in attrs_diff:
if attr.startswith('__') or attr in (self._request.__name__,):
continue
self.__setattr__(attr, proto.__getattribute__(attr))
def _request(self, url, data=None, headers=None, method=None, raw_result=False):
"""
:param url: str
:type data: Union[dict,str]
:param headers: dict
:param method: str
:return: dict
"""
if not headers:
headers = {}
headers["X-Client-Id"] = "Taurus"
headers["X-Client-Version"] = VERSION
if isinstance(self.token, string_types) and ':' in self.token:
token = self.token
if isinstance(token, text_type):
token = token.encode('ascii')
token = base64.b64encode(token).decode('ascii')
headers['Authorization'] = 'Basic ' + token
elif self.token:
headers["X-Api-Key"] = self.token
if method:
log_method = method
else:
log_method = 'GET' if data is None else 'POST'
url = str(url)
if isinstance(data, text_type):
data = data.encode("utf-8")
if isinstance(data, (dict, list)):
data = to_json(data)
headers["Content-Type"] = "application/json"
self.log.debug("Request: %s %s %s", log_method, url, data[:self.logger_limit] if data else None)
response = self.http_request(method=log_method, url=url, data=data, headers=headers, timeout=self.timeout)
resp = response.content
if not isinstance(resp, str):
resp = resp.decode()
self.log.debug("Response [%s]: %s", response.status_code, resp[:self.logger_limit] if resp else None)
if response.status_code >= 400:
try:
result = json.loads(resp) if len(resp) else {}
if 'error' in result and result['error']:
raise TaurusNetworkError("API call error %s: %s" % (url, result['error']))
else:
raise TaurusNetworkError("API call error %s on %s: %s" % (response.status_code, url, result))
except ValueError:
raise TaurusNetworkError("API call error %s: %s %s" % (url, response.status_code, response.reason))
if raw_result:
return resp
try:
result = json.loads(resp) if len(resp) else {}
except ValueError as exc:
self.log.debug('Response: %s', resp)
raise TaurusNetworkError("Non-JSON response from API: %s" % exc)
if 'error' in result and result['error']:
raise TaurusNetworkError("API call error %s: %s" % (url, result['error']))
return result
class BZAObjectsList(list):
def first(self):
""" Returns first item of non-empty list or None """
if len(self):
return self[0]
return None
def __getattr__(self, name):
def call_list_items(*args, **kwargs):
res = BZAObjectsList()
for item in self:
method = getattr(item, name)
chunk = method(*args, **kwargs)
if not isinstance(chunk, BZAObjectsList):
msg = "%s.%s() must return BZAObjectsList, but returned %s"
raise TypeError(msg % (type(item).__name__, name, type(chunk).__name__))
res += chunk
# logging.debug("%s[%s]: %s", name, len(res), json.dumps(res, indent=True))
return res
return call_list_items
# ================================= Entities =================================
class User(BZAObject):
def ping(self):
""" Quick check if we can access the service """
self._request(self.address + '/api/v4/web/version')
def accounts(self, ident=None, name=None):
"""
:rtype: BZAObjectsList[Account]
"""
res = self._request(self.address + '/api/v4/accounts')
accounts = []
for acc in res['result']:
if ident is not None and acc['id'] != ident:
continue
if name is not None and acc['name'] != name:
continue
accounts.append(Account(self, acc))
return BZAObjectsList(accounts)
def fetch(self):
res = self._request(self.address + '/api/v4/user')
if 'result' in res:
self.update(res['result'])
else:
self.update(res)
return self
def available_locations(self, include_harbors=False):
self.log.warn("Deprecated method used: available_locations")
if 'locations' not in self:
self.fetch()
locations = {}
for loc in self['locations']:
loc_id = str(loc['id'])
if not loc_id.startswith('harbor-') or include_harbors:
locations[loc_id] = loc
return locations
def collection_draft(self, name, taurus_config, resource_files):
if resource_files:
draft_id = "taurus_%s" % id(self.token)
self._upload_collection_resources(resource_files, draft_id)
taurus_config.merge({"dataFiles": {"draftId": draft_id}})
collection_draft = self._import_config(taurus_config)
collection_draft['name'] = name
return collection_draft
def _import_config(self, config):
url = self.address + "/api/v4/multi-tests/taurus-import"
resp = self._request(url, data=config, method="POST")
return resp['result']
def _upload_collection_resources(self, resource_files, draft_id):
self.log.debug('Uploading resource files: %s', resource_files)
url = self.address + "/api/v4/web/elfinder/%s" % draft_id
body = MultiPartForm()
body.add_field("cmd", "upload")
body.add_field("target", "s1_Lw")
body.add_field('folder', 'drafts')
for rfile in resource_files:
body.add_file('upload[]', rfile)
hdr = {"Content-Type": str(body.get_content_type())}
self._request(url, body.form_as_bytes(), headers=hdr)
def test_by_ids(self, account_id=None, workspace_id=None, project_id=None, test_id=None, taurus_only=False):
account = self.accounts(ident=account_id).first()
if not account:
raise ValueError("Account not found: %s" % account_id)
workspace = account.workspaces(ident=workspace_id).first()
if workspace is None:
raise ValueError("Workspace not found: %s" % workspace_id)
project = workspace.projects(ident=project_id).first()
if project:
target = project
else:
target = workspace
test = target.multi_tests(ident=test_id).first()
if test is None:
test_type = "taurus" if taurus_only else None
test = target.tests(ident=test_id, test_type=test_type).first()
if test is None:
raise ValueError("Test wasn't found")
return account, workspace, project, test
class Account(BZAObject):
def workspaces(self, ident=None, name=None):
"""
:rtype: BZAObjectsList[Workspace]
"""
params = {"accountId": self['id'], 'enabled': 'true', 'limit': 100}
params = OrderedDict(sorted(params.items(), key=lambda t: t[0]))
res = self._request(self.address + '/api/v4/workspaces?' + urlencode(params))
workspaces = []
for wksp in res['result']:
if not wksp['enabled']:
continue
if name is not None and wksp['name'] != name:
continue
if ident is not None and wksp['id'] != ident:
continue
workspaces.append(Workspace(self, wksp))
return BZAObjectsList(workspaces)
class Workspace(BZAObject):
def projects(self, name=None, ident=None):
"""
:rtype: BZAObjectsList[Project]
"""
params = OrderedDict()
params.update({"workspaceId": self['id']})
if name:
params.update({"name": name})
res = self._request(self.address + '/api/v4/projects?' + urlencode(params))
projects = BZAObjectsList()
for item in res['result']:
if name is not None and item['name'] != name:
continue
if ident is not None and item['id'] != ident:
continue
projects.append(Project(self, item))
return BZAObjectsList(projects)
def locations(self, include_private=False):
if 'locations' not in self:
self.fetch()
res = []
for loc in self['locations']:
if not loc['id'].startswith('harbor-') or include_private:
res.append(Location(self, loc))
return BZAObjectsList(res)
def private_locations(self):
"""
:rtype: BZAObjectsList[BZAObject]
"""
params = {"workspaceId": self['id']}
res = self._request(self.address + '/api/v4/private-locations?' + urlencode(params))
return BZAObjectsList([BZAObject(self, x) for x in res['result']])
def tests(self, name=None, ident=None, test_type=None):
"""
:rtype: BZAObjectsList[Test]
"""
params = OrderedDict({"workspaceId": self['id']})
if name is not None:
params["name"] = name
if ident is not None:
params["id"] = ident
res = self._request(self.address + '/api/v4/tests?' + urlencode(params))
tests = BZAObjectsList()
for item in res['result']:
if ident is not None and item['id'] != ident:
continue
if name is not None and item['name'] != name:
continue
if test_type is not None and item['configuration']['type'] != test_type:
continue
tests.append(Test(self, item))
return tests
def multi_tests(self, name=None, ident=None):
"""
:rtype: BZAObjectsList[MultiTest]
"""
params = OrderedDict({"workspaceId": self['id']})
if name is not None:
params["name"] = name
if ident is not None:
params["id"] = ident
res = self._request(self.address + '/api/v4/multi-tests?' + urlencode(params))
tests = BZAObjectsList()
for item in res['result']:
if ident is not None and item['id'] != ident:
continue
if name is not None and item['name'] != name:
continue
tests.append(MultiTest(self, item))
return tests
def create_project(self, proj_name):
params = {"name": str(proj_name), "workspaceId": self['id']}
data = self._request(self.address + '/api/v4/projects', params)
return Project(self, data['result'])
def fetch(self):
res = self._request(self.address + '/api/v4/workspaces/%s' % self['id'])
self.update(res['result'])
return self
class Location(BZAObject):
pass
class Project(BZAObject):
def tests(self, name=None, ident=None, test_type=None):
"""
:rtype: BZAObjectsList[Test]
"""
params = OrderedDict({"projectId": self['id']})
if name is not None:
params["name"] = name
if ident is not None:
params["id"] = ident
res = self._request(self.address + '/api/v4/tests?' + urlencode(params))
tests = BZAObjectsList()
for item in res['result']:
if ident is not None and item['id'] != ident:
continue
if name is not None and item['name'] != name:
continue
if test_type is not None and item['configuration']['type'] != test_type:
continue
tests.append(Test(self, item))
return tests
def multi_tests(self, name=None, ident=None):
"""
:rtype: BZAObjectsList[MultiTest]
"""
params = OrderedDict({"projectId": self['id']})
if name is not None:
params["name"] = name
if ident is not None:
params["id"] = ident
res = self._request(self.address + '/api/v4/multi-tests?' + urlencode(params))
tests = BZAObjectsList()
for item in res['result']:
if ident is not None and item['id'] != ident:
continue
if name is not None and item['name'] != name:
continue
tests.append(MultiTest(self, item))
return tests
def create_test(self, name, configuration):
"""
:param name:
:param configuration:
:rtype: Test
"""
self.log.debug("Creating new test")
url = self.address + '/api/v4/tests'
data = {"name": name, "projectId": self['id'], "configuration": configuration}
resp = self._request(url, data)
return Test(self, resp['result'])
def create_multi_test(self, collection_draft):
collection_draft['projectId'] = self['id']
url = self.address + "/api/v4/multi-tests"
resp = self._request(url, data=collection_draft, method="POST")
return MultiTest(self, resp['result'])
class Test(BZAObject):
def start_external(self):
url = self.address + "/api/v4/tests/%s/start-external" % self['id']
res = self._request(url, method='POST')
result = res['result']
session = Session(self, result['session'])
session.data_signature = result['signature']
return session, Master(self, result['master'])
def start_anonymous_external_test(self):
"""
:rtype: (Session,Master,str)
"""
url = self.address + "/api/v4/sessions"
res = self._request(url, method='POST')
result = res['result']
session = Session(self, result['session'])
session.data_signature = result['signature']
return session, Master(self, result['master']), result['publicTokenUrl']
def get_files(self):
path = self.address + "/api/v4/web/elfinder/%s" % self['id']
query = urlencode(OrderedDict({'cmd': 'open', 'target': 's1_Lw'}))
url = path + '?' + query
response = self._request(url)
return response["files"]
def delete_files(self):
files = self.get_files()
self.log.debug("Test files: %s", [filedict['name'] for filedict in files])
if not files:
return
path = "/api/v4/web/elfinder/%s" % self['id']
query = "cmd=rm&" + "&".join("targets[]=%s" % fname['hash'] for fname in files)
url = self.address + path + '?' + query
response = self._request(url)
if len(response['removed']) == len(files):
self.log.debug("Successfully deleted %d test files", len(response['removed']))
return response['removed']
def start(self, as_functional=False):
url = self.address + "/api/v4/tests/%s/start" % self['id']
if as_functional:
url += "?functionalExecution=true"
resp = self._request(url, method='POST')
master = Master(self, resp['result'])
return master
def upload_files(self, taurus_config, resource_files):
self.log.debug("Uploading files into the test: %s", resource_files)
url = '%s/api/v4/tests/%s/files' % (self.address, self['id'])
body = MultiPartForm()
body.add_file_as_string('script', 'taurus.yml', taurus_config)
for rfile in resource_files:
body.add_file('files[]', rfile)
hdr = {"Content-Type": str(body.get_content_type())}
self._request(url, body.form_as_bytes(), headers=hdr)
def update_props(self, coll):
url = self.address + "/api/v4/tests/%s" % self['id']
res = self._request(url, data=coll, method="PATCH")
return res['result']
class MultiTest(BZAObject):
def start(self):
# NOTE: delayedStart=true means that BM will not start the test until all instances are ready.
# If omitted, instances will start as soon as each is ready (not simultaneously),
# which may cause inconsistent data in the aggregate report.
url = self.address + "/api/v4/multi-tests/%s/start?delayedStart=true" % self['id']
resp = self._request(url, method="POST")
return Master(self, resp['result'])
def stop(self):
url = self.address + "/api/v4/multi-tests/%s/stop" % self['id']
self._request(url, method='POST')
def update_collection(self, coll):
url = self.address + "/api/v4/multi-tests/%s" % self['id']
self._request(url, data=coll, method="PATCH")
def delete(self):
url = self.address + "/api/v4/multi-tests/%s" % self['id']
self._request(url, method="DELETE")
class Master(BZAObject):
def __init__(self, proto=None, data=None):
super(Master, self).__init__(proto, data)
self.warned_of_too_much_labels = False
def make_report_public(self):
url = self.address + "/api/v4/masters/%s/public-token" % self['id']
res = self._request(url, {"publicToken": None}, method="POST")
public_token = res['result']['publicToken']
report_link = self.address + "/app/?public-token=%s#/masters/%s/summary" % (public_token, self['id'])
return report_link
def fetch(self):
url = self.address + "/api/v4/masters/%s" % self['id']
res = self._request(url)
self.update(res['result'])
return self
def set(self, data):
url = self.address + "/api/v4/masters/%s" % self['id']
res = self._request(url, data, method='PATCH')
self.update(res['result'])
def get_status(self):
sess = self._request(self.address + '/api/v4/masters/%s/status' % self['id'])
return sess['result']
def sessions(self):
sess = self._request(self.address + '/api/v4/masters/%s/sessions' % self['id'])
if 'sessions' in sess['result']:
arr = sess['result']['sessions']
else:
arr = sess['result']
return BZAObjectsList([Session(self, x) for x in arr])
def get_kpis(self, min_ts):
params = [
("interval", 1),
("from", min_ts),
("master_ids[]", self['id']),
]
for item in ('t', 'lt', 'by', 'n', 'ec', 'ts', 'na'):
params.append(("kpis[]", item))
labels = self.get_labels()[:100]
if len(labels) == 100 and not self.warned_of_too_much_labels:
self.log.warn("Using only first 100 labels, while test has more labels")
self.warned_of_too_much_labels = True
for label in labels:
params.append(("labels[]", label['id']))
url = self.address + "/api/v4/data/kpis?" + urlencode(params)
res = self._request(url)
return res['result']
def get_labels(self):
url = self.address + "/api/v4/data/labels?" + urlencode({'master_id': self['id']})
res = self._request(url)
return res['result']
def get_aggregate_report(self):
url = self.address + "/api/v4/masters/%s/reports/aggregatereport/data" % self['id']
res = self._request(url)
return res['result']
def get_errors(self):
url = self.address + "/api/v4/masters/%s/reports/errorsreport/data?noDataError=false" % self['id']
res = self._request(url)
return res['result']
def force_start(self):
url = self.address + "/api/v4/masters/%s/force-start" % self['id']
self._request(url, method="POST")
def stop(self):
url = self.address + "/api/v4/masters/%s/stop"
self._request(url % self['id'], method='POST')
def terminate(self):
url = self.address + "/api/v4/masters/%s/terminate"
self._request(url % self['id'], method='POST')
def get_full(self):
url = self.address + "/api/v4/masters/%s/full" % self['id']
return self._request(url)['result']
def get_functional_report_groups(self):
url = self.address + "/api/v4/masters/%s/reports/functional/groups" % self['id']
return self._request(url)['result']
def get_functional_report_group(self, group_id):
url = self.address + "/api/v4/masters/%s/reports/functional/groups/%s" % (self['id'], group_id)
return self._request(url)['result']
class Session(BZAObject):
def __init__(self, proto=None, data=None):
super(Session, self).__init__(proto, data)
self.data_signature = None
self.kpi_target = 'labels_bulk'
self.monitoring_upload_notified = False
def fetch(self):
url = self.address + "/api/v4/sessions/%s" % self['id']
res = self._request(url)
self.update(res['result'])
return self
def set(self, data):
url = self.address + "/api/v4/sessions/%s" % self['id']
res = self._request(url, data, method='PATCH')
self.update(res['result'])
def stop(self):
url = self.address + "/api/v4/sessions/%s/stop" % self['id']
self._request(url, method='POST')
def terminate(self):
url = self.address + "/api/v4/sessions/%s/terminate" % self['id']
self._request(url, method='POST')
def stop_anonymous(self):
url = self.address + "/api/v4/sessions/%s/terminate-external" % self['id'] # FIXME: V4 API has issue with it
data = {"signature": self.data_signature, "testId": self['testId'], "sessionId": self['id']}
self._request(url, data)
def send_kpi_data(self, data, is_check_response=True, submit_target=None):
"""
Sends online data
:type submit_target: str
:param is_check_response:
:type data: str
"""
submit_target = self.kpi_target if submit_target is None else submit_target
url = self.data_address + "/submit.php?session_id=%s&signature=%s&test_id=%s&user_id=%s"
url %= self['id'], self.data_signature, self['testId'], self['userId']
url += "&pq=0&target=%s&update=1" % submit_target
hdr = {"Content-Type": "application/json"}
response = self._request(url, data, headers=hdr)
if response and 'response_code' in response and response['response_code'] != 200:
raise TaurusNetworkError("Failed to feed data to %s, response code %s" %
(submit_target, response['response_code']))
if response and 'result' in response and is_check_response:
result = response['result']['session']
self.log.debug("Result: %s", result)
if 'statusCode' in result and result['statusCode'] > 100:
self.log.info("Test was stopped through Web UI: %s", result['status'])
raise ManualShutdown("The test was interrupted through Web UI")
def send_monitoring_data(self, engine_id, data):
file_name = '%s-%s-c.monitoring.json' % (self['id'], engine_id)
self.upload_file(file_name, to_json(data))
if not self.monitoring_upload_notified:
self.log.debug("Sending engine health notification")
self.notify_monitoring_file(file_name)
self.monitoring_upload_notified = True
def upload_file(self, filename, contents=None):
"""
Upload single artifact
:type filename: str
:type contents: str
:raise TaurusNetworkError:
"""
body = MultiPartForm() # TODO: can we migrate off it, and use something native to requests lib?
# maybe http://stackoverflow.com/questions/12385179/how-to-send-a-multipart-form-data-with-requests-in-python
if contents is None:
body.add_file('file', filename)
else:
body.add_file_as_string('file', filename, contents)
url = self.data_address + "/api/v4/image/%s/files?signature=%s"
url %= self['id'], self.data_signature
hdr = {"Content-Type": str(body.get_content_type())}
response = self._request(url, body.form_as_bytes(), headers=hdr)
if not response['result']:
raise TaurusNetworkError("Upload failed: %s" % response)
def get_logs(self):
url = self.address + "/api/v4/sessions/%s/reports/logs" % self['id']
return self._request(url)['result']['data']
def notify_monitoring_file(self, file_name):
data = {
'fileName': file_name,
}
data_str = json.dumps(data)
self.send_kpi_data(data_str, submit_target='engine_health')
class BZAProxy(BZAObject):
def __init__(self):
super(BZAProxy, self).__init__()
self.delay = 5
def stop(self):
self._request(self.address + '/api/latest/proxy/recording/stop', method='POST')
def start(self):
self._request(self.address + '/api/latest/proxy/recording/start', method='POST')
def get_jmx(self, smart=False):
url = '/api/latest/proxy/download?format=jmx&smart=' + str(smart).lower()
response_url = self._request(self.address + url).get('result')
response_content = self._request(response_url, raw_result=True)
return response_content
def get_addr(self):
response = self._request(self.address + '/api/latest/proxy')
proxy_info = response['result']
if proxy_info:
self.log.info('Using existing recording proxy...')
if proxy_info['status'] == 'active':
self.log.info('Proxy is active, stop it')
self.stop()
else:
self.log.info('Creating new recording proxy...')
response = self._request(self.address + '/api/latest/proxy', method='POST')
proxy_info = response['result']
self._request(self.address + '/api/latest/proxy/recording/clear', method='POST')
return 'http://%s:%s' % (proxy_info['host'], proxy_info['port'])
def get_json(self):
response = self._request(self.address + '/api/latest/proxy/download?format=json', raw_result=True)
return response
 | 1 | 14,993 | this is used as an important way of mocking for unit tests, FYI | Blazemeter-taurus | py |
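The review note in the row above concerns the `self.http_request = self.http_session.request` line that the patch removes. As a minimal sketch of what that per-instance attribute enables (not code from the Taurus repo; the import path, fake-response shape, and test payload are assumptions), a unit test can swap the bound callable so `_request()` never touches the network:

```python
import json

# Hypothetical import path; adjust to wherever the module above actually lives.
from bzt.bza import User


class FakeResponse(object):
    """Stands in for requests.Response, exposing only what _request() reads."""
    def __init__(self, payload, status_code=200):
        self.content = json.dumps(payload).encode("utf-8")
        self.status_code = status_code
        self.reason = "OK"


def make_fake_request(payload):
    """Build a callable matching the keyword signature _request() calls with."""
    def fake_request(method=None, url=None, data=None, headers=None, timeout=None):
        return FakeResponse(payload)
    return fake_request


# Swapping the per-instance attribute keeps the whole client offline; this
# only works while BZAObject keeps the self.http_request indirection around.
user = User()
user.http_request = make_fake_request({"result": {"id": 1}})
assert user.fetch()["id"] == 1
```

Without that attribute, a test would have to patch `requests.Session.request` (or replace the session object itself) instead of swapping a single bound callable on the instance, which is presumably what the reviewer is flagging.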
@@ -156,6 +156,7 @@ if not config.style_17:
options.Points = Options('plot', show_frame=True)
options.Histogram = Options('style', line_color='black', fill_color=Cycle())
+options.Distribution = Options('style', muted_alpha=0.2)
options.ErrorBars = Options('style', color='black')
options.Spread = Options('style', color=Cycle(), alpha=0.6, line_color='black')
options.Bars = Options('style', color=Cycle(), line_color='black', width=0.8) | 1 | from __future__ import absolute_import
from distutils.version import LooseVersion
import numpy as np
import bokeh
from bokeh.palettes import all_palettes
from ...core import (Store, Overlay, NdOverlay, Layout, AdjointLayout,
GridSpace, GridMatrix, NdLayout, config)
from ...element import (Curve, Points, Scatter, Image, Raster, Path,
RGB, Histogram, Spread, HeatMap, Contours, Bars,
Box, Bounds, Ellipse, Polygons, BoxWhisker, Arrow,
ErrorBars, Text, HLine, VLine, Spline, Spikes,
Table, ItemTable, Area, HSV, QuadMesh, VectorField,
Graph, Nodes, EdgePaths, Distribution, Bivariate,
TriMesh)
from ...core.options import Options, Cycle, Palette
from ...core.util import VersionError
if LooseVersion(bokeh.__version__) < '0.12.10':
raise VersionError("The bokeh extension requires a bokeh version >=0.12.10, "
"please upgrade from bokeh %s to a more recent version."
% bokeh.__version__, bokeh.__version__, '0.12.10')
try:
from ...interface import DFrame
except:
DFrame = None
from .annotation import TextPlot, LineAnnotationPlot, SplinePlot, ArrowPlot
from .callbacks import Callback # noqa (API import)
from .element import OverlayPlot, ElementPlot
from .chart import (PointPlot, CurvePlot, SpreadPlot, ErrorPlot, HistogramPlot,
SideHistogramPlot, BarPlot, SpikesPlot, SideSpikesPlot,
AreaPlot, VectorFieldPlot, BoxWhiskerPlot)
from .graphs import GraphPlot, NodePlot, TriMeshPlot
from .path import PathPlot, PolygonPlot, ContourPlot
from .plot import GridPlot, LayoutPlot, AdjointLayoutPlot
from .raster import RasterPlot, RGBPlot, HeatMapPlot, HSVPlot, QuadMeshPlot
from .renderer import BokehRenderer
from .stats import DistributionPlot, BivariatePlot
from .tabular import TablePlot
from .util import bokeh_version # noqa (API import)
Store.renderers['bokeh'] = BokehRenderer.instance()
if len(Store.renderers) == 1:
Store.current_backend = 'bokeh'
associations = {Overlay: OverlayPlot,
NdOverlay: OverlayPlot,
GridSpace: GridPlot,
GridMatrix: GridPlot,
AdjointLayout: AdjointLayoutPlot,
Layout: LayoutPlot,
NdLayout: LayoutPlot,
# Charts
Curve: CurvePlot,
Bars: BarPlot,
BoxWhisker: BoxWhiskerPlot,
Points: PointPlot,
Scatter: PointPlot,
ErrorBars: ErrorPlot,
Spread: SpreadPlot,
Spikes: SpikesPlot,
Area: AreaPlot,
VectorField: VectorFieldPlot,
Histogram: HistogramPlot,
# Rasters
Image: RasterPlot,
RGB: RGBPlot,
HSV: HSVPlot,
Raster: RasterPlot,
HeatMap: HeatMapPlot,
QuadMesh: QuadMeshPlot,
# Paths
Path: PathPlot,
Contours: ContourPlot,
Box: PathPlot,
Bounds: PathPlot,
Ellipse: PathPlot,
Polygons: PolygonPlot,
# Annotations
HLine: LineAnnotationPlot,
VLine: LineAnnotationPlot,
Text: TextPlot,
Spline: SplinePlot,
Arrow: ArrowPlot,
# Graph Elements
Graph: GraphPlot,
Nodes: NodePlot,
EdgePaths: PathPlot,
TriMesh: TriMeshPlot,
# Tabular
Table: TablePlot,
ItemTable: TablePlot,
# Statistics
Distribution: DistributionPlot,
Bivariate: BivariatePlot}
if DFrame is not None:
associations[DFrame] = TablePlot
Store.register(associations, 'bokeh')
if config.style_17:
ElementPlot.show_grid = True
RasterPlot.show_grid = True
ElementPlot.show_frame = True
else:
# Raster types, Path types and VectorField should have frames
for framedcls in [VectorFieldPlot, ContourPlot, PathPlot, PolygonPlot,
RasterPlot, RGBPlot, HSVPlot, QuadMeshPlot, HeatMapPlot]:
framedcls.show_frame = True
AdjointLayoutPlot.registry[Histogram] = SideHistogramPlot
AdjointLayoutPlot.registry[Spikes] = SideSpikesPlot
point_size = np.sqrt(6) # Matches matplotlib default
Cycle.default_cycles['default_colors'] = ['#30a2da', '#fc4f30', '#e5ae38',
'#6d904f', '#8b8b8b']
# Register bokeh.palettes with Palette and Cycle
def colormap_generator(palette):
return lambda value: palette[int(value*(len(palette)-1))]
Palette.colormaps.update({name: colormap_generator(p[max(p.keys())])
for name, p in all_palettes.items()})
Cycle.default_cycles.update({name: p[max(p.keys())] for name, p in all_palettes.items()
if max(p.keys()) < 256})
dflt_cmap = 'hot' if config.style_17 else 'fire'
options = Store.options(backend='bokeh')
# Charts
options.Curve = Options('style', color=Cycle(), line_width=2)
options.BoxWhisker = Options('style', box_fill_color=Cycle(), whisker_color='black',
box_line_color='black', outlier_color='black')
options.Scatter = Options('style', color=Cycle(), size=point_size, cmap=dflt_cmap)
options.Points = Options('style', color=Cycle(), size=point_size, cmap=dflt_cmap)
if not config.style_17:
options.Points = Options('plot', show_frame=True)
options.Histogram = Options('style', line_color='black', fill_color=Cycle())
options.ErrorBars = Options('style', color='black')
options.Spread = Options('style', color=Cycle(), alpha=0.6, line_color='black')
options.Bars = Options('style', color=Cycle(), line_color='black', width=0.8)
options.Spikes = Options('style', color='black', cmap='fire')
options.Area = Options('style', color=Cycle(), alpha=1, line_color='black')
options.VectorField = Options('style', color='black')
# Paths
if not config.style_17:
options.Contours = Options('plot', show_legend=True)
options.Contours = Options('style', color=Cycle(), cmap='viridis')
options.Path = Options('style', color=Cycle(), cmap='viridis')
options.Box = Options('style', color='black')
options.Bounds = Options('style', color='black')
options.Ellipse = Options('style', color='black')
options.Polygons = Options('style', color=Cycle(), line_color='black',
cmap='viridis')
# Rasters
options.Image = Options('style', cmap=dflt_cmap)
options.GridImage = Options('style', cmap=dflt_cmap)
options.Raster = Options('style', cmap=dflt_cmap)
options.QuadMesh = Options('style', cmap=dflt_cmap, line_alpha=0)
options.HeatMap = Options('style', cmap='RdYlBu_r', line_alpha=0)
# Annotations
options.HLine = Options('style', color=Cycle(), line_width=3, alpha=1)
options.VLine = Options('style', color=Cycle(), line_width=3, alpha=1)
options.Arrow = Options('style', arrow_size=10)
# Graphs
options.Graph = Options('style', node_size=15, node_fill_color=Cycle(),
node_line_color='black',
node_nonselection_fill_color=Cycle(),
node_hover_line_color='black',
node_hover_fill_color='limegreen',
node_nonselection_alpha=0.2,
edge_nonselection_alpha=0.2,
node_nonselection_line_color='black',
edge_line_color='black', edge_line_width=2,
edge_nonselection_line_color='black',
edge_hover_line_color='limegreen')
options.TriMesh = Options('style', node_size=5, node_line_color='black',
node_fill_color='white', edge_line_color='black',
node_hover_fill_color='limegreen',
edge_hover_line_color='limegreen',
edge_nonselection_alpha=0.2,
edge_nonselection_line_color='black',
node_nonselection_alpha=0.2,
edge_line_width=1)
options.TriMesh = Options('plot', tools=[])
options.Nodes = Options('style', line_color='black', color=Cycle(),
size=20, nonselection_fill_color=Cycle(),
selection_fill_color='limegreen',
hover_fill_color='indianred')
options.Nodes = Options('plot', tools=['hover', 'tap'])
options.EdgePaths = Options('style', color='black', nonselection_alpha=0.2,
line_width=2, selection_color='limegreen',
hover_line_color='indianred')
options.EdgePaths = Options('plot', tools=['hover', 'tap'])
# Define composite defaults
options.GridMatrix = Options('plot', shared_xaxis=True, shared_yaxis=True,
xaxis=None, yaxis=None)
options.Overlay = Options('style', click_policy='mute')
options.NdOverlay = Options('style', click_policy='mute')
options.Curve = Options('style', muted_alpha=0.2)
options.Path = Options('style', muted_alpha=0.2)
options.Scatter = Options('style', muted_alpha=0.2)
options.Points = Options('style', muted_alpha=0.2)
options.Polygons = Options('style', muted_alpha=0.2)
# Statistics
options.Distribution = Options('style', fill_color=Cycle(), line_color='black',
fill_alpha=0.5)
| 1 | 19,715 | Sorry, should have been clearer, I meant adding this to the ``Histogram`` options one line above. | holoviz-holoviews | py |
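Read literally, the comment in the row above asks for the `muted_alpha` keyword to be folded into the existing `Histogram` style options rather than introduced as a separate `Distribution` entry. As a sketch of that variant (an interpretation of the review comment, not necessarily the change that was merged), reusing the `options`, `Options` and `Cycle` names already in scope in the file above:

```python
# Variant the reviewer appears to describe: extend the existing Histogram
# style options with muted_alpha instead of adding a new Distribution line.
options.Histogram = Options('style', line_color='black', fill_color=Cycle(),
                            muted_alpha=0.2)
```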
@@ -566,7 +566,7 @@ namespace pwiz.SkylineTest
// Bad terminus
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" terminus=\"X\" formula=\"C23N\" />");
// Bad formula
- AssertEx.DeserializeError<StaticMod, ArgumentException>("<static_modification name=\"Mod\" aminoacid=\"K\" formula=\"C23NHe2\" />");
+ AssertEx.DeserializeError<StaticMod, ArgumentException>("<static_modification name=\"Mod\" aminoacid=\"K\" formula=\"C23NHx2\" />");
// Terminal label without amino acid
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"15N\" terminus=\"C\" label_13C=\"true\"/>");
// Formula and labeled atoms | 1 | /*
* Original author: Brendan MacLean <brendanx .at. u.washington.edu>,
* MacCoss Lab, Department of Genome Sciences, UW
*
* Copyright 2009 University of Washington - Seattle, WA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Xml.Serialization;
using Microsoft.VisualStudio.TestTools.UnitTesting;
using pwiz.Common.Chemistry;
using pwiz.ProteomeDatabase.API;
using pwiz.Skyline.Model;
using pwiz.Skyline.Model.DocSettings;
using pwiz.Skyline.Model.Lib;
using pwiz.Skyline.Properties;
using pwiz.Skyline.Util;
using pwiz.SkylineTestUtil;
using SequenceTerminus = pwiz.Skyline.Model.SequenceTerminus;
namespace pwiz.SkylineTest
{
/// <summary>
/// This is a test class for SrmSettingsTest and is intended
/// to contain all SrmSettingsTest Unit Tests
/// </summary>
[TestClass]
public class SrmSettingsTest : AbstractUnitTest
{
private const string XML_DIRECTIVE = "<?xml version=\"1.0\" encoding=\"utf-16\"?>\r\n";
/// <summary>
/// Simple test of serializing the default SrmSettings, reloading
/// and ensuring consistency.
/// </summary>
[TestMethod]
public void SettingsSerializeDefaultsTest()
{
AssertEx.Serializable(SrmSettingsList.GetDefault(), AssertEx.SettingsCloned);
}
/// <summary>
/// Test of deserializing current settings.
/// </summary>
[TestMethod]
public void SettingsSerializeCurrentTest()
{
AssertEx.Serializable(AssertEx.Deserialize<SrmSettings>(SETTINGS_CURRENT), 3, AssertEx.SettingsCloned);
}
private const string SETTINGS_CURRENT =
"<settings_summary name=\"Default\">\n" +
" <peptide_settings>\n" +
" <enzyme name=\"LysN promisc\" cut=\"KASR\" no_cut=\"\" sense=\"N\" />\n" +
" <digest_settings max_missed_cleavages=\"1\" exclude_ragged_ends=\"true\" />\n" +
" <peptide_prediction>\n" +
" <predict_retention_time name=\"Bovine Standard (100A)\" calculator=\"SSRCalc 3.0 (100A)\"\n" +
" time_window=\"13.6\">\n" +
" <regression_rt slope=\"1.681\" intercept=\"-6.247\" />\n" +
" </predict_retention_time>\n" +
" </peptide_prediction>\n" +
" <peptide_filter start=\"0\" min_length=\"5\" max_length=\"30\" min_transtions=\"4\"\n" +
" auto_select=\"True\">\n" +
" <peptide_exclusions>\n" +
" <exclusion name=\"Met\" regex=\"[M]\" />\n" +
" <exclusion name=\"NXT/NXS\" regex=\"N.[TS]\" />\n" +
" <exclusion name=\"D Runs\" regex=\"DDDD\" />\n" +
" </peptide_exclusions>\n" +
" </peptide_filter>\n" +
" <peptide_libraries />\n" +
" <peptide_modifications>\n" +
" <static_modifications>\n" +
" <static_modification name=\"Test2\" aminoacid=\"M\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />\n" +
" <static_modification name=\"Test3\" aminoacid=\"K\" terminus=\"N\"\n" +
" formula=\"CH3ON2\" />\n" +
" </static_modifications>\n" +
" </peptide_modifications>\n" +
" </peptide_settings>\n" +
" <transition_settings>\n" +
" <transition_prediction precursor_mass_type=\"Average\" fragment_mass_type=\"Average\">\n" +
" <predict_collision_energy name=\"ABI\">\n" +
" <regression_ce charge=\"2\" slope=\"0.0431\" intercept=\"4.7556\" />\n" +
" </predict_collision_energy>\n" +
" <predict_declustering_potential name=\"Test1\" slope=\"0.5\" intercept=\"5\" />\n" +
" </transition_prediction>\n" +
" <transition_filter precursor_charges=\"2,3\" product_charges=\"1,2\"\n" +
" fragment_range_first=\"y3\" fragment_range_last=\"last y-ion - 1\"\n" +
" include_n_proline=\"true\" include_c_glu_asp=\"true\" auto_select=\"true\" />\n" +
" <transition_libraries ion_match_tolerance=\"0.5\" ion_count=\"3\" pick_from=\"all\" />\n" +
" <transition_integration/>" +
" <transition_instrument min_mz=\"52\" max_mz=\"1503\" />\n" +
" </transition_settings>\n" +
"</settings_summary>";
/// <summary>
/// Test of deserializing v0.1 settings, by deserializing versions written
/// by v0.1 and the current code, and checking for equality.
/// </summary>
[TestMethod]
public void SettingsSerialize_0_1_Test()
{
// ReSharper disable InconsistentNaming
XmlSerializer ser_0_1 = new XmlSerializer(typeof(SrmSettingsList));
XmlSerializer serCurrent = new XmlSerializer(typeof(SrmSettings));
using (TextReader reader_0_1 = new StringReader(SETTINGS_LIST_0_1))
using (TextReader readerCurrent = new StringReader(SETTINGS_CURRENT))
{
SrmSettings settings_0_1 = ((SrmSettingsList) ser_0_1.Deserialize(reader_0_1))[0];
SrmSettings settingsCurrent = (SrmSettings) serCurrent.Deserialize(readerCurrent);
AssertEx.SettingsCloned(settings_0_1, settingsCurrent);
}
// ReSharper restore InconsistentNaming
}
private const string SETTINGS_LIST_0_1 =
"<SrmSettingsList>\n" +
" <ArrayOfSrmSettings xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n" +
" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">\n" +
" <SrmSettings name=\"Default\">\n" +
" <peptide_settings>\n" +
" <enzyme name=\"LysN promisc\" cut=\"KASR\" no_cut=\"\" sense=\"N\" />\n" +
" <digest_settings max_missed_cleavages=\"1\" exclude_ragged_ends=\"true\" />\n" +
" <peptide_filter start=\"0\" min_length=\"5\" max_length=\"30\" min_transtions=\"4\"\n" +
" auto_select=\"true\">\n" +
" <peptide_exclusions>\n" +
" <exclusion name=\"Met\" regex=\"[M]\" />\n" +
" <exclusion name=\"NXT/NXS\" regex=\"N.[TS]\" />\n" +
" <exclusion name=\"D Runs\" regex=\"DDDD\" />\n" +
" </peptide_exclusions>\n" +
" </peptide_filter>\n" +
" <peptide_modifications>\n" +
" <static_modifications>\n" +
" <static_modification name=\"Test2\" aminoacid=\"M\"\n" +
" massdiff_monoisotopic=\"5\" massdiff_average=\"5.1\" />\n" +
" <static_modification name=\"Test3\" aminoacid=\"K\" terminus=\"N\"\n" +
" formula=\"CH3ON2\" />\n" +
" </static_modifications>\n" +
" </peptide_modifications>\n" +
" </peptide_settings>\n" +
" <transition_settings>\n" +
" <transition_prediction precursor_mass_type=\"Average\" fragment_mass_type=\"Average\">\n" +
" <predict_collision_energy name=\"ABI\">\n" +
" <regressions>\n" +
" <regression_ce slope=\"0.0431\" intercept=\"4.7556\" charge=\"2\" />\n" +
" </regressions>\n" +
" </predict_collision_energy>\n" +
// Retention time moved from transition to prediction
" <predict_retention_time name=\"Bovine Standard (100A)\" calculator=\"SSRCalc 3.0 (100A)\"\n" +
" time_window=\"13.6\">\n" +
" <regression_rt slope=\"1.681\" intercept=\"-6.247\" />\n" +
" </predict_retention_time>\n" +
" <predict_declustering_potential slope=\"0.5\" intercept=\"5\" name=\"Test1\" />\n" +
" </transition_prediction>\n" +
" <transition_filter precursor_charges=\"2,3\" product_charges=\"1,2\"\n" +
" fragment_range_first=\"y3\" fragment_range_last=\"last y-ion - 1\"\n" +
" include_n_prolene=\"true\" include_c_glu_asp=\"true\" auto_select=\"true\" />\n" +
" <transition_instrument min_mz=\"52\" max_mz=\"1503\" />\n" +
" </transition_settings>\n" +
" </SrmSettings>\n" +
" </ArrayOfSrmSettings>\n" +
"</SrmSettingsList>";
/// <summary>
/// Test de/serialization of all the other types of lists stored
/// in user.config.
/// </summary>
[TestMethod]
public void SettingsSerializeListsTest()
{
AssertEx.Serialization<EnzymeList>(SETTINGS_ENZYME_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<StaticModList>(SETTINGS_STATIC_MOD_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<HeavyModList>(SETTINGS_HEAVY_MOD_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<PeptideExcludeList>(SETTINGS_EXCLUSIONS_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<CollisionEnergyList>(SETTINGS_CE_LIST, (t, c) => CheckSettingsList(t, c, true), false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<DeclusterPotentialList>(SETTINGS_DP_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
AssertEx.Serialization<RetentionTimeList>(SETTINGS_RT_LIST, CheckSettingsList, false); // Not part of a Skyline document, don't check against schema
}
private const string SETTINGS_ENZYME_LIST =
"<EnzymeList>\n" +
" <enzyme name=\"Trypsin\" cut=\"KR\" no_cut=\"P\" sense=\"C\" />\n" +
" <enzyme name=\"Trypsin/P\" cut=\"KR\" no_cut=\"\" sense=\"C\" />\n" +
" <enzyme name=\"Chymotrypsin\" cut=\"FWYM\" no_cut=\"P\" sense=\"C\" />\n" +
" <enzyme name=\"AspN\" cut=\"D\" no_cut=\"\" sense=\"N\" />\n" +
" <enzyme name=\"Trypsin AspN\" cut_c=\"KR\" no_cut_c=\"P\" cut_n=\"D\" no_cut_n=\"\" />\n" +
"</EnzymeList>";
private const string SETTINGS_STATIC_MOD_LIST =
"<StaticModList>\n" +
" <static_modification name=\"Test1\" aminoacid=\"C\"\n" +
" formula=\"C2H3 - ON4\" />\n" +
" <static_modification name=\"Test2\" terminus=\"N\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />\n" +
" <static_modification name=\"Test3\" aminoacid=\"K\" terminus=\"N\"\n" +
" formula=\"CH3ON2\" />\n" +
"</StaticModList>";
private const string SETTINGS_HEAVY_MOD_LIST =
"<HeavyModList>\n" +
" <static_modification name=\"Test1\" aminoacid=\"C\"\n" +
" formula=\"C2H3 - ON4\" />\n" +
" <static_modification name=\"Test2\" terminus=\"N\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />\n" +
" <static_modification name=\"Test3\" aminoacid=\"K\" terminus=\"N\"\n" +
" formula=\"CH3ON2\" />\n" +
"</HeavyModList>";
private const string SETTINGS_EXCLUSIONS_LIST =
"<PeptideExcludeList>\n" +
" <exclusion name=\"Cys\" regex=\"[C]\" />\n" +
" <exclusion name=\"Met\" regex=\"[M]\" />\n" +
" <exclusion name=\"His\" regex=\"[H]\" />\n" +
" <exclusion name=\"NXT/NXS\" regex=\"N.[TS]\" />\n" +
" <exclusion name=\"RP/KP\" regex=\"[RK]P\" />\n" +
" <exclusion name=\"D Runs\" regex=\"DDDD\" />\n" +
"</PeptideExcludeList>";
private const string SETTINGS_CE_LIST =
"<CollisionEnergyList>\n" +
" <predict_collision_energy name=\"Thermo\">\n" +
" <regression_ce charge=\"2\" slope=\"0.034\" intercept=\"3.314\" />\n" +
" <regression_ce charge=\"3\" slope=\"0.044\" intercept=\"3.314\" />\n" +
" </predict_collision_energy>\n" +
" <predict_collision_energy name=\"ABI\">\n" +
" <regression_ce charge=\"2\" slope=\"0.0431\" intercept=\"4.7556\" />\n" +
" </predict_collision_energy>\n" +
"</CollisionEnergyList>";
private const string SETTINGS_DP_LIST =
"<DeclusterPotentialList>\n" +
" <predict_declustering_potential name=\"None\" slope=\"0\" intercept=\"0\" />\n" +
" <predict_declustering_potential name=\"ABI\" slope=\"0.0729\" intercept=\"31.117\" />\n" +
" <predict_declustering_potential name=\"Test1\" slope=\"0.5\" intercept=\"5\" />\n" +
"</DeclusterPotentialList>";
private const string SETTINGS_RT_LIST =
"<RetentionTimeList>\n" +
" <predict_retention_time name=\"None\" time_window=\"0\">\n" +
" <regression_rt slope=\"0\" intercept=\"0\" />\n" +
" </predict_retention_time>\n" +
" <predict_retention_time name=\"Bovine Standard (100A)\" calculator=\"SSRCalc 3.0 (100A)\"\n" +
" time_window=\"13.6\">\n" +
" <regression_rt slope=\"1.681\" intercept=\"-6.247\" />\n" +
" </predict_retention_time>\n" +
"</RetentionTimeList>";
/// <summary>
/// Test XML deserialization where major parts are missing.
/// </summary>
[TestMethod]
public void SettingsSerializeStubTest()
{
XmlSerializer ser = new XmlSerializer(typeof(SrmSettings));
using (TextReader reader = new StringReader(XML_DIRECTIVE + string.Format(SETTINGS_STUBS, SrmSettingsList.DefaultName)))
{
var target = SrmSettingsList.GetDefault();
var copy = (SrmSettings) ser.Deserialize(reader);
Assert.AreSame(target.PeptideSettings.Enzyme, copy.PeptideSettings.Enzyme);
Assert.AreSame(target.PeptideSettings.DigestSettings, copy.PeptideSettings.DigestSettings);
AssertEx.Cloned(target.PeptideSettings.Prediction, copy.PeptideSettings.Prediction);
Assert.AreSame(target.PeptideSettings.Filter, copy.PeptideSettings.Filter);
Assert.AreSame(target.PeptideSettings.Libraries, copy.PeptideSettings.Libraries);
Assert.AreSame(target.PeptideSettings.Modifications, copy.PeptideSettings.Modifications);
AssertEx.Cloned(target.PeptideSettings, copy.PeptideSettings);
Assert.AreSame(target.TransitionSettings.Prediction, copy.TransitionSettings.Prediction);
Assert.AreSame(target.TransitionSettings.Filter, copy.TransitionSettings.Filter);
Assert.AreSame(target.TransitionSettings.Libraries, copy.TransitionSettings.Libraries);
Assert.AreSame(target.TransitionSettings.Instrument, copy.TransitionSettings.Instrument);
AssertEx.Cloned(target.TransitionSettings, copy.TransitionSettings);
AssertEx.Cloned(target, copy);
}
}
// This string should deserialize successfully to the default SRM settings.
private const string SETTINGS_STUBS =
"<settings_summary name=\"{0}\">\n" +
" <peptide_settings>\n" +
" <peptide_prediction/>\n" +
" </peptide_settings>\n" +
" <transition_settings/>\n" +
"</settings_summary>";
/// <summary>
/// Test error handling in XML deserialization of <see cref="Enzyme"/>.
/// </summary>
[TestMethod]
public void SerializeEnzymeTest()
{
// Valid first
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (1)\" cut=\"M\" no_cut=\"P\" sense=\"C\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (2)\" cut=\"M\" sense=\"N\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (3)\" cut=\"ACDEFGHIKLMNPQRSTVWY\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (4)\" cut_c=\"M\" cut_n=\"K\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (4)\" cut_c=\"M\" no_cut_c=\"N\" cut_n=\"K\" no_cut_n=\"P\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (1)\" cut_c=\"M\" no_cut_c=\"P\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (1)\" cut_n=\"M\" no_cut_n=\"P\" />");
AssertEx.DeserializeNoError<Enzyme>("<enzyme name=\"Validate (1)\" cut_n=\"M\" no_cut_n=\"P\" semi=\"True\"/>");
// Missing parameters
AssertEx.DeserializeError<Enzyme>("<enzyme/>");
// No name
AssertEx.DeserializeError<Enzyme>("<enzyme cut=\"KR\" no_cut=\"P\" sense=\"C\" />");
// No cleavage
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"\" no_cut=\"P\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"\" no_cut=\"P\" sense=\"N\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"\" no_cut_c=\"P\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_n=\"\" no_cut_n=\"P\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"M\" no_cut_c=\"N\" cut_n=\"\" no_cut_n=\"P\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"\" no_cut_c=\"N\" cut_n=\"K\" no_cut_n=\"P\" />");
// Bad cleavage
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"X\" no_cut=\"P\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"MKRM\" no_cut=\"P\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"MKRM\" no_cut_c=\"P\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_n=\"MKRM\" no_cut_n=\"P\" />");
// Bad restrict
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"KR\" no_cut=\"+\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"KR\" no_cut=\"AMRGR\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_c=\"KR\" no_cut_c=\"+\" sense=\"C\" />");
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut_n=\"KR\" no_cut_n=\"AMRGR\" sense=\"C\" />");
// Bad sense
AssertEx.DeserializeError<Enzyme>("<enzyme name=\"Trypsin\" cut=\"KR\" no_cut=\"P\" sense=\"X\" />");
}
/// <summary>
/// Test Enzyme digestion
/// </summary>
[TestMethod]
public void EnzymeDigestionTest()
{
const string sequence = "KKRFAHFAHPRFAHKPAHKAHMERMLSTKKKRSTTKRK";
var enzymeTrypsin = new Enzyme("Trypsin", "KR", "P");
DigestsTo(sequence, false, 12, enzymeTrypsin, "FAHFAHPR", "FAHKPAHK", "AHMER", "MLSTK", "STTK"); // NB enzyme.CountCleavagePoints gave 8 rather than 12 prior to Feb 2016
DigestsTo(sequence, true, 12, enzymeTrypsin, "FAHKPAHK", "AHMER");
var enzymeReverseTrypsin = new Enzyme("R-Trypsin", "KR", "P", SequenceTerminus.N);
DigestsTo(sequence, false, 12, enzymeReverseTrypsin, "RFAHFAHPRFAH", "KPAH", "KAHME", "RMLST", "RSTT");
DigestsTo(sequence, true, 12, enzymeReverseTrypsin, "KPAH", "KAHME");
var enzymeUnrestrictedTrypsin = new Enzyme("U-Trypsin", "KR", null);
DigestsTo(sequence, false, 13, enzymeUnrestrictedTrypsin, "FAHFAHPR", "FAHK", "PAHK", "AHMER", "MLSTK", "STTK");
DigestsTo(sequence, true, 13, enzymeUnrestrictedTrypsin, "FAHK", "PAHK", "AHMER");
var enzymeUnReverseTrypsin = new Enzyme("U-R-Trypsin", "KR", null, SequenceTerminus.N);
DigestsTo(sequence, false, 13, enzymeUnReverseTrypsin, "RFAHFAHP", "RFAH", "KPAH", "KAHME", "RMLST", "RSTT");
DigestsTo(sequence, true, 13, enzymeUnReverseTrypsin,"RFAH", "KPAH", "KAHME");
var enzymeBothTrypsinR = new Enzyme("B-TrypsinR", "R", "P", "K", "P");
DigestsTo(sequence, false, 12, enzymeBothTrypsinR, "KR", "FAHFAHPR", "FAH", "KPAH", "KAHMER", "MLST", "KR", "STT", "KR");
DigestsTo(sequence, true, 12, enzymeBothTrypsinR, "KR", "FAHFAHPR", "FAH", "KPAH", "KAHMER", "STT", "KR");
var enzymeBothTrypsinK = new Enzyme("B-TrypsinK", "K", "P", "R", "P");
DigestsTo(sequence, false, 8, enzymeBothTrypsinK, "RFAHFAHPRFAHKPAHK", "AHME", "RMLSTK", "RSTTK", "RK");
DigestsTo(sequence, true, 8, enzymeBothTrypsinK, "AHME", "RK");
var enzymeUnrestrictedBothTrypsin = new Enzyme("U-B-Trypsin", "K", null, "R", null);
DigestsTo(sequence, false, 10, enzymeUnrestrictedBothTrypsin, "RFAHFAHP", "RFAHK", "PAHK", "AHME", "RMLSTK", "RSTTK", "RK");
DigestsTo(sequence, true, 10, enzymeUnrestrictedBothTrypsin, "RFAHK", "PAHK", "AHME", "RK");
var enzymeTrypsinSemi = new Enzyme("Trypsin (semi)", "KR", "P", null, null, true);
DigestsTo(sequence, false, 12, null, 4, enzymeTrypsinSemi,
"FAHFAHPR",
"FAHFAHP",
"FAHFAH",
"FAHFA",
"FAHF",
"AHFAHPR",
"HFAHPR",
"FAHPR",
"AHPR",
"FAHKPAHK",
"FAHKPAH",
"FAHKPA",
"FAHKP",
"FAHK",
"AHKPAHK",
"HKPAHK",
"KPAHK",
"PAHK",
"AHMER",
"AHME",
"HMER",
"MLSTK",
"MLST",
"LSTK",
"STTK");
DigestsTo("ASKSUPAHLONGNONCLEAVINGSEQUENCERCPEPTIDE", false, 2, 8, 5, enzymeTrypsinSemi,
"SUPAHLON",
"SUPAHLO",
"SUPAHL",
"SUPAH",
"EQUENCER",
"QUENCER",
"UENCER",
"ENCER",
"CPEPTIDE",
"CPEPTID",
"CPEPTI",
"CPEPT",
"PEPTIDE",
"EPTIDE",
"PTIDE");
// Make sure Equals and GetHashCode are implemented to include the new semi bool
var trypCompare = new Enzyme("Trypsin", "KR", "P", null, null);
var trypSemiCompare = new Enzyme("Trypsin", "KR", "P", null, null, true);
Assert.AreNotEqual(trypCompare, trypSemiCompare);
Assert.AreNotEqual(trypCompare.GetHashCode(), trypSemiCompare.GetHashCode());
// And serialization is implemented to include new property
AssertEx.Serializable(enzymeTrypsinSemi, (e1, e2) =>
{
Assert.AreEqual(e1, e2);
Assert.AreNotSame(e1, e2);
});
}
private static void DigestsTo(string sequence, bool excludeRaggedEnds, int expectedCleavagePoints, Enzyme enzyme, params string[] pepSeqs)
{
DigestsTo(sequence, excludeRaggedEnds, expectedCleavagePoints, null, null, enzyme, pepSeqs);
}
private static void DigestsTo(string sequence, bool excludeRaggedEnds, int expectedCleavagePoints, int? maxPepLen, int? minPepLen, Enzyme enzyme, params string[] pepSeqs)
{
var fastaSeq = new FastaSequence("p", "d", new ProteinMetadata[0], sequence);
var digestSettings = new DigestSettings(0, excludeRaggedEnds);
var peptides = "Missed " + enzyme.CountCleavagePoints(sequence) + " " +
string.Join(" ", enzyme.Digest(fastaSeq, digestSettings, maxPepLen, minPepLen).Select(p => p.Target));
var expected = "Missed " + expectedCleavagePoints + " " + string.Join(" ", pepSeqs);
Assert.AreEqual(expected, peptides);
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="DigestSettings"/>.
/// </summary>
[TestMethod]
public void SerializeDigestTest()
{
// Valid first
AssertEx.DeserializeNoError<DigestSettings>("<digest_settings max_missed_cleavages=\"0\" exclude_ragged_ends=\"true\" />");
AssertEx.DeserializeNoError<DigestSettings>("<digest_settings max_missed_cleavages=\"9\" exclude_ragged_ends=\"false\" />");
AssertEx.DeserializeNoError<DigestSettings>("<digest_settings/>");
// Errors
AssertEx.DeserializeError<DigestSettings>("<digest_settings max_missed_cleavages=\"10\" exclude_ragged_ends=\"true\" />");
AssertEx.DeserializeError<DigestSettings>("<digest_settings max_missed_cleavages=\"-1\" exclude_ragged_ends=\"true\" />");
AssertEx.DeserializeError<DigestSettings>("<digest_settings max_missed_cleavages=\"0\" exclude_ragged_ends=\"yes\" />");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="DigestSettings"/>.
/// </summary>
[TestMethod]
public void SerializePeptidePredictionTest()
{
// Valid first
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction />");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction use_measured_rts=\"false\" />");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction use_measured_rts=\"true\" />");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction use_measured_rts=\"true\" measured_rt_window=\"2.0\"/>");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction use_measured_rts=\"false\" measured_rt_window=\"2.0\"/>");
AssertEx.DeserializeNoError<PeptidePrediction>("<peptide_prediction measured_rt_window=\"5.0\"/>");
// Errors (out of range)
AssertEx.DeserializeError<PeptidePrediction>("<peptide_prediction measured_rt_window=\"0.01\"/>");
AssertEx.DeserializeError<PeptidePrediction>("<peptide_prediction measured_rt_window=\"600.0\"/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="PeptideFilter"/>.
/// </summary>
[TestMethod]
public void SerializePeptideFilterTest()
{
// Valid first
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter min_length=\"2\" max_length=\"200\" min_transtions=\"1\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"none\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"protein\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"gene\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"species\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"100\" min_length=\"2\" max_length=\"5\" auto_select=\"true\"/>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\" auto_select=\"true\"><peptide_exclusions/></peptide_filter>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Valid\" regex=\"^[^C]$\"/></peptide_exclusions></peptide_filter>");
AssertEx.DeserializeNoError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Valid\" regex=\"M\\[\" include=\"true\" match_mod_sequence=\"true\"/></peptide_exclusions></peptide_filter>");
// Missing parameters
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter/>");
// min_length range
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"1\" max_length=\"30\"/>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"500\" max_length=\"30\"/>");
// max_length range
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"10\" max_length=\"8\"/>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"4\" max_length=\"4\"/>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter min_length=\"8\" max_length=\"500\"/>");
// start range
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"-1\" min_length=\"8\" max_length=\"25\"/>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"50000\" min_length=\"8\" max_length=\"25\"/>");
// bad exclusions
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Noex\"/></peptide_exclusions></peptide_filter>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion regex=\"PX\"/></peptide_exclusions></peptide_filter>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Invalid\" regex=\"!(M[)\" match_mod_sequence=\"true\"/></peptide_exclusions></peptide_filter>");
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"25\" min_length=\"8\" max_length=\"25\">" +
"<peptide_exclusions><exclusion name=\"Invalid\" regex=\"M\\[\" include=\"T\" match_mod_sequence=\"T\"/></peptide_exclusions></peptide_filter>");
// bad peptide uniqueness mode
AssertEx.DeserializeError<PeptideFilter>("<peptide_filter start=\"0\" min_length=\"100\" max_length=\"100\" min_transtions=\"20\" auto_select=\"false\" unique_by = \"nonsense\"/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="PeptideModifications"/>.
/// </summary>
[TestMethod]
public void SerializePeptideModificationsTest()
{
// Valid first
AssertEx.DeserializeNoError<PeptideModifications>("<peptide_modifications><static_modifications/></peptide_modifications>");
AssertEx.DeserializeNoError<PeptideModifications>("<peptide_modifications/>");
var mods = AssertEx.Deserialize<PeptideModifications>("<peptide_modifications internal_standard=\"none\"><static_modifications/><heavy_modifications/></peptide_modifications>");
Assert.AreEqual(0, mods.InternalStandardTypes.Count);
mods = AssertEx.Deserialize<PeptideModifications>("<peptide_modifications internal_standard=\"light\"></peptide_modifications>");
Assert.AreEqual(1, mods.InternalStandardTypes.Count);
Assert.AreEqual("light", mods.InternalStandardTypes[0].Name);
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="StaticMod"/>.
/// </summary>
[TestMethod]
public void SerializeStaticModTest()
{
const string structuralModificationType = "structural_modification_type";
const string isotopeModificationType = "isotope_modification_type";
// Valid first
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"R\" terminus=\"C\" formula=\"C2H3ON15PS\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" terminus=\"N\" formula=\"-ON4\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"P\" formula=\"C23 - O N P14\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"P\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Oxidation\" aminoacid=\"M, D\" formula=\"O\" variable=\"true\"/>", true, true, structuralModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Mod\" formula=\"C23N\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"15N\" label_15N=\"true\" />", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Heavy K\" aminoacid=\"K\" label_13C=\"true\" label_15N=\"true\" label_18O=\"true\" label_2H=\"true\"/>", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Aqua\" aminoacid=\"K, R\" label_13C=\"true\" label_15N=\"true\" label_18O=\"true\" label_2H=\"true\"/>", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss1\" aminoacid=\"T, S\" formula=\"HPO3\"><fragment_loss formula=\"HP3O4\"/></static_modification>", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss3\" aminoacid=\"T, S\" formula=\"HPO3\" explicit_decl=\"true\"><fragment_loss formula=\"HP3O4\"/><fragment_loss formula=\"H2O\"/><fragment_loss formula=\"NH3\"/></static_modification>", true, true, isotopeModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"Loss-only\" aminoacid=\"K, R, Q, N\"><potential_loss formula=\"NH3\"/></static_modification>", true, true, structuralModificationType);
AssertEx.DeserializeNoError<StaticMod>("<static_modification name=\"LossInclusion\" aminoacid=\"T, S\" formula=\"HPO3\"><potential_loss formula=\"HP3O4\" inclusion=\"Always\"/><potential_loss formula=\"HP2O3\" inclusion=\"Library\"/><potential_loss formula=\"HP1O2\" inclusion=\"Never\"/></static_modification>", true, true, structuralModificationType);
// Missing parameters
AssertEx.DeserializeError<StaticMod>("<static_modification />");
// Bad amino acid
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"X\" formula=\"C23N\" />");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"KR\" formula=\"C23N\" />");
// Bad terminus
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" terminus=\"X\" formula=\"C23N\" />");
// Bad formula
AssertEx.DeserializeError<StaticMod, ArgumentException>("<static_modification name=\"Mod\" aminoacid=\"K\" formula=\"C23NHe2\" />");
// Terminal label without amino acid
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"15N\" terminus=\"C\" label_13C=\"true\"/>");
// Formula and labeled atoms
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"15N\" label_15N=\"true\" formula=\"C23N\" />");
// Missing formula and masses
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"R\" />");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"R\" formula=\"\" />");
// Both formula and masses
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"P\" formula=\"C23N\" massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />");
// Bad amino acid
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"A, B, C\" />");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" aminoacid=\"DM\" />");
// Variable with no amino acid
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Mod\" variable=\"true\" />");
// Loss only failures
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Loss-only\" aminoacid=\"K, R, Q, N\" variable=\"true\"><potential_loss formula=\"NH3\"/></static_modification>");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"Loss-only\" aminoacid=\"K, R, Q, N\" explicit_decl=\"true\"><potential_loss formula=\"NH3\"/></static_modification>");
AssertEx.DeserializeError<StaticMod>("<static_modification name=\"LossInclusion\" aminoacid=\"T, S\" formula=\"HPO3\"><potential_loss formula=\"HP3O4\" inclusion=\"Sometimes\"/></static_modification>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="FragmentLoss"/>.
/// </summary>
[TestMethod]
public void SerializeFragmentLossTest()
{
// Valid first
AssertEx.DeserializeNoError<FragmentLoss>("<potential_loss formula=\"H2O\"/>");
AssertEx.DeserializeNoError<FragmentLoss>("<potential_loss formula=\"HCO3\"/>");
AssertEx.DeserializeNoError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"5\"\n" +
" massdiff_average=\"5.1\" />");
// Negative formula
AssertEx.DeserializeError<FragmentLoss>("<potential_loss formula=\"-H2O\"/>");
// Too big formula
AssertEx.DeserializeError<FragmentLoss>("<potential_loss formula=\"N393\"/>");
// Bad formula
AssertEx.DeserializeError<FragmentLoss, ArgumentException>("<potential_loss formula=\"H3Mx5Cl5\"/>");
// Constant mass out of range
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"" + FragmentLoss.MIN_LOSS_MASS / 2 + "\"\n" +
" massdiff_average=\"1\" />");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"1\"\n" +
" massdiff_average=\"" + FragmentLoss.MIN_LOSS_MASS / 2 + "\" />");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"" + (FragmentLoss.MAX_LOSS_MASS + 1) + "\"\n" +
" massdiff_average=\"1\" />");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"1\"\n" +
" massdiff_average=\"" + (FragmentLoss.MAX_LOSS_MASS + 1) + "\" />");
// Missing information
AssertEx.DeserializeError<FragmentLoss>("<potential_loss/>");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_monoisotopic=\"1\" />");
AssertEx.DeserializeError<FragmentLoss>("<potential_loss massdiff_average=\"1\" />");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="TransitionPrediction"/>.
/// </summary>
[TestMethod]
public void SerializeTransitionPredictionTest()
{
// Valid first
AssertEx.DeserializeNoError<TransitionPrediction>("<transition_prediction>" +
"<predict_collision_energy name=\"Pass\">" +
"<regressions><regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" /></regressions>" +
"</predict_collision_energy></transition_prediction>");
// Bad mass type
AssertEx.DeserializeError<TransitionPrediction>("<transition_prediction precursor_mass_type=\"Bad\">" +
"<predict_collision_energy name=\"Fail\">" +
"<regressions><regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" /></regressions>" +
"</predict_collision_energy></transition_prediction>");
// No collision energy regression (Allowed during 3.7.1 development)
AssertEx.DeserializeNoError<TransitionPrediction>("<transition_prediction/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="CollisionEnergyRegression"/>.
/// </summary>
[TestMethod]
public void SerializeCollisionEnergyTest()
{
// Valid first
AssertEx.DeserializeNoError<CollisionEnergyRegression>("<predict_collision_energy name=\"Pass\">" +
"<regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" />" +
"</predict_collision_energy>");
AssertEx.DeserializeNoError<CollisionEnergyRegression>("<predict_collision_energy name=\"Pass\">" +
"<regression_ce charge=\"1\" /><regression_ce charge=\"2\" /><regression_ce charge=\"3\" /><regression_ce charge=\"4\" />" +
"</predict_collision_energy>");
// v0.1 format
AssertEx.DeserializeNoError<CollisionEnergyRegression>("<predict_collision_energy name=\"Pass\">" +
"<regressions><regression_ce /></regressions>" +
"</predict_collision_energy>");
// No regressions
AssertEx.DeserializeError<CollisionEnergyRegression>("<predict_collision_energy name=\"Fail\" />");
// Repeated charge
AssertEx.DeserializeError<CollisionEnergyRegression>("<predict_collision_energy name=\"Fail\">" +
"<regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" />" +
"<regression_ce slope=\"0.1\" intercept=\"4.7\" charge=\"2\" />" +
"</predict_collision_energy>");
}
[TestMethod]
public void SerializeCollisionEnergyListTest()
{
XmlSerializer ser = new XmlSerializer(typeof(CollisionEnergyList));
using (TextReader reader = new StringReader(
"<CollisionEnergyList>\n" +
" <predict_collision_energy name=\"Thermo\">\n" +
" <regression_ce charge=\"2\" slope=\"0.034\" intercept=\"3.314\" />\n" +
" <regression_ce charge=\"3\" slope=\"0.044\" intercept=\"3.314\" />\n" +
" </predict_collision_energy>\n" +
" <predict_collision_energy name=\"ABI\">\n" +
" <regression_ce charge=\"2\" slope=\"0.0431\" intercept=\"4.7556\" />\n" +
" </predict_collision_energy>\n" +
"</CollisionEnergyList>"))
{
var listCE = (CollisionEnergyList) ser.Deserialize(reader);
Assert.AreSame(CollisionEnergyList.NONE, listCE[0]);
Assert.AreEqual(listCE.GetDefaults(listCE.RevisionIndexCurrent).Count() + 2, listCE.Count);
Assert.AreEqual(listCE.RevisionIndexCurrent, listCE.RevisionIndex);
foreach (var regressionCE in listCE.GetDefaults(listCE.RevisionIndexCurrent))
{
CollisionEnergyRegression regressionTmp;
Assert.IsTrue(listCE.TryGetValue(regressionCE.GetKey(), out regressionTmp));
Assert.AreEqual(regressionCE, regressionTmp);
}
}
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="DeclusteringPotentialRegression"/>.
/// </summary>
[TestMethod]
public void SerializeDeclusteringPotentialTest()
{
// Valid first
AssertEx.DeserializeNoError<DeclusteringPotentialRegression>("<predict_declustering_potential name=\"Pass\"" +
" slope=\"0.1\" intercept=\"4.7\" />");
AssertEx.DeserializeNoError<DeclusteringPotentialRegression>("<predict_declustering_potential name=\"Pass\" />");
// No name
AssertEx.DeserializeError<DeclusteringPotentialRegression>("<predict_declustering_potential" +
" slope=\"0.1\" intercept=\"4.7\" />");
// Non-numeric parameter
AssertEx.DeserializeError<DeclusteringPotentialRegression>("<predict_declustering_potential name=\"Pass\"" +
" slope=\"X\" intercept=\"4.7\" />");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="TransitionFilter"/>.
/// </summary>
[TestMethod]
public void SerializeTransitionFilterTest()
{
// Valid first
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" " +
"include_n_prolene=\"true\" include_c_glu_asp=\"true\" auto_select=\"true\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"1,2,3,4,5,6,7,8,9\" product_charges=\"1,2,3,4,5\" " +
"fragment_range_first=\"(m/z > precursor) - 2\" fragment_range_last=\"start + 4\" " +
"include_n_prolene=\"false\" include_c_glu_asp=\"false\" auto_select=\"false\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_types=\"P,Y,Z\" fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_types=\"y,b,c,z,a,x,p\" fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
// v0.7 measured_ion examples
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\">" +
"<measured_ion name=\"N-terminal to Proline\" cut=\"P\" sense=\"N\"/>" +
"<measured_ion name=\"Reporter Test\" formula=\"C4H2O\" charges=\"1\"/>" +
"</transition_filter>");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" precursor_mz_window=\"" + TransitionFilter.MAX_EXCLUSION_WINDOW + "\"/>");
// Bad charges
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"0\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"0\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2,2\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"3\" product_charges=\"" + (Transition.MAX_PRODUCT_CHARGE + 1) + "\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"" + (TransitionGroup.MAX_PRECURSOR_CHARGE + 1) + "\" product_charges=\"2\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter " +
"fragment_range_first=\"y1\" fragment_range_last=\"last y-ion\" />");
// Bad ion type
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_types=\"precursor\" fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
AssertEx.DeserializeNoError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_types=\"d,w\" fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" />");
// Bad fragments
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"b10\" fragment_range_last=\"last y-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"y1\" fragment_range_last=\"last z-ion\" />");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" />");
// Out of range precursor m/z window
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" precursor_mz_window=\"" + (TransitionFilter.MAX_EXCLUSION_WINDOW*2).ToString(CultureInfo.InvariantCulture) + "\"/>");
AssertEx.DeserializeError<TransitionFilter>("<transition_filter precursor_charges=\"2\" product_charges=\"1\" " +
"fragment_range_first=\"m/z > precursor\" fragment_range_last=\"last y-ion - 3\" precursor_mz_window=\"" + (TransitionFilter.MIN_EXCLUSION_WINDOW/2).ToString(CultureInfo.InvariantCulture) + "\"/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="MeasuredIon"/>.
/// </summary>
[TestMethod]
public void SerializeMeasuredIonTest()
{
// Valid first
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"C-terminal Glu or Asp restricted\"" +
" cut=\"ED\" no_cut=\"A\" sense=\"C\" min_length=\"" + MeasuredIon.MAX_MIN_FRAGMENT_LENGTH.ToString(CultureInfo.InvariantCulture) + "\"/>");
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"N-terminal many\"" +
" cut=\"ACPESTID\" no_cut=\"ACPESTID\" sense=\"N\" min_length=\"" + MeasuredIon.MIN_MIN_FRAGMENT_LENGTH.ToString(CultureInfo.InvariantCulture) + "\"/>");
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"P\" sense=\"N\"/>");
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"H4P2O5\" charges=\"1\"/>");
// Old style (as detected by use of "charges" instead of "charge"), mass is assumed to be M-H
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + MeasuredIon.MIN_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" mass_average=\"" + (MeasuredIon.MAX_REPORTER_MASS-2*BioMassCalc.MassProton).ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
// Modern style, mass is assumed to be the actual ion mass (which will decrease by charge*massElectron)
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + (MeasuredIon.MIN_REPORTER_MASS).ToString(CultureInfo.InvariantCulture) + "\" mass_average=\"" + (MeasuredIon.MAX_REPORTER_MASS).ToString(CultureInfo.InvariantCulture) + "\" charge=\"1\"/>");
AssertEx.DeserializeNoError<MeasuredIon>("<measured_ion name =\"Reporter Formula\" formula = \"H2O\" charges = \"1\" optional = \"true\"/>");
// No name
AssertEx.DeserializeError<MeasuredIon>("<measured_ion" +
" cut=\"P\" sense=\"N\"/>");
// No cut attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" sense=\"N\"/>");
// Invalid cut attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"b\" sense=\"N\"/>");
// Invalid no_cut attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"P\" no_cut=\"b\" sense=\"N\"/>");
// Missing sense attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"P\"/>");
// Invalid sense attribute
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Minimal\"" +
" cut=\"P\" sense=\"x\"/>");
// Min length too short
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"C-terminal Glu or Asp restricted\"" +
" cut=\"ED\" no_cut=\"A\" sense=\"C\" min_length=\"" + (MeasuredIon.MIN_MIN_FRAGMENT_LENGTH - 1).ToString(CultureInfo.InvariantCulture) + "\"/>");
// Min length too long
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"C-terminal Glu or Asp restricted\"" +
" cut=\"ED\" no_cut=\"A\" sense=\"C\" min_length=\"" + (MeasuredIon.MAX_MIN_FRAGMENT_LENGTH + 1).ToString(CultureInfo.InvariantCulture) + "\"/>");
// Reporter with bad formulas
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"\" charges=\"1\"/>");
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"He3\" charges=\"1\"/>");
// Reporter with formulas producing out of range masses
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"H2\" charges=\"1\"/>");
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter formula\"" +
" formula=\"HP230O200\" charges=\"1\"/>");
// Reporter without formula and without both masses
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + MeasuredIon.MIN_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_average=\"" + MeasuredIon.MAX_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
// Reporter without formula and out of range masses
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + (MeasuredIon.MIN_REPORTER_MASS - 0.1).ToString(CultureInfo.InvariantCulture) + "\" mass_average=\"" + MeasuredIon.MAX_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
AssertEx.DeserializeError<MeasuredIon>("<measured_ion name=\"Reporter numeric\"" +
" mass_monoisotopic=\"" + MeasuredIon.MIN_REPORTER_MASS.ToString(CultureInfo.InvariantCulture) + "\" mass_average=\"" + (MeasuredIon.MAX_REPORTER_MASS + 0.1).ToString(CultureInfo.InvariantCulture) + "\" charges=\"1\"/>");
}
private const string LEGACY_LOW_ACCURACY = "Low Accuracy";
private const string LEGACY_HIGH_ACCURACY = "High Accuracy";
/// <summary>
/// Test error handling in XML deserialization of <see cref="TransitionInstrument"/>.
/// </summary>
[TestMethod]
public void SerializeTransitionInstrumentTest()
{
// Valid first
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"1503\" />");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" />");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" mz_match_tolerance=\"0.4\"/>");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" mz_match_tolerance=\"0.001\"/>");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" dynamic_min=\"true\"/>");
// Backward compatibility with v0.7.1
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"None\"/>");
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"Single\"/>", false); // Use defaults
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"Multiple\"/>", false); // Use defaults
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"Single\" precursor_filter=\"0.11\" product_filter_type=\"" +
LEGACY_LOW_ACCURACY + "\" product_filter=\"1\"/>", false);
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_filter_type=\"" +
LEGACY_HIGH_ACCURACY + "\" product_filter=\"10\"/>", false);
// Ignore extra filter values when None specified for precursor filter type
AssertEx.DeserializeNoError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"2000\" dynamic_min=\"true\" precursor_filter_type=\"None\" precursor_filter=\"0.11\" product_filter_type=\"" +
LEGACY_LOW_ACCURACY + "\" product_filter=\"1\"/>", false);
// Empty element
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument />");
// Out of range values
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"-1\" max_mz=\"1503\" />");
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"52\" max_mz=\"100\" />");
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" mz_match_tolerance=\"0\"/>");
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" mz_match_tolerance=\"0.65\"/>");
AssertEx.DeserializeError<TransitionInstrument>("<transition_instrument min_mz=\"10\" max_mz=\"5000\" dynamic_min=\"maybe\"/>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="TransitionFullScan"/>.
/// </summary>
[TestMethod]
public void SerializeTransitionFullScanTest()
{
string validLoRes = ToXml((TransitionFullScan.MIN_LO_RES + TransitionFullScan.MAX_LO_RES) / 2);
string validHiRes = ToXml((TransitionFullScan.MIN_HI_RES + TransitionFullScan.MAX_HI_RES) / 2);
string validHiResMz = ToXml((TransitionFullScan.MIN_RES_MZ + TransitionFullScan.MAX_RES_MZ) / 2);
string validPPM = ToXml((TransitionFullScan.MIN_CENTROID_PPM + TransitionFullScan.MAX_CENTROID_PPM) / 2);
// Valid first
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan />");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.qit +"\" " +
"precursor_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.tof + "\" " +
"precursor_res=\"" + validHiRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\"/>"); // Use defaults
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\"/>"); // Use defaults
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" precursor_filter=\"0.11\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes+ "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_left_filter=\"5\" precursor_right_filter=\"20\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\"/>"); // Use default res mz
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" precursor_res_mz=\"" + validHiResMz + "\" " +
"acquisition_method=\"" + FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" " +
"acquisition_method=\"" + FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>"); // Use default res mz
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.centroided + "\" " +
"precursor_res=\"" + validPPM + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan product_mass_analyzer=\"" + FullScanMassAnalyzerType.centroided + "\" " +
"product_res=\"" + validPPM + "\"/>");
// Isotope enrichments
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.tof + "\" " +
"precursor_res=\"" + validHiRes + "\">" + VALID_ISOTOPE_ENRICHMENT_XML + "</transition_full_scan>");
// Errors
string overMaxMulti = ToXml(TransitionFullScan.MAX_PRECURSOR_MULTI_FILTER * 2);
string underMinMulti = ToXml(TransitionFullScan.MIN_PRECURSOR_MULTI_FILTER / 2);
string overMaxPPM = ToXml(TransitionFullScan.MAX_CENTROID_PPM * 2);
string underMinPPM = ToXml(TransitionFullScan.MIN_CENTROID_PPM / 2);
string underMinLoRes = ToXml(TransitionFullScan.MIN_LO_RES / 2);
string overMaxLoRes = ToXml(TransitionFullScan.MAX_LO_RES * 2);
string underMinHiRes = ToXml(TransitionFullScan.MIN_HI_RES / 2);
string overMaxHiRes = ToXml(TransitionFullScan.MAX_HI_RES * 2);
string underMinResMz = ToXml(TransitionFullScan.MIN_RES_MZ / 2);
string defaultResMz = ToXml(TransitionFullScan.DEFAULT_RES_MZ);
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"Unknown\" " +
"product_resolution=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
"Unknown" + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_resoltion=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"" + overMaxMulti + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_resoltion=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"" + underMinMulti + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_resoltion=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.DIA + "\" precursor_left_filter=\"5\" precursor_right_filter=\"fail\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + underMinLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + overMaxLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + underMinHiRes + "\" product_res_mz=\"" + defaultResMz + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" product_res=\"" + overMaxHiRes + "\" product_res_mz=\"" + defaultResMz + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" precursor_res_mz=\"" + underMinResMz + "\" " +
"acquisition_method=\"" + FullScanAcquisitionMethod.DIA + "\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.centroided + "\" precursor_res=\"" + underMinPPM + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.centroided + "\" precursor_res=\"" + overMaxPPM + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.centroided + "\" product_res=\"" + underMinPPM + "\"/>");
AssertEx.DeserializeError<TransitionFullScan>("<transition_full_scan acquisition_method=\"" +
FullScanAcquisitionMethod.Targeted + "\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.centroided + "\" product_res=\"" + overMaxPPM + "\"/>");
// With new isolation scheme tag.
AssertEx.DeserializeError<TransitionFullScan>(string.Format(@"
<transition_full_scan product_mass_analyzer=""{0}"" product_resoltion=""{1}"" acquisition_method=""{2}"">
<isolation_scheme name=""test"" precursor_filter=""{3}""/>
</transition_full_scan>",
FullScanMassAnalyzerType.qit, validLoRes, FullScanAcquisitionMethod.DIA, overMaxMulti));
AssertEx.DeserializeError<TransitionFullScan>(string.Format(@"
<transition_full_scan product_mass_analyzer=""{0}"" product_resoltion=""{1}"" acquisition_method=""{2}"">
<isolation_scheme name=""test"" precursor_filter=""{3}""/>
</transition_full_scan>",
FullScanMassAnalyzerType.qit, validLoRes, FullScanAcquisitionMethod.DIA, underMinMulti));
AssertEx.DeserializeError<TransitionFullScan>(string.Format(@"
<transition_full_scan product_mass_analyzer=""{0}"" product_res=""{1}"" product_res_mz=""{2}"" acquisition_method=""{3}"">
<isolation_scheme name=""test"" precursor_left_filter=""5"" precursor_right_filter=""fail""/>
</transition_full_scan>",
FullScanMassAnalyzerType.ft_icr, validHiRes, validHiResMz, FullScanAcquisitionMethod.DIA));
// Check backward compatibility reading old "Single" and "Multiple" filter types.
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Single\"/>"); // Use defaults
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\"/>"); // Use defaults
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\" precursor_filter=\"0.11\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\" precursor_left_filter=\"5\" precursor_right_filter=\"20\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\" product_res_mz=\"" + validHiResMz + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.ft_icr + "\" product_res=\"" + validHiRes + "\"/>"); // Use default res mz
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" precursor_res_mz=\"" + validHiResMz + "\" " +
"precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>");
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" +
FullScanMassAnalyzerType.orbitrap + "\" precursor_res=\"" + validHiRes + "\" " +
"precursor_filter_type=\"Multiple\" precursor_filter=\"2\" product_mass_analyzer=\"" +
FullScanMassAnalyzerType.qit + "\" product_res=\"" + validLoRes + "\"/>"); // Use default res mz
// Isotope enrichments with low res
AssertEx.DeserializeNoError<TransitionFullScan>("<transition_full_scan precursor_mass_analyzer=\"" + FullScanMassAnalyzerType.qit + "\" " +
"precursor_res=\"" + validLoRes + "\">" + VALID_ISOTOPE_ENRICHMENT_XML + "</transition_full_scan>");
}
/// <summary>
/// Test error handling in XML deserialization of <see cref="IsotopeEnrichments"/>.
/// </summary>
[TestMethod]
public void SerializeIsotopeEnrichmentsTest()
{
// Valid first
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"H'\">0.9</atom_percent_enrichment>");
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"C'\">" + ToXml(IsotopeEnrichmentItem.MAX_ATOM_PERCENT_ENRICHMENT) + "</atom_percent_enrichment>");
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"N'\">" + ToXml(IsotopeEnrichmentItem.MAX_ATOM_PERCENT_ENRICHMENT / 2) + "</atom_percent_enrichment>");
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"O'\">" + ToXml(IsotopeEnrichmentItem.MIN_ATOM_PERCENT_ENRICHMENT) + "</atom_percent_enrichment>");
AssertEx.DeserializeNoError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"O&quot;\">" + ToXml(IsotopeEnrichmentItem.MIN_ATOM_PERCENT_ENRICHMENT * 2) + "</atom_percent_enrichment>");
// Invalid
for (char c = 'A'; c <= 'Z'; c++)
{
AssertEx.DeserializeError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"" + c + "\">0.9</atom_percent_enrichment>");
}
AssertEx.DeserializeError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"N'\">" + ToXml(IsotopeEnrichmentItem.MAX_ATOM_PERCENT_ENRICHMENT+1) + "</atom_percent_enrichment>");
AssertEx.DeserializeError<IsotopeEnrichmentItem>("<atom_percent_enrichment symbol=\"O&quot;\">" + ToXml(IsotopeEnrichmentItem.MIN_ATOM_PERCENT_ENRICHMENT-1) + "</atom_percent_enrichment>");
// Valid enrichments
AssertEx.DeserializeNoError<IsotopeEnrichments>(VALID_ISOTOPE_ENRICHMENT_XML);
// Invalid enrichments
AssertEx.DeserializeNoError<IsotopeEnrichments>("<isotope_enrichments name=\"Cambridge Isotope Labs\">" +
"<atom_percent_enrichment symbol=\"H'\">0.9</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"C'\">0.91</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"N'\">0.92</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"O'\">0.93</atom_percent_enrichment>" +
"</isotope_enrichments>"); // Missing label atom O"
string expected = null;
var enrichments = AssertEx.RoundTrip(IsotopeEnrichmentsList.GetDefault(), ref expected);
foreach (var symbol in BioMassCalc.HeavySymbols)
{
string isotopeSymbol = symbol;
double expectedEnrichment = BioMassCalc.GetIsotopeEnrichmentDefault(isotopeSymbol);
// Make sure the distribution in the IsotopeAbundances object got set correctly
double heavyMass = BioMassCalc.GetHeavySymbolMass(isotopeSymbol);
Assert.IsTrue(enrichments.IsotopeAbundances.ContainsKey(isotopeSymbol));
MassDistribution massDistribution;
Assert.IsTrue(enrichments.IsotopeAbundances.TryGetValue(isotopeSymbol, out massDistribution));
foreach (var elementIsotopeMass in massDistribution.Keys)
{
// If the heavy mass is one of the element's stable isotopes, then it should match exactly
// If it's not a stable isotope, then it must be at least some number close to 1 Dalton away.
if (Math.Abs(elementIsotopeMass - heavyMass) < .9)
{
Assert.AreEqual(elementIsotopeMass, heavyMass);
}
}
// Make sure the enrichments are set correctly
int indexEnrichment = enrichments.Enrichments.IndexOf(item => Equals(item.IsotopeSymbol, isotopeSymbol));
Assert.AreNotEqual(-1, indexEnrichment);
Assert.AreEqual(expectedEnrichment, enrichments.Enrichments[indexEnrichment].AtomPercentEnrichment);
}
foreach (var symDist in BioMassCalc.DEFAULT_ABUNDANCES)
{
var distDefault = symDist.Value;
var distEnriched = enrichments.IsotopeAbundances[symDist.Key];
AssertEx.AreEqualDeep(distDefault.ToArray(), distEnriched.ToArray());
}
}
/// <summary>
/// Test serialization of ion mobility data
/// </summary>
[TestMethod]
public void SerializeIonMobilityTest()
{
// Check using drift time predictor without measured drift times
const string predictor = "<predict_drift_time name=\"test\" resolving_power=\"100\"> <ion_mobility_library name=\"scaled\" database_path=\"db.imdb\"/>" +
"<regression_dt charge=\"1\" slope=\"1\" intercept=\"0\"/></predict_drift_time>";
const string predictorNoRegression = "<predict_drift_time name=\"test\" resolving_power=\"100\"> <ion_mobility_library name=\"scaled\" database_path=\"db.imdb\"/></predict_drift_time>";
AssertEx.DeserializeNoError<IonMobilityPredictor>(predictor);
var pred = AssertEx.Deserialize<IonMobilityPredictor>(predictor);
Assert.AreEqual("db.imdb", pred.IonMobilityLibrary.PersistencePath);
Assert.AreEqual("scaled", pred.IonMobilityLibrary.Name);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.resolving_power, pred.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(0, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(0, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred.WindowWidthCalculator.ResolvingPower);
var driftTimeMax = 5000;
var driftTime = 2000;
Assert.AreEqual(40, pred.WindowWidthCalculator.WidthAt(driftTime, driftTimeMax));
Assert.AreEqual(1, pred.GetRegressionLine(1).Slope);
Assert.AreEqual(0, pred.GetRegressionLine(1).Intercept);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor.Replace("100", "0"), Resources.DriftTimePredictor_Validate_Resolving_power_must_be_greater_than_0_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor.Replace("db.imdb", ""), Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_provide_a_filename_for_the_library_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictorNoRegression, Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_include_per_charge_regression_values_);
// Check using drift time predictor with only measured drift times, and no high energy drift offset
const string predictor1 = "<predict_drift_time name=\"test1\" resolving_power=\"100\"><measured_dt modified_sequence=\"JLMN\" charge=\"1\" drift_time=\"17.0\" /> </predict_drift_time>";
AssertEx.DeserializeNoError<IonMobilityPredictor>(predictor1);
var pred1 = AssertEx.Deserialize<IonMobilityPredictor>(predictor1);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.resolving_power, pred1.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(0, pred1.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(0, pred1.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred1.WindowWidthCalculator.ResolvingPower);
Assert.AreEqual(17.0, pred1.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.SINGLY_PROTONATED)).IonMobility.Mobility);
Assert.AreEqual(17.0, pred1.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.SINGLY_PROTONATED)).GetHighEnergyDriftTimeMsec() ?? 0); // Apply the high energy offset
Assert.IsFalse(pred1.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.QUINTUPLY_PROTONATED)).IonMobility.HasValue); // Should not find a value for that charge state
Assert.IsFalse(pred1.GetMeasuredIonMobility(new LibKey("LMNJK", Adduct.QUINTUPLY_PROTONATED)).IonMobility.HasValue); // Should not find a value for that peptide
// Check using drift time predictor with only measured drift times, and a high energy scan drift time offset
const string predictor2 = "<predict_drift_time name=\"test2\" resolving_power=\"100\"><measured_dt modified_sequence=\"JLMN\" charge=\"1\" drift_time=\"17.0\" collisional_cross_section=\"0\" high_energy_drift_time_offset=\"-1.0\"/> </predict_drift_time>";
AssertEx.DeserializeNoError<IonMobilityPredictor>(predictor2);
var pred2 = AssertEx.Deserialize<IonMobilityPredictor>(predictor2);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.resolving_power, pred2.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(0, pred2.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(0, pred2.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred2.WindowWidthCalculator.ResolvingPower);
Assert.AreEqual(17.0, pred2.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.SINGLY_PROTONATED)).IonMobility.Mobility);
Assert.AreEqual(16.0, pred2.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.SINGLY_PROTONATED)).GetHighEnergyDriftTimeMsec() ?? 0); // Apply the high energy offset
Assert.IsFalse(pred2.GetMeasuredIonMobility(new LibKey("JLMN", Adduct.QUINTUPLY_PROTONATED)).IonMobility.HasValue); // Should not find a value for that charge state
Assert.IsFalse(pred2.GetMeasuredIonMobility(new LibKey("LMNJK", Adduct.QUINTUPLY_PROTONATED)).IonMobility.HasValue); // Should not find a value for that peptide
// Check using drift time predictor with only measured drift times, and a high energy scan drift time offset, and linear width
string predictor3 = "<predict_drift_time name=\"test\" peak_width_calc_type=\"resolving_power\" resolving_power=\"100\" width_at_dt_zero=\"20\" width_at_dt_max=\"500\"> <ion_mobility_library name=\"scaled\" database_path=\"db.imdb\"/>" +
"<regression_dt charge=\"1\" slope=\"1\" intercept=\"0\"/></predict_drift_time>";
string predictor3NoRegression = "<predict_drift_time name=\"test\" peak_width_calc_type=\"resolving_power\" resolving_power=\"100\" width_at_dt_zero=\"100\" width_at_dt_max=\"500\" > <ion_mobility_library name=\"scaled\" database_path=\"db.imdb\"/></predict_drift_time>";
AssertEx.DeserializeNoError<IonMobilityPredictor>(predictor3);
pred = AssertEx.Deserialize<IonMobilityPredictor>(predictor3);
Assert.AreEqual("db.imdb", pred.IonMobilityLibrary.PersistencePath);
Assert.AreEqual("scaled", pred.IonMobilityLibrary.Name);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.resolving_power, pred.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(40, pred.WindowWidthCalculator.WidthAt(driftTime, driftTimeMax));
var widthAtDt0 = 20;
var widthAtDtMax = 500;
Assert.AreEqual(widthAtDt0, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(widthAtDtMax, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred.WindowWidthCalculator.ResolvingPower);
Assert.AreEqual(1, pred.GetRegressionLine(1).Slope);
Assert.AreEqual(0, pred.GetRegressionLine(1).Intercept);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("100", "0"), Resources.DriftTimePredictor_Validate_Resolving_power_must_be_greater_than_0_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("db.imdb", ""), Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_provide_a_filename_for_the_library_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3NoRegression, Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_include_per_charge_regression_values_);
predictor3 = predictor3.Replace("\"resolving_power\"", "\"linear_range\"");
predictor3NoRegression = predictor3NoRegression.Replace("\"resolving_power\"", "\"linear_range\"");
pred = AssertEx.Deserialize<IonMobilityPredictor>(predictor3);
Assert.AreEqual("db.imdb", pred.IonMobilityLibrary.PersistencePath);
Assert.AreEqual("scaled", pred.IonMobilityLibrary.Name);
Assert.AreEqual(IonMobilityWindowWidthCalculator.IonMobilityPeakWidthType.linear_range, pred.WindowWidthCalculator.PeakWidthMode);
Assert.AreEqual(widthAtDt0, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueZero);
Assert.AreEqual(widthAtDtMax, pred.WindowWidthCalculator.PeakWidthAtIonMobilityValueMax);
Assert.AreEqual(100, pred.WindowWidthCalculator.ResolvingPower);
Assert.AreEqual(1, pred.GetRegressionLine(1).Slope);
Assert.AreEqual(0, pred.GetRegressionLine(1).Intercept);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("20", "-1"), Resources.DriftTimeWindowWidthCalculator_Validate_Peak_width_must_be_non_negative_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("500", "-1"), Resources.DriftTimeWindowWidthCalculator_Validate_Peak_width_must_be_non_negative_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3.Replace("db.imdb", ""), Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_provide_a_filename_for_the_library_);
AssertEx.DeserializeError<IonMobilityPredictor>(predictor3NoRegression, Resources.IonMobilityPredictor_Validate_Ion_mobility_predictors_using_an_ion_mobility_library_must_include_per_charge_regression_values_);
Assert.AreEqual(widthAtDt0 + (widthAtDtMax-widthAtDt0)*driftTime/driftTimeMax, pred.WindowWidthCalculator.WidthAt(driftTime, driftTimeMax));
}
private const string VALID_ISOTOPE_ENRICHMENT_XML =
"<isotope_enrichments name=\"Cambridge Isotope Labs\">" +
"<atom_percent_enrichment symbol=\"H'\">0.9</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"C'\">0.91</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"N'\">0.92</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"O'\">0.93</atom_percent_enrichment>" +
"<atom_percent_enrichment symbol=\"O"\">0.94</atom_percent_enrichment>" +
"</isotope_enrichments>";
private static string ToXml(double value)
{
return value.ToString(CultureInfo.InvariantCulture);
}
private static void CheckSettingsList<TItem>(SettingsList<TItem> target, SettingsList<TItem> copy)
where TItem : IKeyContainer<string>, IXmlSerializable
{
CheckSettingsList(target, copy, false);
}
private static void CheckSettingsList<TItem>(SettingsList<TItem> target, SettingsList<TItem> copy, bool firstSame)
where TItem : IKeyContainer<string>, IXmlSerializable
{
Assert.AreEqual(target.Count, copy.Count);
for (int i = 0; i < target.Count; i++)
{
if (firstSame && i == 0)
Assert.AreSame(target[i], copy[i]);
else
AssertEx.Cloned(target[i], copy[i]);
}
}
}
}
| 1 | 12,410 | Just ignorance on my part, but what is the significance of the change from C23NHe2 to C23NHx2? | ProteoWizard-pwiz | .cs |
@@ -486,6 +486,12 @@ class histogram(Operation):
bin_range = param.NumericTuple(default=None, length=2, doc="""
Specifies the range within which to compute the bins.""")
+ bins = param.ClassSelector(default=None, class_=(np.ndarray, list), doc="""
+ An explicit set of bin edges.""")
+
+ cumulative = param.Boolean(default=False, doc="""
+ Whether to compute the cumulative histogram""")
+
dimension = param.String(default=None, doc="""
Along which dimension of the Element to compute the histogram.""")
| 1 | """
Collection of either extremely generic or simple Operation
examples.
"""
from __future__ import division
import numpy as np
import param
from param import _is_number
from ..core import (Operation, NdOverlay, Overlay, GridMatrix,
HoloMap, Dataset, Element, Collator, Dimension)
from ..core.data import ArrayInterface, DictInterface
from ..core.util import (group_sanitizer, label_sanitizer, pd,
basestring, datetime_types, isfinite, dt_to_int)
from ..element.chart import Histogram, Scatter
from ..element.raster import Raster, Image, RGB, QuadMesh
from ..element.path import Contours, Polygons
from ..element.util import categorical_aggregate2d # noqa (API import)
from ..streams import RangeXY
column_interfaces = [ArrayInterface, DictInterface]
if pd:
from ..core.data import PandasInterface
column_interfaces.append(PandasInterface)
def identity(x,k): return x
class operation(Operation):
"""
The most generic operation that wraps any callable into an
Operation. The callable needs to accept a HoloViews
component and a key (that may be ignored) and must return a new
HoloViews component.
This class may be useful for turning a HoloViews method into an
operation to define as a compositor operation. For instance, the
following definition:
operation.instance(op=lambda x, k: x.collapse(np.subtract))
Could be used to implement a collapse operation that subtracts the
data between Rasters in an Overlay.
"""
output_type = param.Parameter(None, doc="""
The output element type which may be None to disable type
checking.
May be used to declare useful information to other code in
HoloViews e.g required for tab-completion support of operations
registered with compositors.""")
group = param.String(default='Operation', doc="""
The group assigned to the result after having applied the
operator.""")
op = param.Callable(default=identity, doc="""
The operation used to generate a new HoloViews object returned
by the operation. By default, the identity operation is
applied.""")
def _process(self, view, key=None):
retval = self.p.op(view, key)
if (self.p.output_type is not None):
assert isinstance(retval, self.p.output_type), \
"Return value does not match the declared output type."
return retval.relabel(group=self.p.group)
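# --- Illustrative sketch (not part of the original module): wrapping a
# --- callable into an Operation as described in the docstring above. The
# --- helper name is hypothetical; `overlay` is assumed to be an Overlay of
# --- Raster elements.
def _example_operation_usage(overlay):
    # The callable receives the element and a key (ignored here) and returns
    # a new element; the result is relabelled with the supplied group.
    subtract = operation.instance(op=lambda x, k: x.collapse(np.subtract),
                                  group='Difference')
    return subtract(overlay)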
class factory(Operation):
"""
Simple operation that constructs any element that accepts some
other element as input. For instance, RGB and HSV elements can be
created from overlays of Image elements.
"""
output_type = param.Parameter(RGB, doc="""
The output type of the factory operation.
By default, if three overlaid Image elements are supplied,
the corresponding RGB element will be returned. """)
def _process(self, view, key=None):
return self.p.output_type(view)
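# --- Illustrative sketch (not part of the original module): using factory to
# --- build an RGB element from an overlay of three channel Images, per the
# --- docstring above. The helper name and variable names are hypothetical.
def _example_factory_usage(red_img, green_img, blue_img):
    # factory defaults to output_type=RGB, so the overlay of three aligned
    # Image elements is passed straight to the RGB constructor.
    return factory(red_img * green_img * blue_img)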
class chain(Operation):
"""
Defining an Operation chain is an easy way to define a new
Operation from a series of existing ones. The argument is a
list of Operations (or Operation instances) that are
called in sequence to generate the returned element.
chain(operations=[gradient, threshold.instance(level=2)])
This operation can accept an Image instance and would first
compute the gradient before thresholding the result at a level of
2.0.
Instances are only required when arguments need to be passed to
individual operations so the resulting object is a function over a
single argument.
"""
output_type = param.Parameter(Image, doc="""
The output type of the chain operation. Must be supplied if
the chain is to be used as a channel operation.""")
group = param.String(default='Chain', doc="""
The group assigned to the result after having applied the chain.""")
operations = param.List(default=[], class_=Operation, doc="""
A list of Operations (or Operation instances)
that are applied to the input from left to right.
def _process(self, view, key=None):
processed = view
for operation in self.p.operations:
processed = operation.process_element(processed, key,
input_ranges=self.p.input_ranges)
return processed.clone(group=self.p.group)
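# --- Illustrative sketch (not part of the original module): composing two of
# --- the operations defined in this module into a single chain. The helper
# --- name is hypothetical; `img` is assumed to be an Image element.
def _example_chain_usage(img):
    # Each operation's output feeds the next one; an instance is used where a
    # parameter (here the threshold level) needs to be supplied up front.
    gradient_then_threshold = chain.instance(
        operations=[gradient, threshold.instance(level=2)])
    return gradient_then_threshold(img)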
class transform(Operation):
"""
Generic Operation to transform an input Image or RGBA
element into an output Image. The transformation is defined by
the supplied callable that accepts the data of the input Image
(typically a numpy array) and returns the transformed data of the
output Image.
This operator is extremely versatile; for instance, you could
implement an alternative to the explicit threshold operator with:
operator=lambda x: np.clip(x, 0, 0.5)
Alternatively, you can implement a transform computing the 2D
autocorrelation using the scipy library with:
operator=lambda x: scipy.signal.correlate2d(x, x)
"""
output_type = Image
group = param.String(default='Transform', doc="""
The group assigned to the result after applying the
transform.""")
operator = param.Callable(doc="""
Function of one argument that transforms the data in the input
Image to the data in the output Image. By default, acts as
the identity function such that the output matches the input.""")
def _process(self, img, key=None):
processed = (img.data if not self.p.operator
else self.p.operator(img.data))
return img.clone(processed, group=self.p.group)
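
# Illustrative sketch (not part of the original module): applying `transform`
# with the clipping operator mentioned in the docstring; `img` is a
# hypothetical Image.
#
#   clipped = transform(img, operator=lambda x: np.clip(x, 0, 0.5))
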
class image_overlay(Operation):
"""
    Operation to build an overlay of images to a specification from a
subset of the required elements.
This is useful for reordering the elements of an overlay,
duplicating layers of an overlay or creating blank image elements
in the appropriate positions.
For instance, image_overlay may build a three layered input
suitable for the RGB factory operation even if supplied with one
or two of the required channels (creating blank channels for the
missing elements).
Note that if there is any ambiguity regarding the match, the
strongest match will be used. In the case of a tie in match
strength, the first layer in the input is used. One successful
match is always required.
"""
output_type = Overlay
spec = param.String(doc="""
Specification of the output Overlay structure. For instance:
Image.R * Image.G * Image.B
Will ensure an overlay of this structure is created even if
(for instance) only (Image.R * Image.B) is supplied.
Elements in the input overlay that match are placed in the
appropriate positions and unavailable specification elements
are created with the specified fill group.""")
fill = param.Number(default=0)
default_range = param.Tuple(default=(0,1), doc="""
The default range that will be set on the value_dimension of
any automatically created blank image elements.""")
group = param.String(default='Transform', doc="""
The group assigned to the resulting overlay.""")
@classmethod
def _match(cls, el, spec):
"Return the strength of the match (None if no match)"
spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))
if not isinstance(el, Image) or spec_dict['type'] != 'Image':
raise NotImplementedError("Only Image currently supported")
sanitizers = {'group':group_sanitizer, 'label':label_sanitizer}
strength = 1
for key in ['group', 'label']:
attr_value = sanitizers[key](getattr(el, key))
if key in spec_dict:
if spec_dict[key] != attr_value: return None
strength += 1
return strength
def _match_overlay(self, raster, overlay_spec):
"""
Given a raster or input overlay, generate a list of matched
elements (None if no match) and corresponding tuple of match
strength values.
"""
ordering = [None]*len(overlay_spec) # Elements to overlay
strengths = [0]*len(overlay_spec) # Match strengths
elements = raster.values() if isinstance(raster, Overlay) else [raster]
for el in elements:
for pos in range(len(overlay_spec)):
strength = self._match(el, overlay_spec[pos])
if strength is None: continue # No match
elif (strength <= strengths[pos]): continue # Weaker match
else: # Stronger match
ordering[pos] = el
strengths[pos] = strength
return ordering, strengths
def _process(self, raster, key=None):
specs = tuple(el.strip() for el in self.p.spec.split('*'))
ordering, strengths = self._match_overlay(raster, specs)
if all(el is None for el in ordering):
raise Exception("The image_overlay operation requires at least one match")
completed = []
strongest = ordering[np.argmax(strengths)]
for el, spec in zip(ordering, specs):
if el is None:
spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))
el = Image(np.ones(strongest.data.shape) * self.p.fill,
group=spec_dict.get('group','Image'),
label=spec_dict.get('label',''))
el.vdims[0].range = self.p.default_range
completed.append(el)
return np.prod(completed)
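
# Illustrative sketch (not part of the original module): padding a partial
# overlay to the three channels required by the RGB factory, as described in
# the docstring above. `red_green` is a hypothetical overlay of Image.R and
# Image.G elements.
#
#   padded = image_overlay(red_green, spec='Image.R * Image.G * Image.B')
#   rgb = factory(padded)
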
class threshold(Operation):
"""
Threshold a given Image whereby all values higher than a given
level map to the specified high value and all values lower than
that level map to the specified low value.
"""
output_type = Image
level = param.Number(default=0.5, doc="""
The value at which the threshold is applied. Values lower than
the threshold map to the 'low' value and values above map to
the 'high' value.""")
high = param.Number(default=1.0, doc="""
The value given to elements greater than (or equal to) the
threshold.""")
low = param.Number(default=0.0, doc="""
The value given to elements below the threshold.""")
group = param.String(default='Threshold', doc="""
The group assigned to the thresholded output.""")
def _process(self, matrix, key=None):
if not isinstance(matrix, Image):
raise TypeError("The threshold operation requires a Image as input.")
arr = matrix.data
high = np.ones(arr.shape) * self.p.high
low = np.ones(arr.shape) * self.p.low
thresholded = np.where(arr > self.p.level, high, low)
return matrix.clone(thresholded, group=self.p.group)
class gradient(Operation):
"""
Compute the gradient plot of the supplied Image.
    If the Image value dimension is cyclic, the smallest step is taken,
    considering the cyclic range.
"""
output_type = Image
group = param.String(default='Gradient', doc="""
The group assigned to the output gradient matrix.""")
def _process(self, matrix, key=None):
if len(matrix.vdims) != 1:
raise ValueError("Input matrix to gradient operation must "
"have single value dimension.")
matrix_dim = matrix.vdims[0]
data = np.flipud(matrix.dimension_values(matrix_dim, flat=False))
r, c = data.shape
if matrix_dim.cyclic and (None in matrix_dim.range):
raise Exception("Cyclic range must be specified to compute "
"the gradient of cyclic quantities")
cyclic_range = None if not matrix_dim.cyclic else np.diff(matrix_dim.range)
if cyclic_range is not None:
# shift values such that wrapping works ok
data = data - matrix_dim.range[0]
dx = np.diff(data, 1, axis=1)[0:r-1, 0:c-1]
dy = np.diff(data, 1, axis=0)[0:r-1, 0:c-1]
if cyclic_range is not None: # Wrap into the specified range
# Convert negative differences to an equivalent positive value
dx = dx % cyclic_range
dy = dy % cyclic_range
#
# Prefer small jumps
dx_negatives = dx - cyclic_range
dy_negatives = dy - cyclic_range
dx = np.where(np.abs(dx_negatives)<dx, dx_negatives, dx)
dy = np.where(np.abs(dy_negatives)<dy, dy_negatives, dy)
return Image(np.sqrt(dx * dx + dy * dy), bounds=matrix.bounds, group=self.p.group)
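
# Illustrative sketch (not part of the original module): computing the
# gradient of a cyclic quantity. The value dimension must declare a cyclic
# range, otherwise the operation raises; the data here is hypothetical.
#
#   angle_dim = Dimension('Angle', cyclic=True, range=(0, 2 * np.pi))
#   angles = Image(np.random.rand(50, 50) * 2 * np.pi, vdims=[angle_dim])
#   grad = gradient(angles)
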
class convolve(Operation):
"""
Apply a convolution to an overlay using the top layer as the
kernel for convolving the bottom layer. Both Image elements in
the input overlay should have a single value dimension.
"""
output_type = Image
group = param.String(default='Convolution', doc="""
The group assigned to the convolved output.""")
kernel_roi = param.NumericTuple(default=(0,0,0,0), length=4, doc="""
A 2-dimensional slice of the kernel layer to use in the
convolution in lbrt (left, bottom, right, top) format. By
default, no slicing is applied.""")
def _process(self, overlay, key=None):
if len(overlay) != 2:
raise Exception("Overlay must contain at least to items.")
[target, kernel] = overlay.get(0), overlay.get(1)
if len(target.vdims) != 1:
raise Exception("Convolution requires inputs with single value dimensions.")
xslice = slice(self.p.kernel_roi[0], self.p.kernel_roi[2])
yslice = slice(self.p.kernel_roi[1], self.p.kernel_roi[3])
k = kernel.data if self.p.kernel_roi == (0,0,0,0) else kernel[xslice, yslice].data
data = np.flipud(target.dimension_values(2, flat=False))
fft1 = np.fft.fft2(data)
fft2 = np.fft.fft2(k, s=data.shape)
convolved_raw = np.fft.ifft2(fft1 * fft2).real
k_rows, k_cols = k.shape
rolled = np.roll(np.roll(convolved_raw, -(k_cols//2), axis=-1), -(k_rows//2), axis=-2)
convolved = rolled / float(k.sum())
return Image(convolved, bounds=target.bounds, group=self.p.group)
class contours(Operation):
"""
Given a Image with a single channel, annotate it with contour
lines for a given set of contour levels.
The return is an NdOverlay with a Contours layer for each given
level, overlaid on top of the input Image.
"""
output_type = Overlay
    levels = param.ClassSelector(default=10, class_=(list, int), doc="""
        A list of scalar values used to specify the contour levels
        or an integer giving the number of levels to compute.""")
group = param.String(default='Level', doc="""
The group assigned to the output contours.""")
filled = param.Boolean(default=False, doc="""
Whether to generate filled contours""")
overlaid = param.Boolean(default=False, doc="""
Whether to overlay the contour on the supplied Element.""")
def _process(self, element, key=None):
try:
from matplotlib.contour import QuadContourSet
from matplotlib.axes import Axes
from matplotlib.figure import Figure
except ImportError:
raise ImportError("contours operation requires matplotlib.")
extent = element.range(0) + element.range(1)[::-1]
if type(element) is Raster:
data = [np.flipud(element.data)]
elif isinstance(element, Image):
data = [np.flipud(element.dimension_values(2, flat=False))]
elif isinstance(element, QuadMesh):
data = (element.dimension_values(0, False, flat=False),
element.dimension_values(1, False, flat=False),
element.dimension_values(2, flat=False))
if isinstance(self.p.levels, int):
levels = self.p.levels+2 if self.p.filled else self.p.levels+3
zmin, zmax = element.range(2)
levels = np.linspace(zmin, zmax, levels)
else:
levels = self.p.levels
xdim, ydim = element.dimensions('key', label=True)
fig = Figure()
ax = Axes(fig, [0, 0, 1, 1])
contour_set = QuadContourSet(ax, *data, filled=self.p.filled,
extent=extent, levels=levels)
if self.p.filled:
contour_type = Polygons
else:
contour_type = Contours
vdims = element.vdims[:1]
paths = []
empty = np.full((1, 2), np.NaN)
for level, cset in zip(contour_set.get_array(), contour_set.collections):
subpaths = []
for path in cset.get_paths():
if path.codes is None:
subpaths.append(path.vertices)
else:
subpaths += np.split(path.vertices, np.where(path.codes==1)[0][1:])
if len(subpaths):
subpath = np.concatenate([p for sp in subpaths for p in (sp, empty)][:-1])
else:
subpath = np.array([])
paths.append({(xdim, ydim): subpath, element.vdims[0].name: level})
contours = contour_type(paths, label=element.label, kdims=element.kdims, vdims=vdims)
if self.p.overlaid:
contours = element * contours
return contours
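
# Illustrative sketch (not part of the original module): overlaying five
# contour levels on a hypothetical Image `img` (requires matplotlib).
#
#   overlaid = contours(img, levels=5, overlaid=True)
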
class histogram(Operation):
"""
Returns a Histogram of the input element data, binned into
num_bins over the bin_range (if specified) along the specified
dimension.
"""
bin_range = param.NumericTuple(default=None, length=2, doc="""
Specifies the range within which to compute the bins.""")
dimension = param.String(default=None, doc="""
Along which dimension of the Element to compute the histogram.""")
frequency_label = param.String(default='{dim} Frequency', doc="""
Format string defining the label of the frequency dimension of the Histogram.""")
groupby = param.ClassSelector(default=None, class_=(basestring, Dimension), doc="""
Defines a dimension to group the Histogram returning an NdOverlay of Histograms.""")
individually = param.Boolean(default=True, doc="""
Specifies whether the histogram will be rescaled for each Element in a UniformNdMapping.""")
log = param.Boolean(default=False, doc="""
Whether to use base 10 logarithmic samples for the bin edges.""")
mean_weighted = param.Boolean(default=False, doc="""
Whether the weighted frequencies are averaged.""")
normed = param.ObjectSelector(default=True,
objects=[True, False, 'integral', 'height'],
doc="""
Controls normalization behavior. If `True` or `'integral'`, then
`density=True` is passed to np.histogram, and the distribution
is normalized such that the integral is unity. If `False`,
then the frequencies will be raw counts. If `'height'`, then the
frequencies are normalized such that the max bin height is unity.""")
nonzero = param.Boolean(default=False, doc="""
Whether to use only nonzero values when computing the histogram""")
num_bins = param.Integer(default=20, doc="""
        Number of bins in the histogram.""")
weight_dimension = param.String(default=None, doc="""
Name of the dimension the weighting should be drawn from""")
    style_prefix = param.String(default=None, allow_None=True, doc="""
Used for setting a common style for histograms in a HoloMap or AdjointLayout.""")
def _process(self, view, key=None):
if self.p.groupby:
if not isinstance(view, Dataset):
raise ValueError('Cannot use histogram groupby on non-Dataset Element')
grouped = view.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay)
self.p.groupby = None
return grouped.map(self._process, Dataset)
if self.p.dimension:
selected_dim = self.p.dimension
else:
selected_dim = [d.name for d in view.vdims + view.kdims][0]
data = np.array(view.dimension_values(selected_dim))
if self.p.nonzero:
mask = data > 0
data = data[mask]
if self.p.weight_dimension:
weights = np.array(view.dimension_values(self.p.weight_dimension))
if self.p.nonzero:
weights = weights[mask]
else:
weights = None
data = data[isfinite(data)]
hist_range = self.p.bin_range or view.range(selected_dim)
# Avoids range issues including zero bin range and empty bins
if hist_range == (0, 0) or any(not isfinite(r) for r in hist_range):
hist_range = (0, 1)
datetimes = False
steps = self.p.num_bins + 1
start, end = hist_range
if data.dtype.kind == 'M' or (data.dtype.kind == 'O' and isinstance(data[0], datetime_types)):
start, end = dt_to_int(start, 'ns'), dt_to_int(end, 'ns')
datetimes = True
data = data.astype('datetime64[ns]').astype('int64') * 1000.
hist_range = start, end
if self.p.log:
bin_min = max([abs(start), data[data>0].min()])
edges = np.logspace(np.log10(bin_min), np.log10(end), steps)
else:
edges = np.linspace(start, end, steps)
normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed
if len(data):
if normed:
# This covers True, 'height', 'integral'
hist, edges = np.histogram(data, density=True, range=(start, end),
weights=weights, bins=edges)
if normed=='height':
hist /= hist.max()
else:
hist, edges = np.histogram(data, normed=normed, range=(start, end),
weights=weights, bins=edges)
if self.p.weight_dimension and self.p.mean_weighted:
hist_mean, _ = np.histogram(data, density=False, range=(start, end),
bins=self.p.num_bins)
hist /= hist_mean
else:
hist = np.zeros(self.p.num_bins)
hist[np.isnan(hist)] = 0
if datetimes:
edges = (edges/10e5).astype('datetime64[us]')
params = {}
if self.p.weight_dimension:
params['vdims'] = [view.get_dimension(self.p.weight_dimension)]
else:
label = self.p.frequency_label.format(dim=selected_dim)
params['vdims'] = [Dimension('{}_frequency'.format(selected_dim),
label=label)]
if view.group != view.__class__.__name__:
params['group'] = view.group
return Histogram((hist, edges), kdims=[view.get_dimension(selected_dim)],
label=view.label, **params)
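
# Illustrative sketch (not part of the original module): a 30-bin histogram
# of the 'y' values of a hypothetical Scatter element `scatter`.
#
#   hist = histogram(scatter, dimension='y', num_bins=30, normed=True)
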
class decimate(Operation):
"""
Decimates any column based Element to a specified number of random
rows if the current view defined by the x_range and y_range
contains more than max_samples. By default the operation returns a
DynamicMap with a RangeXY stream allowing dynamic downsampling.
"""
dynamic = param.Boolean(default=True, doc="""
Enables dynamic processing by default.""")
    link_inputs = param.Boolean(default=True, doc="""
        By default, the link_inputs parameter is set to True so that
        when applying this operation, backends that support linked
        streams update RangeXY streams on the inputs of the operation.""")
max_samples = param.Integer(default=5000, doc="""
Maximum number of samples to display at the same time.""")
random_seed = param.Integer(default=42, doc="""
Seed used to initialize randomization.""")
streams = param.List(default=[RangeXY], doc="""
List of streams that are applied if dynamic=True, allowing
for dynamic interaction with the plot.""")
x_range = param.NumericTuple(default=None, length=2, doc="""
The x_range as a tuple of min and max x-value. Auto-ranges
if set to None.""")
y_range = param.NumericTuple(default=None, length=2, doc="""
        The y_range as a tuple of min and max y-value. Auto-ranges
if set to None.""")
def _process_layer(self, element, key=None):
if not isinstance(element, Dataset):
raise ValueError("Cannot downsample non-Dataset types.")
if element.interface not in column_interfaces:
element = element.clone(tuple(element.columns().values()))
xstart, xend = self.p.x_range if self.p.x_range else element.range(0)
ystart, yend = self.p.y_range if self.p.y_range else element.range(1)
# Slice element to current ranges
xdim, ydim = element.dimensions(label=True)[0:2]
sliced = element.select(**{xdim: (xstart, xend),
ydim: (ystart, yend)})
if len(sliced) > self.p.max_samples:
prng = np.random.RandomState(self.p.random_seed)
return element.iloc[prng.choice(len(sliced), self.p.max_samples, False)]
return sliced
def _process(self, element, key=None):
return element.map(self._process_layer, Element)
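
# Illustrative sketch (not part of the original module): downsampling a large
# hypothetical Scatter `scatter`; with dynamic=True (the default) this returns
# a DynamicMap that re-decimates whenever the RangeXY stream updates.
#
#   decimated = decimate(scatter, max_samples=1000)
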
class interpolate_curve(Operation):
"""
Resamples a Curve using the defined interpolation method, e.g.
to represent changes in y-values as steps.
"""
interpolation = param.ObjectSelector(objects=['steps-pre', 'steps-mid',
'steps-post', 'linear'],
default='steps-mid', doc="""
Controls the transition point of the step along the x-axis.""")
@classmethod
def pts_to_prestep(cls, x, values):
steps = np.zeros(2 * len(x) - 1)
value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)
steps[0::2] = x
steps[1::2] = steps[0:-2:2]
val_arrays = []
for v, s in zip(values, value_steps):
s[0::2] = v
s[1::2] = s[2::2]
val_arrays.append(s)
return steps, tuple(val_arrays)
@classmethod
def pts_to_midstep(cls, x, values):
steps = np.zeros(2 * len(x))
value_steps = tuple(np.empty(2 * len(x), dtype=v.dtype) for v in values)
steps[1:-1:2] = steps[2::2] = x[:-1] + (x[1:] - x[:-1])/2
steps[0], steps[-1] = x[0], x[-1]
val_arrays = []
for v, s in zip(values, value_steps):
s[0::2] = v
s[1::2] = s[0::2]
val_arrays.append(s)
return steps, tuple(val_arrays)
@classmethod
def pts_to_poststep(cls, x, values):
steps = np.zeros(2 * len(x) - 1)
value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)
steps[0::2] = x
steps[1::2] = steps[2::2]
val_arrays = []
for v, s in zip(values, value_steps):
s[0::2] = v
s[1::2] = s[0:-2:2]
val_arrays.append(s)
return steps, tuple(val_arrays)
def _process_layer(self, element, key=None):
INTERPOLATE_FUNCS = {'steps-pre': self.pts_to_prestep,
'steps-mid': self.pts_to_midstep,
'steps-post': self.pts_to_poststep}
if self.p.interpolation not in INTERPOLATE_FUNCS:
return element
x = element.dimension_values(0)
dtype = x.dtype
is_datetime = dtype.kind == 'M' or isinstance(x[0], datetime_types)
if is_datetime:
dt_type = dtype if dtype.kind == 'M' else 'datetime64[ns]'
x = x.astype(dt_type).astype('int64')
dvals = tuple(element.dimension_values(d) for d in element.dimensions()[1:])
xs, dvals = INTERPOLATE_FUNCS[self.p.interpolation](x.astype('f'), dvals)
if is_datetime:
xs = xs.astype(dt_type)
return element.clone((xs,)+dvals)
def _process(self, element, key=None):
return element.map(self._process_layer, Element)
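
# Illustrative sketch (not part of the original module): converting a
# hypothetical Curve `curve` into a post-step representation.
#
#   stepped = interpolate_curve(curve, interpolation='steps-post')
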
#==================#
# Other operations #
#==================#
class collapse(Operation):
"""
    Given an overlay of Element types, collapse into a single Element
    object using the supplied function. Collapsing aggregates over the
    key dimensions of each object applying the supplied fn to each group.
This is an example of an Operation that does not involve
any Raster types.
"""
fn = param.Callable(default=np.mean, doc="""
The function that is used to collapse the curve y-values for
each x-value.""")
def _process(self, overlay, key=None):
if isinstance(overlay, NdOverlay):
collapse_map = HoloMap(overlay)
else:
collapse_map = HoloMap({i: el for i, el in enumerate(overlay)})
return collapse_map.collapse(function=self.p.fn)
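
# Illustrative sketch (not part of the original module): reducing an overlay
# of Curves (hypothetical `curve_overlay`) to their element-wise mean.
#
#   mean_curve = collapse(curve_overlay, fn=np.mean)
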
class gridmatrix(param.ParameterizedFunction):
"""
The gridmatrix operation takes an Element or HoloMap
of Elements as input and creates a GridMatrix object,
which plots each dimension in the Element against
each other dimension. This provides a very useful
overview of high-dimensional data and is inspired
by pandas and seaborn scatter_matrix implementations.
"""
chart_type = param.Parameter(default=Scatter, doc="""
The Element type used to display bivariate distributions
of the data.""")
diagonal_type = param.Parameter(default=None, doc="""
The Element type along the diagonal, may be a Histogram or any
other plot type which can visualize a univariate distribution.
This parameter overrides diagonal_operation.""")
diagonal_operation = param.Parameter(default=histogram, doc="""
The operation applied along the diagonal, may be a histogram-operation
or any other function which returns a viewable element.""")
overlay_dims = param.List(default=[], doc="""
        If a HoloMap is supplied this will allow overlaying one or
        more of its key dimensions.""")
def __call__(self, data, **params):
p = param.ParamOverrides(self, params)
if isinstance(data, (HoloMap, NdOverlay)):
ranges = {d.name: data.range(d) for d in data.dimensions()}
data = data.clone({k: GridMatrix(self._process(p, v, ranges))
for k, v in data.items()})
data = Collator(data, merge_type=type(data))()
if p.overlay_dims:
data = data.map(lambda x: x.overlay(p.overlay_dims), (HoloMap,))
return data
elif isinstance(data, Element):
data = self._process(p, data)
return GridMatrix(data)
def _process(self, p, element, ranges={}):
# Creates a unified Dataset.data attribute
# to draw the data from
if isinstance(element.data, np.ndarray):
if 'dataframe' in Dataset.datatype:
el_data = element.table('dataframe')
else:
el_data = element.table('dictionary')
else:
el_data = element.data
# Get dimensions to plot against each other
types = (str, basestring, np.str_, np.object_)+datetime_types
dims = [d for d in element.dimensions()
if _is_number(element.range(d)[0]) and
not issubclass(element.get_dimension_type(d), types)]
permuted_dims = [(d1, d2) for d1 in dims
for d2 in dims[::-1]]
# Convert Histogram type to operation to avoid one case in the if below.
if p.diagonal_type is Histogram:
p.diagonal_type = None
p.diagonal_operation = histogram
data = {}
for d1, d2 in permuted_dims:
if d1 == d2:
if p.diagonal_type is not None:
if p.diagonal_type._auto_indexable_1d:
el = p.diagonal_type(el_data, kdims=[d1], vdims=[d2],
datatype=['dataframe', 'dictionary'])
else:
values = element.dimension_values(d1)
el = p.diagonal_type(values, kdims=[d1])
elif p.diagonal_operation is histogram or isinstance(p.diagonal_operation, histogram):
bin_range = ranges.get(d1.name, element.range(d1))
el = p.diagonal_operation(element, dimension=d1.name, bin_range=bin_range)
else:
el = p.diagonal_operation(element, dimension=d1.name)
else:
kdims, vdims = ([d1, d2], []) if len(p.chart_type.kdims) == 2 else (d1, d2)
el = p.chart_type(el_data, kdims=kdims, vdims=vdims,
datatype=['dataframe', 'dictionary'])
data[(d1.name, d2.name)] = el
return data
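
# Illustrative sketch (not part of the original module): a scatter-matrix view
# of a hypothetical Dataset `table` with several numeric dimensions.
#
#   grid = gridmatrix(table, chart_type=Scatter, diagonal_operation=histogram)
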
| 1 | 21,264 | What about tuples or pandas series? Do we want to support lots of different types or force a single type? | holoviz-holoviews | py |
@@ -294,10 +294,6 @@ public class ZkStateReader implements SolrCloseable {
log.debug("Loading collection config from: [{}]", path);
try {
- if (zkClient.exists(path, true) == false) {
- log.warn("No collection found at path {}.", path);
- throw new KeeperException.NoNodeException("No collection found at path: " + path);
- }
byte[] data = zkClient.getData(path, null, null, true);
if (data == null) {
log.warn("No config data found at path {}.", path); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.common.cloud;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import java.util.function.UnaryOperator;
import java.util.stream.Collectors;
import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
import org.apache.solr.common.AlreadyClosedException;
import org.apache.solr.common.Callable;
import org.apache.solr.common.SolrCloseable;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.AutoScalingParams;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.ObjectReleaseTracker;
import org.apache.solr.common.util.Pair;
import org.apache.solr.common.util.SolrjNamedThreadFactory;
import org.apache.solr.common.util.Utils;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.Watcher.Event.EventType;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static java.util.Collections.EMPTY_MAP;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static java.util.Collections.emptySortedSet;
import static org.apache.solr.common.util.Utils.fromJSON;
public class ZkStateReader implements SolrCloseable {
public static final int STATE_UPDATE_DELAY = Integer.getInteger("solr.OverseerStateUpdateDelay", 2000); // delay between cloud state updates
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
public static final String BASE_URL_PROP = "base_url";
public static final String NODE_NAME_PROP = "node_name";
public static final String CORE_NODE_NAME_PROP = "core_node_name";
public static final String ROLES_PROP = "roles";
public static final String STATE_PROP = "state";
// if this flag equals to false and the replica does not exist in cluster state, set state op become no op (default is true)
public static final String FORCE_SET_STATE_PROP = "force_set_state";
/**
* SolrCore name.
*/
public static final String CORE_NAME_PROP = "core";
public static final String COLLECTION_PROP = "collection";
public static final String ELECTION_NODE_PROP = "election_node";
public static final String SHARD_ID_PROP = "shard";
public static final String REPLICA_PROP = "replica";
public static final String SHARD_RANGE_PROP = "shard_range";
public static final String SHARD_STATE_PROP = "shard_state";
public static final String SHARD_PARENT_PROP = "shard_parent";
public static final String NUM_SHARDS_PROP = "numShards";
public static final String LEADER_PROP = "leader";
public static final String SHARED_STORAGE_PROP = "shared_storage";
public static final String PROPERTY_PROP = "property";
public static final String PROPERTY_PROP_PREFIX = "property.";
public static final String PROPERTY_VALUE_PROP = "property.value";
public static final String MAX_AT_ONCE_PROP = "maxAtOnce";
public static final String MAX_WAIT_SECONDS_PROP = "maxWaitSeconds";
public static final String STATE_TIMESTAMP_PROP = "stateTimestamp";
public static final String COLLECTIONS_ZKNODE = "/collections";
public static final String LIVE_NODES_ZKNODE = "/live_nodes";
public static final String ALIASES = "/aliases.json";
public static final String CLUSTER_STATE = "/clusterstate.json";
public static final String CLUSTER_PROPS = "/clusterprops.json";
public static final String COLLECTION_PROPS_ZKNODE = "collectionprops.json";
public static final String REJOIN_AT_HEAD_PROP = "rejoinAtHead";
public static final String SOLR_SECURITY_CONF_PATH = "/security.json";
public static final String SOLR_AUTOSCALING_CONF_PATH = "/autoscaling.json";
public static final String SOLR_AUTOSCALING_EVENTS_PATH = "/autoscaling/events";
public static final String SOLR_AUTOSCALING_TRIGGER_STATE_PATH = "/autoscaling/triggerState";
public static final String SOLR_AUTOSCALING_NODE_ADDED_PATH = "/autoscaling/nodeAdded";
public static final String SOLR_AUTOSCALING_NODE_LOST_PATH = "/autoscaling/nodeLost";
public static final String SOLR_PKGS_PATH = "/packages.json";
public static final String DEFAULT_SHARD_PREFERENCES = "defaultShardPreferences";
public static final String REPLICATION_FACTOR = "replicationFactor";
public static final String MAX_SHARDS_PER_NODE = "maxShardsPerNode";
public static final String AUTO_ADD_REPLICAS = "autoAddReplicas";
public static final String MAX_CORES_PER_NODE = "maxCoresPerNode";
public static final String PULL_REPLICAS = "pullReplicas";
public static final String NRT_REPLICAS = "nrtReplicas";
public static final String TLOG_REPLICAS = "tlogReplicas";
public static final String READ_ONLY = "readOnly";
public static final String ROLES = "/roles.json";
public static final String CONFIGS_ZKNODE = "/configs";
public final static String CONFIGNAME_PROP = "configName";
public static final String LEGACY_CLOUD = "legacyCloud";
public static final String SAMPLE_PERCENTAGE = "samplePercentage";
/**
* @deprecated use {@link org.apache.solr.common.params.CollectionAdminParams#DEFAULTS} instead.
*/
@Deprecated
public static final String COLLECTION_DEF = "collectionDefaults";
public static final String URL_SCHEME = "urlScheme";
private static final String SOLR_ENVIRONMENT = "environment";
public static final String REPLICA_TYPE = "type";
/**
* A view of the current state of all collections; combines all the different state sources into a single view.
*/
protected volatile ClusterState clusterState;
private static final int GET_LEADER_RETRY_INTERVAL_MS = 50;
private static final int GET_LEADER_RETRY_DEFAULT_TIMEOUT = Integer.parseInt(System.getProperty("zkReaderGetLeaderRetryTimeoutMs", "4000"));
public static final String LEADER_ELECT_ZKNODE = "leader_elect";
public static final String SHARD_LEADERS_ZKNODE = "leaders";
public static final String ELECTION_NODE = "election";
/**
   * Collections tracked in the legacy (shared) state format, reflecting the contents of clusterstate.json.
*/
private Map<String, ClusterState.CollectionRef> legacyCollectionStates = emptyMap();
/**
* Last seen ZK version of clusterstate.json.
*/
private int legacyClusterStateVersion = 0;
/**
* Collections with format2 state.json, "interesting" and actively watched.
*/
private final ConcurrentHashMap<String, DocCollection> watchedCollectionStates = new ConcurrentHashMap<>();
/**
* Collections with format2 state.json, not "interesting" and not actively watched.
*/
private final ConcurrentHashMap<String, LazyCollectionRef> lazyCollectionStates = new ConcurrentHashMap<>();
/**
* Collection properties being actively watched
*/
private final ConcurrentHashMap<String, VersionedCollectionProps> watchedCollectionProps = new ConcurrentHashMap<>();
/**
* Collection properties being actively watched
*/
private final ConcurrentHashMap<String, PropsWatcher> collectionPropsWatchers = new ConcurrentHashMap<>();
private volatile SortedSet<String> liveNodes = emptySortedSet();
private volatile Map<String, Object> clusterProperties = Collections.emptyMap();
private final ZkConfigManager configManager;
private ConfigData securityData;
private final Runnable securityNodeListener;
private ConcurrentHashMap<String, CollectionWatch<DocCollectionWatcher>> collectionWatches = new ConcurrentHashMap<>();
// named this observers so there's less confusion between CollectionPropsWatcher map and the PropsWatcher map.
private ConcurrentHashMap<String, CollectionWatch<CollectionPropsWatcher>> collectionPropsObservers = new ConcurrentHashMap<>();
private Set<CloudCollectionsListener> cloudCollectionsListeners = ConcurrentHashMap.newKeySet();
private final ExecutorService notifications = ExecutorUtil.newMDCAwareCachedThreadPool("watches");
private Set<LiveNodesListener> liveNodesListeners = ConcurrentHashMap.newKeySet();
private Set<ClusterPropertiesListener> clusterPropertiesListeners = ConcurrentHashMap.newKeySet();
/**
* Used to submit notifications to Collection Properties watchers in order
**/
private final ExecutorService collectionPropsNotifications = ExecutorUtil.newMDCAwareSingleThreadExecutor(new SolrjNamedThreadFactory("collectionPropsNotifications"));
private static final long LAZY_CACHE_TIME = TimeUnit.NANOSECONDS.convert(STATE_UPDATE_DELAY, TimeUnit.MILLISECONDS);
private Future<?> collectionPropsCacheCleaner; // only kept to identify if the cleaner has already been started.
/**
* Get current {@link AutoScalingConfig}.
*
* @return current configuration from <code>autoscaling.json</code>. NOTE:
* this data is retrieved from ZK on each call.
*/
public AutoScalingConfig getAutoScalingConfig() throws KeeperException, InterruptedException {
return getAutoScalingConfig(null);
}
/**
* Get current {@link AutoScalingConfig}.
*
* @param watcher optional {@link Watcher} to set on a znode to watch for config changes.
* @return current configuration from <code>autoscaling.json</code>. NOTE:
* this data is retrieved from ZK on each call.
*/
public AutoScalingConfig getAutoScalingConfig(Watcher watcher) throws KeeperException, InterruptedException {
Stat stat = new Stat();
Map<String, Object> map = new HashMap<>();
try {
byte[] bytes = zkClient.getData(SOLR_AUTOSCALING_CONF_PATH, watcher, stat, true);
if (bytes != null && bytes.length > 0) {
map = (Map<String, Object>) fromJSON(bytes);
}
} catch (KeeperException.NoNodeException e) {
// ignore
}
map.put(AutoScalingParams.ZK_VERSION, stat.getVersion());
return new AutoScalingConfig(map);
}
private static class CollectionWatch<T> {
int coreRefCount = 0;
Set<T> stateWatchers = ConcurrentHashMap.newKeySet();
public boolean canBeRemoved() {
return coreRefCount + stateWatchers.size() == 0;
}
}
public static final Set<String> KNOWN_CLUSTER_PROPS = Set.of(
LEGACY_CLOUD,
URL_SCHEME,
AUTO_ADD_REPLICAS,
CoreAdminParams.BACKUP_LOCATION,
DEFAULT_SHARD_PREFERENCES,
MAX_CORES_PER_NODE,
SAMPLE_PERCENTAGE,
SOLR_ENVIRONMENT,
CollectionAdminParams.DEFAULTS);
/**
* Returns config set name for collection.
*
* @param collection to return config set name for
*/
public String readConfigName(String collection) throws KeeperException {
String configName = null;
String path = COLLECTIONS_ZKNODE + "/" + collection;
log.debug("Loading collection config from: [{}]", path);
try {
if (zkClient.exists(path, true) == false) {
log.warn("No collection found at path {}.", path);
throw new KeeperException.NoNodeException("No collection found at path: " + path);
}
byte[] data = zkClient.getData(path, null, null, true);
if (data == null) {
log.warn("No config data found at path {}.", path);
throw new KeeperException.NoNodeException("No config data found at path: " + path);
}
ZkNodeProps props = ZkNodeProps.load(data);
configName = props.getStr(CONFIGNAME_PROP);
if (configName == null) {
log.warn("No config data found at path{}. ", path);
throw new KeeperException.NoNodeException("No config data found at path: " + path);
}
String configPath = CONFIGS_ZKNODE + "/" + configName;
if (zkClient.exists(configPath, true) == false) {
log.error("Specified config=[{}] does not exist in ZooKeeper at location=[{}]", configName, configPath);
throw new KeeperException.NoNodeException("Specified config=[" + configName + "] does not exist in ZooKeeper at location=[" + configPath + "]");
} else {
log.debug("path=[{}] [{}]=[{}] specified config exists in ZooKeeper", configPath, CONFIGNAME_PROP, configName);
}
} catch (InterruptedException e) {
SolrZkClient.checkInterrupted(e);
log.warn("Thread interrupted when loading config name for collection {}", collection);
throw new SolrException(ErrorCode.SERVER_ERROR, "Thread interrupted when loading config name for collection " + collection, e);
}
return configName;
}
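
  // Illustrative usage sketch (not part of the original class): resolving the
  // config set bound to a collection; `reader` and the collection name are
  // hypothetical.
  //
  //   try {
  //     String configName = reader.readConfigName("myCollection");
  //   } catch (KeeperException.NoNodeException e) {
  //     // the collection, its configName property or the config set is missing
  //   }
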
private final SolrZkClient zkClient;
private final boolean closeClient;
private volatile boolean closed = false;
private Set<CountDownLatch> waitLatches = ConcurrentHashMap.newKeySet();
public ZkStateReader(SolrZkClient zkClient) {
this(zkClient, null);
}
public ZkStateReader(SolrZkClient zkClient, Runnable securityNodeListener) {
this.zkClient = zkClient;
this.configManager = new ZkConfigManager(zkClient);
this.closeClient = false;
this.securityNodeListener = securityNodeListener;
assert ObjectReleaseTracker.track(this);
}
public ZkStateReader(String zkServerAddress, int zkClientTimeout, int zkClientConnectTimeout) {
this.zkClient = new SolrZkClient(zkServerAddress, zkClientTimeout, zkClientConnectTimeout,
// on reconnect, reload cloud info
new OnReconnect() {
@Override
public void command() {
try {
ZkStateReader.this.createClusterStateWatchersAndUpdate();
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.error("Interrupted", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "Interrupted", e);
}
}
});
this.configManager = new ZkConfigManager(zkClient);
this.closeClient = true;
this.securityNodeListener = null;
assert ObjectReleaseTracker.track(this);
}
public ZkConfigManager getConfigManager() {
return configManager;
}
/**
* Forcibly refresh cluster state from ZK. Do this only to avoid race conditions because it's expensive.
* <p>
* It is cheaper to call {@link #forceUpdateCollection(String)} on a single collection if you must.
*
* @lucene.internal
*/
public void forciblyRefreshAllClusterStateSlow() throws KeeperException, InterruptedException {
synchronized (getUpdateLock()) {
if (clusterState == null) {
// Never initialized, just run normal initialization.
createClusterStateWatchersAndUpdate();
return;
}
// No need to set watchers because we should already have watchers registered for everything.
refreshCollectionList(null);
refreshLiveNodes(null);
refreshLegacyClusterState(null);
// Need a copy so we don't delete from what we're iterating over.
Collection<String> safeCopy = new ArrayList<>(watchedCollectionStates.keySet());
Set<String> updatedCollections = new HashSet<>();
for (String coll : safeCopy) {
DocCollection newState = fetchCollectionState(coll, null);
if (updateWatchedCollection(coll, newState)) {
updatedCollections.add(coll);
}
}
constructState(updatedCollections);
}
}
/**
* Forcibly refresh a collection's internal state from ZK. Try to avoid having to resort to this when
* a better design is possible.
*/
//TODO shouldn't we call ZooKeeper.sync() at the right places to prevent reading a stale value? We do so for aliases.
public void forceUpdateCollection(String collection) throws KeeperException, InterruptedException {
synchronized (getUpdateLock()) {
if (clusterState == null) {
log.warn("ClusterState watchers have not been initialized");
return;
}
ClusterState.CollectionRef ref = clusterState.getCollectionRef(collection);
if (ref == null || legacyCollectionStates.containsKey(collection)) {
// We either don't know anything about this collection (maybe it's new?) or it's legacy.
// First update the legacy cluster state.
log.debug("Checking legacy cluster state for collection {}", collection);
refreshLegacyClusterState(null);
if (!legacyCollectionStates.containsKey(collection)) {
// No dice, see if a new collection just got created.
LazyCollectionRef tryLazyCollection = new LazyCollectionRef(collection);
if (tryLazyCollection.get() != null) {
// What do you know, it exists!
log.debug("Adding lazily-loaded reference for collection {}", collection);
lazyCollectionStates.putIfAbsent(collection, tryLazyCollection);
constructState(Collections.singleton(collection));
}
}
} else if (ref.isLazilyLoaded()) {
log.debug("Refreshing lazily-loaded state for collection {}", collection);
if (ref.get() != null) {
return;
}
// Edge case: if there's no external collection, try refreshing legacy cluster state in case it's there.
refreshLegacyClusterState(null);
} else if (watchedCollectionStates.containsKey(collection)) {
// Exists as a watched collection, force a refresh.
log.debug("Forcing refresh of watched collection state for {}", collection);
DocCollection newState = fetchCollectionState(collection, null);
if (updateWatchedCollection(collection, newState)) {
constructState(Collections.singleton(collection));
}
} else {
log.error("Collection {} is not lazy or watched!", collection);
}
}
}
/**
* Refresh the set of live nodes.
*/
public void updateLiveNodes() throws KeeperException, InterruptedException {
refreshLiveNodes(null);
}
public Integer compareStateVersions(String coll, int version) {
DocCollection collection = clusterState.getCollectionOrNull(coll);
if (collection == null) return null;
if (collection.getZNodeVersion() < version) {
log.debug("Server older than client {}<{}", collection.getZNodeVersion(), version);
DocCollection nu = getCollectionLive(this, coll);
if (nu == null) return -1;
if (nu.getZNodeVersion() > collection.getZNodeVersion()) {
if (updateWatchedCollection(coll, nu)) {
synchronized (getUpdateLock()) {
constructState(Collections.singleton(coll));
}
}
collection = nu;
}
}
if (collection.getZNodeVersion() == version) {
return null;
}
log.debug("Wrong version from client [{}]!=[{}]", version, collection.getZNodeVersion());
return collection.getZNodeVersion();
}
public synchronized void createClusterStateWatchersAndUpdate() throws KeeperException,
InterruptedException {
// We need to fetch the current cluster state and the set of live nodes
log.debug("Updating cluster state from ZooKeeper... ");
// Sanity check ZK structure.
if (!zkClient.exists(CLUSTER_STATE, true)) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Cannot connect to cluster at " + zkClient.getZkServerAddress() + ": cluster not found/not ready");
}
// on reconnect of SolrZkClient force refresh and re-add watches.
loadClusterProperties();
refreshLiveNodes(new LiveNodeWatcher());
refreshLegacyClusterState(new LegacyClusterStateWatcher());
refreshStateFormat2Collections();
refreshCollectionList(new CollectionsChildWatcher());
refreshAliases(aliasesManager);
if (securityNodeListener != null) {
addSecurityNodeWatcher(pair -> {
ConfigData cd = new ConfigData();
cd.data = pair.first() == null || pair.first().length == 0 ? EMPTY_MAP : Utils.getDeepCopy((Map) fromJSON(pair.first()), 4, false);
cd.version = pair.second() == null ? -1 : pair.second().getVersion();
securityData = cd;
securityNodeListener.run();
});
securityData = getSecurityProps(true);
}
collectionPropsObservers.forEach((k, v) -> {
collectionPropsWatchers.computeIfAbsent(k, PropsWatcher::new).refreshAndWatch(true);
});
}
private void addSecurityNodeWatcher(final Callable<Pair<byte[], Stat>> callback)
throws KeeperException, InterruptedException {
zkClient.exists(SOLR_SECURITY_CONF_PATH,
new Watcher() {
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
try {
synchronized (ZkStateReader.this.getUpdateLock()) {
log.debug("Updating [{}] ... ", SOLR_SECURITY_CONF_PATH);
// remake watch
final Watcher thisWatch = this;
final Stat stat = new Stat();
final byte[] data = getZkClient().getData(SOLR_SECURITY_CONF_PATH, thisWatch, stat, true);
try {
callback.call(new Pair<>(data, stat));
} catch (Exception e) {
log.error("Error running collections node listener", e);
}
}
} catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
}, true);
}
/**
* Construct the total state view from all sources.
* Must hold {@link #getUpdateLock()} before calling this.
*
* @param changedCollections collections that have changed since the last call,
* and that should fire notifications
*/
private void constructState(Set<String> changedCollections) {
Set<String> liveNodes = this.liveNodes; // volatile read
// Legacy clusterstate is authoritative, for backwards compatibility.
// To move a collection's state to format2, first create the new state2 format node, then remove legacy entry.
Map<String, ClusterState.CollectionRef> result = new LinkedHashMap<>(legacyCollectionStates);
// Add state format2 collections, but don't override legacy collection states.
for (Map.Entry<String, DocCollection> entry : watchedCollectionStates.entrySet()) {
result.putIfAbsent(entry.getKey(), new ClusterState.CollectionRef(entry.getValue()));
}
// Finally, add any lazy collections that aren't already accounted for.
for (Map.Entry<String, LazyCollectionRef> entry : lazyCollectionStates.entrySet()) {
result.putIfAbsent(entry.getKey(), entry.getValue());
}
this.clusterState = new ClusterState(liveNodes, result, legacyClusterStateVersion);
log.debug("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]",
legacyCollectionStates.keySet().size(),
collectionWatches.keySet().size(),
watchedCollectionStates.keySet().size(),
lazyCollectionStates.keySet().size(),
clusterState.getCollectionStates().size());
if (log.isTraceEnabled()) {
log.trace("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]",
legacyCollectionStates.keySet(),
collectionWatches.keySet(),
watchedCollectionStates.keySet(),
lazyCollectionStates.keySet(),
clusterState.getCollectionStates());
}
notifyCloudCollectionsListeners();
for (String collection : changedCollections) {
notifyStateWatchers(collection, clusterState.getCollectionOrNull(collection));
}
}
/**
* Refresh legacy (shared) clusterstate.json
*/
private void refreshLegacyClusterState(Watcher watcher) throws KeeperException, InterruptedException {
try {
final Stat stat = new Stat();
final byte[] data = zkClient.getData(CLUSTER_STATE, watcher, stat, true);
final ClusterState loadedData = ClusterState.load(stat.getVersion(), data, emptySet(), CLUSTER_STATE);
synchronized (getUpdateLock()) {
if (this.legacyClusterStateVersion >= stat.getVersion()) {
// Nothing to do, someone else updated same or newer.
return;
}
Set<String> updatedCollections = new HashSet<>();
for (String coll : this.collectionWatches.keySet()) {
ClusterState.CollectionRef ref = this.legacyCollectionStates.get(coll);
// legacy collections are always in-memory
DocCollection oldState = ref == null ? null : ref.get();
ClusterState.CollectionRef newRef = loadedData.getCollectionStates().get(coll);
DocCollection newState = newRef == null ? null : newRef.get();
if (newState == null) {
// check that we haven't just migrated
newState = watchedCollectionStates.get(coll);
}
if (!Objects.equals(oldState, newState)) {
updatedCollections.add(coll);
}
}
this.legacyCollectionStates = loadedData.getCollectionStates();
this.legacyClusterStateVersion = stat.getVersion();
constructState(updatedCollections);
}
} catch (KeeperException.NoNodeException e) {
// Ignore missing legacy clusterstate.json.
synchronized (getUpdateLock()) {
this.legacyCollectionStates = emptyMap();
this.legacyClusterStateVersion = 0;
constructState(Collections.emptySet());
}
}
}
/**
* Refresh state format2 collections.
*/
private void refreshStateFormat2Collections() {
for (String coll : collectionWatches.keySet()) {
new StateWatcher(coll).refreshAndWatch();
}
}
// We don't get a Stat or track versions on getChildren() calls, so force linearization.
private final Object refreshCollectionListLock = new Object();
/**
* Search for any lazy-loadable state format2 collections.
* <p>
* A stateFormat=1 collection which is not interesting to us can also
* be put into the {@link #lazyCollectionStates} map here. But that is okay
* because {@link #constructState(Set)} will give priority to collections in the
* shared collection state over this map.
* In fact this is a clever way to avoid doing a ZK exists check on
* the /collections/collection_name/state.json znode
   * Such an exists check is done in the {@link ClusterState#hasCollection(String)} and
   * {@link ClusterState#getCollectionsMap()} methods, which
   * have a safeguard against exposing wrong collection names to the users.
*/
private void refreshCollectionList(Watcher watcher) throws KeeperException, InterruptedException {
synchronized (refreshCollectionListLock) {
List<String> children = null;
try {
children = zkClient.getChildren(COLLECTIONS_ZKNODE, watcher, true);
} catch (KeeperException.NoNodeException e) {
log.warn("Error fetching collection names: [{}]", e.getMessage());
// fall through
}
if (children == null || children.isEmpty()) {
lazyCollectionStates.clear();
return;
}
// Don't lock getUpdateLock() here, we don't need it and it would cause deadlock.
// Don't mess with watchedCollections, they should self-manage.
// First, drop any children that disappeared.
this.lazyCollectionStates.keySet().retainAll(children);
for (String coll : children) {
// We will create an eager collection for any interesting collections, so don't add to lazy.
if (!collectionWatches.containsKey(coll)) {
// Double check contains just to avoid allocating an object.
LazyCollectionRef existing = lazyCollectionStates.get(coll);
if (existing == null) {
lazyCollectionStates.putIfAbsent(coll, new LazyCollectionRef(coll));
}
}
}
}
}
// We don't get a Stat or track versions on getChildren() calls, so force linearization.
private final Object refreshCollectionsSetLock = new Object();
// Ensures that only the latest getChildren fetch gets applied.
private final AtomicReference<Set<String>> lastFetchedCollectionSet = new AtomicReference<>();
/**
* Register a CloudCollectionsListener to be called when the set of collections within a cloud changes.
*/
public void registerCloudCollectionsListener(CloudCollectionsListener cloudCollectionsListener) {
cloudCollectionsListeners.add(cloudCollectionsListener);
notifyNewCloudCollectionsListener(cloudCollectionsListener);
}
/**
* Remove a registered CloudCollectionsListener.
*/
public void removeCloudCollectionsListener(CloudCollectionsListener cloudCollectionsListener) {
cloudCollectionsListeners.remove(cloudCollectionsListener);
}
private void notifyNewCloudCollectionsListener(CloudCollectionsListener listener) {
listener.onChange(Collections.emptySet(), lastFetchedCollectionSet.get());
}
private void notifyCloudCollectionsListeners() {
notifyCloudCollectionsListeners(false);
}
private void notifyCloudCollectionsListeners(boolean notifyIfSame) {
synchronized (refreshCollectionsSetLock) {
final Set<String> newCollections = getCurrentCollections();
final Set<String> oldCollections = lastFetchedCollectionSet.getAndSet(newCollections);
if (!newCollections.equals(oldCollections) || notifyIfSame) {
cloudCollectionsListeners.forEach(listener -> listener.onChange(oldCollections, newCollections));
}
}
}
private Set<String> getCurrentCollections() {
Set<String> collections = new HashSet<>();
collections.addAll(legacyCollectionStates.keySet());
collections.addAll(watchedCollectionStates.keySet());
collections.addAll(lazyCollectionStates.keySet());
return collections;
}
private class LazyCollectionRef extends ClusterState.CollectionRef {
private final String collName;
private long lastUpdateTime;
private DocCollection cachedDocCollection;
public LazyCollectionRef(String collName) {
super(null);
this.collName = collName;
this.lastUpdateTime = -1;
}
@Override
public synchronized DocCollection get(boolean allowCached) {
gets.incrementAndGet();
if (!allowCached || lastUpdateTime < 0 || System.nanoTime() - lastUpdateTime > LAZY_CACHE_TIME) {
boolean shouldFetch = true;
if (cachedDocCollection != null) {
Stat exists = null;
try {
exists = zkClient.exists(getCollectionPath(collName), null, true);
} catch (Exception e) {
}
if (exists != null && exists.getVersion() == cachedDocCollection.getZNodeVersion()) {
shouldFetch = false;
}
}
if (shouldFetch) {
cachedDocCollection = getCollectionLive(ZkStateReader.this, collName);
lastUpdateTime = System.nanoTime();
}
}
return cachedDocCollection;
}
@Override
public boolean isLazilyLoaded() {
return true;
}
@Override
public String toString() {
return "LazyCollectionRef(" + collName + ")";
}
}
// We don't get a Stat or track versions on getChildren() calls, so force linearization.
private final Object refreshLiveNodesLock = new Object();
// Ensures that only the latest getChildren fetch gets applied.
private final AtomicReference<SortedSet<String>> lastFetchedLiveNodes = new AtomicReference<>();
/**
* Refresh live_nodes.
*/
private void refreshLiveNodes(Watcher watcher) throws KeeperException, InterruptedException {
synchronized (refreshLiveNodesLock) {
SortedSet<String> newLiveNodes;
try {
List<String> nodeList = zkClient.getChildren(LIVE_NODES_ZKNODE, watcher, true);
newLiveNodes = new TreeSet<>(nodeList);
} catch (KeeperException.NoNodeException e) {
newLiveNodes = emptySortedSet();
}
lastFetchedLiveNodes.set(newLiveNodes);
}
// Can't lock getUpdateLock() until we release the other, it would cause deadlock.
SortedSet<String> oldLiveNodes, newLiveNodes;
synchronized (getUpdateLock()) {
newLiveNodes = lastFetchedLiveNodes.getAndSet(null);
if (newLiveNodes == null) {
// Someone else won the race to apply the last update, just exit.
return;
}
oldLiveNodes = this.liveNodes;
this.liveNodes = newLiveNodes;
if (clusterState != null) {
clusterState.setLiveNodes(newLiveNodes);
}
}
if (oldLiveNodes.size() != newLiveNodes.size()) {
log.info("Updated live nodes from ZooKeeper... ({}) -> ({})", oldLiveNodes.size(), newLiveNodes.size());
}
if (log.isDebugEnabled()) {
log.debug("Updated live nodes from ZooKeeper... {} -> {}", oldLiveNodes, newLiveNodes);
}
if (!oldLiveNodes.equals(newLiveNodes)) { // fire listeners
liveNodesListeners.forEach(listener -> {
if (listener.onChange(new TreeSet<>(oldLiveNodes), new TreeSet<>(newLiveNodes))) {
removeLiveNodesListener(listener);
}
});
}
}
public void registerClusterPropertiesListener(ClusterPropertiesListener listener) {
// fire it once with current properties
if (listener.onChange(getClusterProperties())) {
removeClusterPropertiesListener(listener);
} else {
clusterPropertiesListeners.add(listener);
}
}
public void removeClusterPropertiesListener(ClusterPropertiesListener listener) {
clusterPropertiesListeners.remove(listener);
}
public void registerLiveNodesListener(LiveNodesListener listener) {
// fire it once with current live nodes
if (listener.onChange(new TreeSet<>(getClusterState().getLiveNodes()), new TreeSet<>(getClusterState().getLiveNodes()))) {
removeLiveNodesListener(listener);
}
liveNodesListeners.add(listener);
}
public void removeLiveNodesListener(LiveNodesListener listener) {
liveNodesListeners.remove(listener);
}
/**
* @return information about the cluster from ZooKeeper
*/
public ClusterState getClusterState() {
return clusterState;
}
public Object getUpdateLock() {
return this;
}
public void close() {
this.closed = true;
notifications.shutdownNow();
waitLatches.parallelStream().forEach(c -> {
c.countDown();
});
ExecutorUtil.shutdownAndAwaitTermination(notifications);
ExecutorUtil.shutdownAndAwaitTermination(collectionPropsNotifications);
if (closeClient) {
zkClient.close();
}
assert ObjectReleaseTracker.release(this);
}
@Override
public boolean isClosed() {
return closed;
}
public String getLeaderUrl(String collection, String shard, int timeout) throws InterruptedException {
ZkCoreNodeProps props = new ZkCoreNodeProps(getLeaderRetry(collection, shard, timeout));
return props.getCoreUrl();
}
public Replica getLeader(Set<String> liveNodes, DocCollection docCollection, String shard) {
Replica replica = docCollection != null ? docCollection.getLeader(shard) : null;
if (replica != null && liveNodes.contains(replica.getNodeName())) {
return replica;
}
return null;
}
public Replica getLeader(String collection, String shard) {
if (clusterState != null) {
DocCollection docCollection = clusterState.getCollectionOrNull(collection);
Replica replica = docCollection != null ? docCollection.getLeader(shard) : null;
if (replica != null && getClusterState().liveNodesContain(replica.getNodeName())) {
return replica;
}
}
return null;
}
public boolean isNodeLive(String node) {
return liveNodes.contains(node);
}
/**
* Get shard leader properties, with retry if none exist.
*/
public Replica getLeaderRetry(String collection, String shard) throws InterruptedException {
return getLeaderRetry(collection, shard, GET_LEADER_RETRY_DEFAULT_TIMEOUT);
}
/**
* Get shard leader properties, with retry if none exist.
*/
public Replica getLeaderRetry(String collection, String shard, int timeout) throws InterruptedException {
AtomicReference<Replica> leader = new AtomicReference<>();
try {
waitForState(collection, timeout, TimeUnit.MILLISECONDS, (n, c) -> {
if (c == null)
return false;
Replica l = getLeader(n, c, shard);
if (l != null) {
leader.set(l);
return true;
}
return false;
});
} catch (TimeoutException e) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "No registered leader was found after waiting for "
+ timeout + "ms " + ", collection: " + collection + " slice: " + shard + " saw state=" + clusterState.getCollectionOrNull(collection)
+ " with live_nodes=" + clusterState.getLiveNodes());
}
return leader.get();
}
/**
* Get path where shard leader properties live in zookeeper.
*/
public static String getShardLeadersPath(String collection, String shardId) {
return COLLECTIONS_ZKNODE + "/" + collection + "/"
+ SHARD_LEADERS_ZKNODE + (shardId != null ? ("/" + shardId)
: "") + "/leader";
}
/**
* Get path where shard leader elections ephemeral nodes are.
*/
public static String getShardLeadersElectPath(String collection, String shardId) {
return COLLECTIONS_ZKNODE + "/" + collection + "/"
+ LEADER_ELECT_ZKNODE + (shardId != null ? ("/" + shardId + "/" + ELECTION_NODE)
: "");
}
public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName) {
return getReplicaProps(collection, shardId, thisCoreNodeName, null);
}
public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName,
Replica.State mustMatchStateFilter) {
return getReplicaProps(collection, shardId, thisCoreNodeName, mustMatchStateFilter, null);
}
public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName,
Replica.State mustMatchStateFilter, Replica.State mustNotMatchStateFilter) {
    //TODO: We don't need all of these getReplicaProps method overloads. Also, it's odd that the default is to return replicas of type TLOG and NRT only
return getReplicaProps(collection, shardId, thisCoreNodeName, mustMatchStateFilter, null, EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT));
}
public List<ZkCoreNodeProps> getReplicaProps(String collection, String shardId, String thisCoreNodeName,
Replica.State mustMatchStateFilter, Replica.State mustNotMatchStateFilter, final EnumSet<Replica.Type> acceptReplicaType) {
assert thisCoreNodeName != null;
ClusterState clusterState = this.clusterState;
if (clusterState == null) {
return null;
}
final DocCollection docCollection = clusterState.getCollectionOrNull(collection);
if (docCollection == null || docCollection.getSlicesMap() == null) {
throw new ZooKeeperException(ErrorCode.BAD_REQUEST,
"Could not find collection in zk: " + collection);
}
Map<String, Slice> slices = docCollection.getSlicesMap();
Slice replicas = slices.get(shardId);
if (replicas == null) {
throw new ZooKeeperException(ErrorCode.BAD_REQUEST, "Could not find shardId in zk: " + shardId);
}
Map<String, Replica> shardMap = replicas.getReplicasMap();
List<ZkCoreNodeProps> nodes = new ArrayList<>(shardMap.size());
for (Entry<String, Replica> entry : shardMap.entrySet().stream().filter((e) -> acceptReplicaType.contains(e.getValue().getType())).collect(Collectors.toList())) {
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(entry.getValue());
String coreNodeName = entry.getValue().getName();
if (clusterState.liveNodesContain(nodeProps.getNodeName()) && !coreNodeName.equals(thisCoreNodeName)) {
if (mustMatchStateFilter == null || mustMatchStateFilter == Replica.State.getState(nodeProps.getState())) {
if (mustNotMatchStateFilter == null || mustNotMatchStateFilter != Replica.State.getState(nodeProps.getState())) {
nodes.add(nodeProps);
}
}
}
}
if (nodes.size() == 0) {
// no replicas
return null;
}
return nodes;
}
public SolrZkClient getZkClient() {
return zkClient;
}
/**
* Get a cluster property
* <p>
* N.B. Cluster properties are updated via ZK watchers, and so may not necessarily
* be completely up-to-date. If you need to get the latest version, then use a
* {@link ClusterProperties} instance.
*
* @param key the property to read
* @param defaultValue a default value to use if no such property exists
* @param <T> the type of the property
* @return the cluster property, or a default if the property is not set
*/
@SuppressWarnings("unchecked")
public <T> T getClusterProperty(String key, T defaultValue) {
T value = (T) Utils.getObjectByPath(clusterProperties, false, key);
if (value == null)
return defaultValue;
return value;
}
/**
* Same as the above but allows a full json path as a list of parts
*
   * @param keyPath      path to the property, e.g. ["collectionDefaults", "numShards"]
* @param defaultValue a default value to use if no such property exists
* @return the cluster property, or a default if the property is not set
*/
public <T> T getClusterProperty(List<String> keyPath, T defaultValue) {
T value = (T) Utils.getObjectByPath(clusterProperties, false, keyPath);
if (value == null)
return defaultValue;
return value;
}
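  // Editorial note (not part of the original file): a minimal usage sketch for the
  // key-path overload above. The "reader" variable, the property path, and the default
  // value are assumptions for illustration only.
  //
  //   Object numShards = reader.getClusterProperty(
  //       Arrays.asList("collectionDefaults", "numShards"), 1);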
/**
* Get all cluster properties for this cluster
* <p>
* N.B. Cluster properties are updated via ZK watchers, and so may not necessarily
* be completely up-to-date. If you need to get the latest version, then use a
* {@link ClusterProperties} instance.
*
* @return a Map of cluster properties
*/
public Map<String, Object> getClusterProperties() {
return Collections.unmodifiableMap(clusterProperties);
}
private final Watcher clusterPropertiesWatcher = event -> {
// session events are not change events, and do not remove the watcher
if (Watcher.Event.EventType.None.equals(event.getType())) {
return;
}
loadClusterProperties();
};
@SuppressWarnings("unchecked")
private void loadClusterProperties() {
try {
while (true) {
try {
byte[] data = zkClient.getData(ZkStateReader.CLUSTER_PROPS, clusterPropertiesWatcher, new Stat(), true);
this.clusterProperties = ClusterProperties.convertCollectionDefaultsToNestedFormat((Map<String, Object>) Utils.fromJSON(data));
log.debug("Loaded cluster properties: {}", this.clusterProperties);
for (ClusterPropertiesListener listener : clusterPropertiesListeners) {
listener.onChange(getClusterProperties());
}
return;
} catch (KeeperException.NoNodeException e) {
this.clusterProperties = Collections.emptyMap();
log.debug("Loaded empty cluster properties");
// set an exists watch, and if the node has been created since the last call,
// read the data again
if (zkClient.exists(ZkStateReader.CLUSTER_PROPS, clusterPropertiesWatcher, true) == null)
return;
}
}
} catch (KeeperException | InterruptedException e) {
log.error("Error reading cluster properties from zookeeper", SolrZkClient.checkInterrupted(e));
}
}
/**
* Get collection properties for a given collection. If the collection is watched, simply return it from the cache,
* otherwise fetch it directly from zookeeper. This is a convenience for {@code getCollectionProperties(collection,0)}
*
* @param collection the collection for which properties are desired
* @return a map representing the key/value properties for the collection.
*/
public Map<String, String> getCollectionProperties(final String collection) {
return getCollectionProperties(collection, 0);
}
/**
* Get and cache collection properties for a given collection. If the collection is watched, or still cached
* simply return it from the cache, otherwise fetch it directly from zookeeper and retain the value for at
* least cacheForMillis milliseconds. Cached properties are watched in zookeeper and updated automatically.
* This version of {@code getCollectionProperties} should be used when properties need to be consulted
* frequently in the absence of an active {@link CollectionPropsWatcher}.
*
* @param collection The collection for which properties are desired
* @param cacheForMillis The minimum number of milliseconds to maintain a cache for the specified collection's
* properties. Setting a {@code CollectionPropsWatcher} will override this value and retain
* the cache for the life of the watcher. A lack of changes in zookeeper may allow the
* caching to remain for a greater duration up to the cycle time of {@link CacheCleaner}.
* Passing zero for this value will explicitly remove the cached copy if and only if it is
* due to expire and no watch exists. Any positive value will extend the expiration time
* if required.
* @return a map representing the key/value properties for the collection.
*/
public Map<String, String> getCollectionProperties(final String collection, long cacheForMillis) {
synchronized (watchedCollectionProps) { // making decisions based on the result of a get...
Watcher watcher = null;
if (cacheForMillis > 0) {
watcher = collectionPropsWatchers.compute(collection,
(c, w) -> w == null ? new PropsWatcher(c, cacheForMillis) : w.renew(cacheForMillis));
}
VersionedCollectionProps vprops = watchedCollectionProps.get(collection);
boolean haveUnexpiredProps = vprops != null && vprops.cacheUntilNs > System.nanoTime();
long untilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(cacheForMillis, TimeUnit.MILLISECONDS);
Map<String, String> properties;
if (haveUnexpiredProps) {
properties = vprops.props;
vprops.cacheUntilNs = Math.max(vprops.cacheUntilNs, untilNs);
} else {
try {
VersionedCollectionProps vcp = fetchCollectionProperties(collection, watcher);
properties = vcp.props;
if (cacheForMillis > 0) {
vcp.cacheUntilNs = untilNs;
watchedCollectionProps.put(collection, vcp);
} else {
// we're synchronized on watchedCollectionProps and we can only get here if we have found an expired
// vprops above, so it is safe to remove the cached value and let the GC free up some mem a bit sooner.
if (!collectionPropsObservers.containsKey(collection)) {
watchedCollectionProps.remove(collection);
}
}
} catch (Exception e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading collection properties", SolrZkClient.checkInterrupted(e));
}
}
return properties;
}
}
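  // Editorial note (not part of the original file): a minimal usage sketch for the
  // caching overload above, assuming a ZkStateReader reference named "reader" and a
  // collection named "gettingstarted" (both assumptions for illustration). Properties
  // are kept cached for ~10 seconds so repeated lookups avoid extra ZooKeeper reads.
  //
  //   Map<String, String> props = reader.getCollectionProperties("gettingstarted", 10_000);
  //   String autoAddReplicas = props.getOrDefault("autoAddReplicas", "false");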
private class VersionedCollectionProps {
int zkVersion;
Map<String, String> props;
long cacheUntilNs = 0;
VersionedCollectionProps(int zkVersion, Map<String, String> props) {
this.zkVersion = zkVersion;
this.props = props;
}
}
static String getCollectionPropsPath(final String collection) {
return COLLECTIONS_ZKNODE + '/' + collection + '/' + COLLECTION_PROPS_ZKNODE;
}
@SuppressWarnings("unchecked")
private VersionedCollectionProps fetchCollectionProperties(String collection, Watcher watcher) throws KeeperException, InterruptedException {
final String znodePath = getCollectionPropsPath(collection);
// lazy init cache cleaner once we know someone is using collection properties.
if (collectionPropsCacheCleaner == null) {
synchronized (this) { // There can be only one! :)
if (collectionPropsCacheCleaner == null) {
collectionPropsCacheCleaner = notifications.submit(new CacheCleaner());
}
}
}
while (true) {
try {
Stat stat = new Stat();
byte[] data = zkClient.getData(znodePath, watcher, stat, true);
return new VersionedCollectionProps(stat.getVersion(), (Map<String, String>) Utils.fromJSON(data));
} catch (ClassCastException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Unable to parse collection properties for collection " + collection, e);
} catch (KeeperException.NoNodeException e) {
if (watcher != null) {
// Leave an exists watch in place in case a collectionprops.json is created later.
Stat exists = zkClient.exists(znodePath, watcher, true);
if (exists != null) {
// Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists.
// Loop and try again.
continue;
}
}
return new VersionedCollectionProps(-1, EMPTY_MAP);
}
}
}
/**
* Returns the content of /security.json from ZooKeeper as a Map
   * If the file doesn't exist, it returns null.
*/
public ConfigData getSecurityProps(boolean getFresh) {
if (!getFresh) {
if (securityData == null) return new ConfigData(EMPTY_MAP, -1);
return new ConfigData(securityData.data, securityData.version);
}
try {
Stat stat = new Stat();
if (getZkClient().exists(SOLR_SECURITY_CONF_PATH, true)) {
final byte[] data = getZkClient().getData(ZkStateReader.SOLR_SECURITY_CONF_PATH, null, stat, true);
return data != null && data.length > 0 ?
new ConfigData((Map<String, Object>) Utils.fromJSON(data), stat.getVersion()) :
null;
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading security properties", e);
} catch (KeeperException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Error reading security properties", e);
}
return null;
}
/**
* Returns the baseURL corresponding to a given node's nodeName --
* NOTE: does not (currently) imply that the nodeName (or resulting
* baseURL) exists in the cluster.
*
* @lucene.experimental
*/
public String getBaseUrlForNodeName(final String nodeName) {
return Utils.getBaseUrlForNodeName(nodeName, getClusterProperty(URL_SCHEME, "http"));
}
/**
* Watches a single collection's format2 state.json.
*/
class StateWatcher implements Watcher {
private final String coll;
StateWatcher(String coll) {
this.coll = coll;
}
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
if (!collectionWatches.containsKey(coll)) {
// This collection is no longer interesting, stop watching.
log.debug("Uninteresting collection {}", coll);
return;
}
Set<String> liveNodes = ZkStateReader.this.liveNodes;
log.info("A cluster state change: [{}] for collection [{}] has occurred - updating... (live nodes size: [{}])",
event, coll, liveNodes.size());
refreshAndWatch();
}
/**
* Refresh collection state from ZK and leave a watch for future changes.
* As a side effect, updates {@link #clusterState} and {@link #watchedCollectionStates}
* with the results of the refresh.
*/
public void refreshAndWatch() {
try {
DocCollection newState = fetchCollectionState(coll, this);
updateWatchedCollection(coll, newState);
synchronized (getUpdateLock()) {
constructState(Collections.singleton(coll));
}
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("Unwatched collection: [{}]", coll, e);
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.error("Unwatched collection: [{}]", coll, e);
}
}
}
/**
* Watches the legacy clusterstate.json.
*/
class LegacyClusterStateWatcher implements Watcher {
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
int liveNodesSize = ZkStateReader.this.clusterState == null ? 0 : ZkStateReader.this.clusterState.getLiveNodes().size();
log.debug("A cluster state change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodesSize);
refreshAndWatch();
}
/**
* Must hold {@link #getUpdateLock()} before calling this method.
*/
public void refreshAndWatch() {
try {
refreshLegacyClusterState(this);
} catch (KeeperException.NoNodeException e) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Cannot connect to cluster at " + zkClient.getZkServerAddress() + ": cluster not found/not ready");
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
}
/**
* Watches collection properties
*/
class PropsWatcher implements Watcher {
private final String coll;
private long watchUntilNs;
PropsWatcher(String coll) {
this.coll = coll;
watchUntilNs = 0;
}
PropsWatcher(String coll, long forMillis) {
this.coll = coll;
watchUntilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(forMillis, TimeUnit.MILLISECONDS);
}
public PropsWatcher renew(long forMillis) {
watchUntilNs = System.nanoTime() + TimeUnit.NANOSECONDS.convert(forMillis, TimeUnit.MILLISECONDS);
return this;
}
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
boolean expired = System.nanoTime() > watchUntilNs;
if (!collectionPropsObservers.containsKey(coll) && expired) {
// No one can be notified of the change, we can ignore it and "unset" the watch
log.debug("Ignoring property change for collection {}", coll);
return;
}
log.info("A collection property change: [{}] for collection [{}] has occurred - updating...",
event, coll);
refreshAndWatch(true);
}
/**
* Refresh collection properties from ZK and leave a watch for future changes. Updates the properties in
* watchedCollectionProps with the results of the refresh. Optionally notifies watchers
*/
void refreshAndWatch(boolean notifyWatchers) {
try {
synchronized (watchedCollectionProps) { // making decisions based on the result of a get...
VersionedCollectionProps vcp = fetchCollectionProperties(coll, this);
Map<String, String> properties = vcp.props;
VersionedCollectionProps existingVcp = watchedCollectionProps.get(coll);
if (existingVcp == null || // never called before, record what we found
vcp.zkVersion > existingVcp.zkVersion || // newer info we should update
vcp.zkVersion == -1) { // node was deleted start over
watchedCollectionProps.put(coll, vcp);
if (notifyWatchers) {
notifyPropsWatchers(coll, properties);
}
if (vcp.zkVersion == -1 && existingVcp != null) { // Collection DELETE detected
// We should not be caching a collection that has been deleted.
watchedCollectionProps.remove(coll);
// core ref counting not relevant here, don't need canRemove(), we just sent
// a notification of an empty set of properties, no reason to watch what doesn't exist.
collectionPropsObservers.remove(coll);
// This is the one time we know it's safe to throw this out. We just failed to set the watch
// due to an NoNodeException, so it isn't held by ZK and can't re-set itself due to an update.
collectionPropsWatchers.remove(coll);
}
}
}
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("Lost collection property watcher for {} due to ZK error", coll, e);
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
log.error("Lost collection property watcher for {} due to the thread being interrupted", coll, e);
}
}
}
/**
* Watches /collections children .
*/
class CollectionsChildWatcher implements Watcher {
@Override
public void process(WatchedEvent event) {
if (ZkStateReader.this.closed) {
return;
}
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
log.debug("A collections change: [{}], has occurred - updating...", event);
refreshAndWatch();
synchronized (getUpdateLock()) {
constructState(Collections.emptySet());
}
}
/**
* Must hold {@link #getUpdateLock()} before calling this method.
*/
public void refreshAndWatch() {
try {
refreshCollectionList(this);
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
}
/**
* Watches the live_nodes and syncs changes.
*/
class LiveNodeWatcher implements Watcher {
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
log.debug("A live node change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodes.size());
refreshAndWatch();
}
public void refreshAndWatch() {
try {
refreshLiveNodes(this);
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
}
public static DocCollection getCollectionLive(ZkStateReader zkStateReader, String coll) {
try {
return zkStateReader.fetchCollectionState(coll, null);
} catch (KeeperException e) {
throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK: " + coll, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(ErrorCode.BAD_REQUEST, "Could not load collection from ZK: " + coll, e);
}
}
private DocCollection fetchCollectionState(String coll, Watcher watcher) throws KeeperException, InterruptedException {
String collectionPath = getCollectionPath(coll);
while (true) {
try {
Stat stat = new Stat();
byte[] data = zkClient.getData(collectionPath, watcher, stat, true);
ClusterState state = ClusterState.load(stat.getVersion(), data,
Collections.<String>emptySet(), collectionPath);
ClusterState.CollectionRef collectionRef = state.getCollectionStates().get(coll);
return collectionRef == null ? null : collectionRef.get();
} catch (KeeperException.NoNodeException e) {
if (watcher != null) {
// Leave an exists watch in place in case a state.json is created later.
Stat exists = zkClient.exists(collectionPath, watcher, true);
if (exists != null) {
// Rare race condition, we tried to fetch the data and couldn't find it, then we found it exists.
// Loop and try again.
continue;
}
}
return null;
}
}
}
public static String getCollectionPathRoot(String coll) {
return COLLECTIONS_ZKNODE + "/" + coll;
}
public static String getCollectionPath(String coll) {
return getCollectionPathRoot(coll) + "/state.json";
}
/**
* Notify this reader that a local Core is a member of a collection, and so that collection
* state should be watched.
* <p>
* Not a public API. This method should only be called from ZkController.
* <p>
* The number of cores per-collection is tracked, and adding multiple cores from the same
* collection does not increase the number of watches.
*
* @param collection the collection that the core is a member of
* @see ZkStateReader#unregisterCore(String)
*/
public void registerCore(String collection) {
AtomicBoolean reconstructState = new AtomicBoolean(false);
collectionWatches.compute(collection, (k, v) -> {
if (v == null) {
reconstructState.set(true);
v = new CollectionWatch<>();
}
v.coreRefCount++;
return v;
});
if (reconstructState.get()) {
new StateWatcher(collection).refreshAndWatch();
}
}
/**
* Notify this reader that a local core that is a member of a collection has been closed.
* <p>
* Not a public API. This method should only be called from ZkController.
* <p>
* If no cores are registered for a collection, and there are no {@link CollectionStateWatcher}s
* for that collection either, the collection watch will be removed.
*
* @param collection the collection that the core belongs to
*/
public void unregisterCore(String collection) {
AtomicBoolean reconstructState = new AtomicBoolean(false);
collectionWatches.compute(collection, (k, v) -> {
if (v == null)
return null;
if (v.coreRefCount > 0)
v.coreRefCount--;
if (v.canBeRemoved()) {
watchedCollectionStates.remove(collection);
lazyCollectionStates.put(collection, new LazyCollectionRef(collection));
reconstructState.set(true);
return null;
}
return v;
});
if (reconstructState.get()) {
synchronized (getUpdateLock()) {
constructState(Collections.emptySet());
}
}
}
/**
* Register a CollectionStateWatcher to be called when the state of a collection changes
* <em>or</em> the set of live nodes changes.
*
* <p>
   * The Watcher will automatically be removed when its
* <code>onStateChanged</code> returns <code>true</code>
* </p>
*
* <p>
   * This method is just syntactic sugar for registering both a {@link DocCollectionWatcher} and
   * a {@link LiveNodesListener}. Callers that only care about one or the other (but not both) are
   * encouraged to use the more specific register methods, as it may reduce the number of
   * ZooKeeper watchers needed and the amount of network/CPU used.
* </p>
*
* @see #registerDocCollectionWatcher
* @see #registerLiveNodesListener
*/
public void registerCollectionStateWatcher(String collection, CollectionStateWatcher stateWatcher) {
final DocCollectionAndLiveNodesWatcherWrapper wrapper
= new DocCollectionAndLiveNodesWatcherWrapper(collection, stateWatcher);
registerDocCollectionWatcher(collection, wrapper);
registerLiveNodesListener(wrapper);
DocCollection state = clusterState.getCollectionOrNull(collection);
if (stateWatcher.onStateChanged(liveNodes, state) == true) {
removeCollectionStateWatcher(collection, stateWatcher);
}
}
/**
* Register a DocCollectionWatcher to be called when the state of a collection changes
*
* <p>
   * The Watcher will automatically be removed when its
* <code>onStateChanged</code> returns <code>true</code>
* </p>
*/
public void registerDocCollectionWatcher(String collection, DocCollectionWatcher stateWatcher) {
AtomicBoolean watchSet = new AtomicBoolean(false);
collectionWatches.compute(collection, (k, v) -> {
if (v == null) {
v = new CollectionWatch<>();
watchSet.set(true);
}
v.stateWatchers.add(stateWatcher);
return v;
});
if (watchSet.get()) {
new StateWatcher(collection).refreshAndWatch();
}
DocCollection state = clusterState.getCollectionOrNull(collection);
if (stateWatcher.onStateChanged(state) == true) {
removeDocCollectionWatcher(collection, stateWatcher);
}
}
/**
* Block until a CollectionStatePredicate returns true, or the wait times out
*
* <p>
* Note that the predicate may be called again even after it has returned true, so
* implementors should avoid changing state within the predicate call itself.
* </p>
*
* <p>
* This implementation utilizes {@link CollectionStateWatcher} internally.
* Callers that don't care about liveNodes are encouraged to use a {@link DocCollection} {@link Predicate}
* instead
* </p>
*
* @param collection the collection to watch
* @param wait how long to wait
* @param unit the units of the wait parameter
* @param predicate the predicate to call on state changes
* @throws InterruptedException on interrupt
* @throws TimeoutException on timeout
* @see #waitForState(String, long, TimeUnit, Predicate)
* @see #registerCollectionStateWatcher
*/
public void waitForState(final String collection, long wait, TimeUnit unit, CollectionStatePredicate predicate)
throws InterruptedException, TimeoutException {
if (closed) {
throw new AlreadyClosedException();
}
final CountDownLatch latch = new CountDownLatch(1);
waitLatches.add(latch);
AtomicReference<DocCollection> docCollection = new AtomicReference<>();
CollectionStateWatcher watcher = (n, c) -> {
docCollection.set(c);
boolean matches = predicate.matches(n, c);
if (matches)
latch.countDown();
return matches;
};
registerCollectionStateWatcher(collection, watcher);
try {
// wait for the watcher predicate to return true, or time out
if (!latch.await(wait, unit))
throw new TimeoutException("Timeout waiting to see state for collection=" + collection + " :" + docCollection.get());
} finally {
removeCollectionStateWatcher(collection, watcher);
waitLatches.remove(latch);
}
}
/**
* Block until a Predicate returns true, or the wait times out
*
* <p>
* Note that the predicate may be called again even after it has returned true, so
* implementors should avoid changing state within the predicate call itself.
* </p>
*
* @param collection the collection to watch
* @param wait how long to wait
* @param unit the units of the wait parameter
* @param predicate the predicate to call on state changes
* @throws InterruptedException on interrupt
* @throws TimeoutException on timeout
*/
public void waitForState(final String collection, long wait, TimeUnit unit, Predicate<DocCollection> predicate)
throws InterruptedException, TimeoutException {
if (closed) {
throw new AlreadyClosedException();
}
final CountDownLatch latch = new CountDownLatch(1);
waitLatches.add(latch);
AtomicReference<DocCollection> docCollection = new AtomicReference<>();
DocCollectionWatcher watcher = (c) -> {
docCollection.set(c);
boolean matches = predicate.test(c);
if (matches)
latch.countDown();
return matches;
};
registerDocCollectionWatcher(collection, watcher);
try {
// wait for the watcher predicate to return true, or time out
if (!latch.await(wait, unit))
throw new TimeoutException("Timeout waiting to see state for collection=" + collection + " :" + docCollection.get());
} finally {
removeDocCollectionWatcher(collection, watcher);
waitLatches.remove(latch);
}
}
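  // Editorial note (not part of the original file): a minimal usage sketch for the
  // DocCollection-predicate overload above, blocking until the collection's state is
  // visible and reports at least one slice. The collection name and timeout are
  // assumptions for illustration only.
  //
  //   reader.waitForState("gettingstarted", 30, TimeUnit.SECONDS,
  //       (c) -> c != null && !c.getSlices().isEmpty());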
/**
* Block until a LiveNodesStatePredicate returns true, or the wait times out
* <p>
* Note that the predicate may be called again even after it has returned true, so
* implementors should avoid changing state within the predicate call itself.
* </p>
*
* @param wait how long to wait
* @param unit the units of the wait parameter
* @param predicate the predicate to call on state changes
* @throws InterruptedException on interrupt
* @throws TimeoutException on timeout
*/
public void waitForLiveNodes(long wait, TimeUnit unit, LiveNodesPredicate predicate)
throws InterruptedException, TimeoutException {
if (closed) {
throw new AlreadyClosedException();
}
final CountDownLatch latch = new CountDownLatch(1);
waitLatches.add(latch);
LiveNodesListener listener = (o, n) -> {
boolean matches = predicate.matches(o, n);
if (matches)
latch.countDown();
return matches;
};
registerLiveNodesListener(listener);
try {
// wait for the watcher predicate to return true, or time out
if (!latch.await(wait, unit))
throw new TimeoutException("Timeout waiting for live nodes, currently they are: " + getClusterState().getLiveNodes());
} finally {
removeLiveNodesListener(listener);
waitLatches.remove(latch);
}
}
/**
* Remove a watcher from a collection's watch list.
* <p>
* This allows Zookeeper watches to be removed if there is no interest in the
* collection.
* </p>
*
* @param collection the collection
* @param watcher the watcher
* @see #registerCollectionStateWatcher
*/
public void removeCollectionStateWatcher(String collection, CollectionStateWatcher watcher) {
final DocCollectionAndLiveNodesWatcherWrapper wrapper
= new DocCollectionAndLiveNodesWatcherWrapper(collection, watcher);
removeDocCollectionWatcher(collection, wrapper);
removeLiveNodesListener(wrapper);
}
/**
* Remove a watcher from a collection's watch list.
* <p>
* This allows Zookeeper watches to be removed if there is no interest in the
* collection.
* </p>
*
* @param collection the collection
* @param watcher the watcher
* @see #registerDocCollectionWatcher
*/
public void removeDocCollectionWatcher(String collection, DocCollectionWatcher watcher) {
AtomicBoolean reconstructState = new AtomicBoolean(false);
collectionWatches.compute(collection, (k, v) -> {
if (v == null)
return null;
v.stateWatchers.remove(watcher);
if (v.canBeRemoved()) {
watchedCollectionStates.remove(collection);
lazyCollectionStates.put(collection, new LazyCollectionRef(collection));
reconstructState.set(true);
return null;
}
return v;
});
if (reconstructState.get()) {
synchronized (getUpdateLock()) {
constructState(Collections.emptySet());
}
}
}
/* package-private for testing */
Set<DocCollectionWatcher> getStateWatchers(String collection) {
final Set<DocCollectionWatcher> watchers = new HashSet<>();
collectionWatches.compute(collection, (k, v) -> {
if (v != null) {
watchers.addAll(v.stateWatchers);
}
return v;
});
return watchers;
}
// returns true if the state has changed
private boolean updateWatchedCollection(String coll, DocCollection newState) {
if (newState == null) {
log.debug("Removing cached collection state for [{}]", coll);
watchedCollectionStates.remove(coll);
return true;
}
boolean updated = false;
// CAS update loop
while (true) {
if (!collectionWatches.containsKey(coll)) {
break;
}
DocCollection oldState = watchedCollectionStates.get(coll);
if (oldState == null) {
if (watchedCollectionStates.putIfAbsent(coll, newState) == null) {
log.debug("Add data for [{}] ver [{}]", coll, newState.getZNodeVersion());
updated = true;
break;
}
} else {
if (oldState.getZNodeVersion() >= newState.getZNodeVersion()) {
// no change to state, but we might have been triggered by the addition of a
// state watcher, so run notifications
updated = true;
break;
}
if (watchedCollectionStates.replace(coll, oldState, newState)) {
log.debug("Updating data for [{}] from [{}] to [{}]", coll, oldState.getZNodeVersion(), newState.getZNodeVersion());
updated = true;
break;
}
}
}
// Resolve race with unregisterCore.
if (!collectionWatches.containsKey(coll)) {
watchedCollectionStates.remove(coll);
log.debug("Removing uninteresting collection [{}]", coll);
}
return updated;
}
public void registerCollectionPropsWatcher(final String collection, CollectionPropsWatcher propsWatcher) {
AtomicBoolean watchSet = new AtomicBoolean(false);
collectionPropsObservers.compute(collection, (k, v) -> {
if (v == null) {
v = new CollectionWatch<>();
watchSet.set(true);
}
v.stateWatchers.add(propsWatcher);
return v;
});
if (watchSet.get()) {
collectionPropsWatchers.computeIfAbsent(collection, PropsWatcher::new).refreshAndWatch(false);
}
}
public void removeCollectionPropsWatcher(String collection, CollectionPropsWatcher watcher) {
collectionPropsObservers.compute(collection, (k, v) -> {
if (v == null)
return null;
v.stateWatchers.remove(watcher);
if (v.canBeRemoved()) {
        // don't want this to happen in the middle of other blocks that might add it back.
synchronized (watchedCollectionProps) {
watchedCollectionProps.remove(collection);
}
return null;
}
return v;
});
}
public static class ConfigData {
public Map<String, Object> data;
public int version;
public ConfigData() {
}
public ConfigData(Map<String, Object> data, int version) {
this.data = data;
this.version = version;
}
}
private void notifyStateWatchers(String collection, DocCollection collectionState) {
if (this.closed) {
return;
}
try {
notifications.submit(new Notification(collection, collectionState));
} catch (RejectedExecutionException e) {
if (closed == false) {
log.error("Couldn't run collection notifications for {}", collection, e);
}
}
}
private class Notification implements Runnable {
final String collection;
final DocCollection collectionState;
private Notification(String collection, DocCollection collectionState) {
this.collection = collection;
this.collectionState = collectionState;
}
@Override
public void run() {
List<DocCollectionWatcher> watchers = new ArrayList<>();
collectionWatches.compute(collection, (k, v) -> {
if (v == null)
return null;
watchers.addAll(v.stateWatchers);
return v;
});
for (DocCollectionWatcher watcher : watchers) {
try {
if (watcher.onStateChanged(collectionState)) {
removeDocCollectionWatcher(collection, watcher);
}
} catch (Exception exception) {
log.warn("Error on calling watcher", exception);
}
}
}
}
//
// Aliases related
//
/**
* Access to the {@link Aliases}.
*/
public final AliasesManager aliasesManager = new AliasesManager();
/**
* Get an immutable copy of the present state of the aliases. References to this object should not be retained
* in any context where it will be important to know if aliases have changed.
*
   * @return The current aliases; Aliases.EMPTY if not running SolrCloud or if no aliases have existed yet. Never returns null.
*/
public Aliases getAliases() {
return aliasesManager.getAliases();
}
// called by createClusterStateWatchersAndUpdate()
private void refreshAliases(AliasesManager watcher) throws KeeperException, InterruptedException {
synchronized (getUpdateLock()) {
constructState(Collections.emptySet());
zkClient.exists(ALIASES, watcher, true);
}
aliasesManager.update();
}
/**
* A class to manage the aliases instance, including watching for changes.
* There should only ever be one instance of this class
* per instance of ZkStateReader. Normally it will not be useful to create a new instance since
* this watcher automatically re-registers itself every time it is updated.
*/
public class AliasesManager implements Watcher { // the holder is a Zk watcher
    // note: as of this writing, this class is very generic. Is it useful to use for other ZK managed things?
private final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private volatile Aliases aliases = Aliases.EMPTY;
public Aliases getAliases() {
return aliases; // volatile read
}
/**
* Writes an updated {@link Aliases} to zk.
* It will retry if there are races with other modifications, giving up after 30 seconds with a SolrException.
     * The caller should understand it's possible the aliases have changed further by the time it examines them.
*/
public void applyModificationAndExportToZk(UnaryOperator<Aliases> op) {
      // The current aliases hasn't been update()'ed yet -- which should be impossible? Anyway, just update it first.
if (aliases.getZNodeVersion() == -1) {
try {
boolean updated = update();
assert updated;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e);
} catch (KeeperException e) {
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e);
}
}
final long deadlineNanos = System.nanoTime() + TimeUnit.SECONDS.toNanos(30);
// note: triesLeft tuning is based on ConcurrentCreateRoutedAliasTest
for (int triesLeft = 30; triesLeft > 0; triesLeft--) {
// we could synchronize on "this" but there doesn't seem to be a point; we have a retry loop.
Aliases curAliases = getAliases();
Aliases modAliases = op.apply(curAliases);
final byte[] modAliasesJson = modAliases.toJSON();
if (curAliases == modAliases) {
log.debug("Current aliases has the desired modification; no further ZK interaction needed.");
return;
}
try {
try {
final Stat stat = getZkClient().setData(ALIASES, modAliasesJson, curAliases.getZNodeVersion(), true);
setIfNewer(Aliases.fromJSON(modAliasesJson, stat.getVersion()));
return;
} catch (KeeperException.BadVersionException e) {
log.debug(e.toString(), e);
log.warn("Couldn't save aliases due to race with another modification; will update and retry until timeout");
// considered a backoff here, but we really do want to compete strongly since the normal case is
// that we will do one update and succeed. This is left as a hot loop for limited tries intentionally.
// More failures than that here probably indicate a bug or a very strange high write frequency usage for
// aliases.json, timeouts mean zk is being very slow to respond, or this node is being crushed
// by other processing and just can't find any cpu cycles at all.
update();
if (deadlineNanos < System.nanoTime()) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out trying to update aliases! " +
"Either zookeeper or this node may be overloaded.");
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e);
} catch (KeeperException e) {
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, e.toString(), e);
}
}
throw new SolrException(ErrorCode.SERVER_ERROR, "Too many successive version failures trying to update aliases");
}
/**
* Ensures the internal aliases is up to date. If there is a change, return true.
*
* @return true if an update was performed
*/
public boolean update() throws KeeperException, InterruptedException {
log.debug("Checking ZK for most up to date Aliases {}", ALIASES);
// Call sync() first to ensure the subsequent read (getData) is up to date.
zkClient.getSolrZooKeeper().sync(ALIASES, null, null);
Stat stat = new Stat();
final byte[] data = zkClient.getData(ALIASES, null, stat, true);
return setIfNewer(Aliases.fromJSON(data, stat.getVersion()));
}
// ZK Watcher interface
@Override
public void process(WatchedEvent event) {
// session events are not change events, and do not remove the watcher
if (EventType.None.equals(event.getType())) {
return;
}
try {
log.debug("Aliases: updating");
// re-register the watch
Stat stat = new Stat();
final byte[] data = zkClient.getData(ALIASES, this, stat, true);
// note: it'd be nice to avoid possibly needlessly parsing if we don't update aliases but not a big deal
setIfNewer(Aliases.fromJSON(data, stat.getVersion()));
} catch (NoNodeException e) {
// /aliases.json will not always exist
} catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) {
// note: aliases.json is required to be present
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("Interrupted", e);
}
}
/**
* Update the internal aliases reference with a new one, provided that its ZK version has increased.
*
* @param newAliases the potentially newer version of Aliases
* @return true if aliases have been updated to a new version, false otherwise
*/
private boolean setIfNewer(Aliases newAliases) {
assert newAliases.getZNodeVersion() >= 0;
synchronized (this) {
int cmp = Integer.compare(aliases.getZNodeVersion(), newAliases.getZNodeVersion());
if (cmp < 0) {
log.debug("Aliases: cmp={}, new definition is: {}", cmp, newAliases);
aliases = newAliases;
this.notifyAll();
return true;
} else {
log.debug("Aliases: cmp={}, not overwriting ZK version.", cmp);
assert cmp != 0 || Arrays.equals(aliases.toJSON(), newAliases.toJSON()) : aliases + " != " + newAliases;
return false;
}
}
}
}
private void notifyPropsWatchers(String collection, Map<String, String> properties) {
try {
collectionPropsNotifications.submit(new PropsNotification(collection, properties));
} catch (RejectedExecutionException e) {
if (!closed) {
log.error("Couldn't run collection properties notifications for {}", collection, e);
}
}
}
private class PropsNotification implements Runnable {
private final String collection;
private final Map<String, String> collectionProperties;
private final List<CollectionPropsWatcher> watchers = new ArrayList<>();
private PropsNotification(String collection, Map<String, String> collectionProperties) {
this.collection = collection;
this.collectionProperties = collectionProperties;
// guarantee delivery of notification regardless of what happens to collectionPropsObservers
// while we wait our turn in the executor by capturing the list on creation.
collectionPropsObservers.compute(collection, (k, v) -> {
if (v == null)
return null;
watchers.addAll(v.stateWatchers);
return v;
});
}
@Override
public void run() {
for (CollectionPropsWatcher watcher : watchers) {
if (watcher.onStateChanged(collectionProperties)) {
removeCollectionPropsWatcher(collection, watcher);
}
}
}
}
private class CacheCleaner implements Runnable {
public void run() {
while (!Thread.interrupted()) {
try {
Thread.sleep(60000);
} catch (InterruptedException e) {
// Executor shutdown will send us an interrupt
break;
}
watchedCollectionProps.entrySet().removeIf(entry ->
entry.getValue().cacheUntilNs < System.nanoTime() && !collectionPropsObservers.containsKey(entry.getKey()));
}
}
}
/**
* Helper class that acts as both a {@link DocCollectionWatcher} and a {@link LiveNodesListener}
   * while wrapping and delegating to a {@link CollectionStateWatcher}
*/
private final class DocCollectionAndLiveNodesWatcherWrapper implements DocCollectionWatcher, LiveNodesListener {
private final String collectionName;
private final CollectionStateWatcher delegate;
public int hashCode() {
return collectionName.hashCode() * delegate.hashCode();
}
public boolean equals(Object other) {
if (other instanceof DocCollectionAndLiveNodesWatcherWrapper) {
DocCollectionAndLiveNodesWatcherWrapper that
= (DocCollectionAndLiveNodesWatcherWrapper) other;
return this.collectionName.equals(that.collectionName)
&& this.delegate.equals(that.delegate);
}
return false;
}
public DocCollectionAndLiveNodesWatcherWrapper(final String collectionName,
final CollectionStateWatcher delegate) {
this.collectionName = collectionName;
this.delegate = delegate;
}
@Override
public boolean onStateChanged(DocCollection collectionState) {
final boolean result = delegate.onStateChanged(ZkStateReader.this.liveNodes,
collectionState);
if (result) {
// it might be a while before live nodes changes, so proactively remove ourselves
removeLiveNodesListener(this);
}
return result;
}
@Override
public boolean onChange(SortedSet<String> oldLiveNodes, SortedSet<String> newLiveNodes) {
final DocCollection collection = ZkStateReader.this.clusterState.getCollectionOrNull(collectionName);
final boolean result = delegate.onStateChanged(newLiveNodes, collection);
if (result) {
// it might be a while before collection changes, so proactively remove ourselves
removeDocCollectionWatcher(collectionName, this);
}
return result;
}
}
}
| 1 | 33,384 | Small thing, without the check above this will throw a NoNodeException if the path doesn't exist. Maybe this can be wrapped in a try/catch just for the NoNodeException, so that the more user-friendly message used above can be thrown instead: `throw new KeeperException.NoNodeException("No collection found at path: " + path);` Just a thought, I'm not even sure it's worth it. | apache-lucene-solr | java |
@@ -17,7 +17,7 @@ class Project < ActiveRecord::Base
validates :url_name, presence: true, length: 1..60, allow_nil: false, uniqueness: { case_sensitive: false }
validates :description, length: 0..800, allow_nil: true # , if: proc { |p| p.validate_url_name_and_desc == 'true' }
validates_each :url, :download_url, allow_blank: true do |record, field, value|
- record.errors.add(field, I18n.t(:not_a_valid_url)) unless value.valid_http_url?
+ record.errors.add(field, I18n.t(:not_a_valid_url)) unless value.blank? || value.valid_http_url?
end
before_validation :clean_strings_and_urls
after_save :update_organzation_project_count | 1 | class Project < ActiveRecord::Base
include ProjectAssociations
include LinkAccessors
include Tsearch
include ProjectSearchables
include ProjectScopes
include ProjectJobs
acts_as_editable editable_attributes: [:name, :url_name, :logo_id, :organization_id, :best_analysis_id,
:description, :tag_list, :missing_source, :url, :download_url],
merge_within: 30.minutes
acts_as_protected
acts_as_taggable
link_accessors accessors: { url: :Homepage, download_url: :Download }
validates :name, presence: true, length: 1..100, allow_nil: false, uniqueness: { case_sensitive: false }
validates :url_name, presence: true, length: 1..60, allow_nil: false, uniqueness: { case_sensitive: false }
validates :description, length: 0..800, allow_nil: true # , if: proc { |p| p.validate_url_name_and_desc == 'true' }
validates_each :url, :download_url, allow_blank: true do |record, field, value|
record.errors.add(field, I18n.t(:not_a_valid_url)) unless value.valid_http_url?
end
before_validation :clean_strings_and_urls
after_save :update_organzation_project_count
after_update :remove_people, if: -> project { project.deleted_changed? && project.deleted? }
attr_accessor :managed_by_creator
def to_param
url_name.blank? ? id.to_s : url_name
end
def related_by_stacks(limit = 12)
stack_weights = StackEntry.stack_weight_sql(id)
Project.select('projects.*, shared_stacks, shared_stacks*sqrt(shared_stacks)/projects.user_count as value')
.joins(sanitize("INNER JOIN (#{stack_weights}) AS stack_weights ON stack_weights.project_id = projects.id"))
.not_deleted.where('shared_stacks > 2').order('value DESC, shared_stacks DESC').limit(limit)
end
def related_by_tags(limit = 5)
tag_weights = Tagging.tag_weight_sql(self.class, tags.map(&:id))
Project.select('projects.*, tag_weights.weight')
.joins(sanitize("INNER JOIN (#{tag_weights}) AS tag_weights ON tag_weights.project_id = projects.id"))
.not_deleted.where.not(id: id).order('tag_weights.weight DESC, projects.user_count DESC').limit(limit)
end
def active_managers
Manage.projects.for_target(self).active.to_a.map(&:account)
end
def allow_undo_to_nil?(key)
![:name].include?(key)
end
def allow_redo?(key)
(key == :organization_id && !organization_id.nil?) ? false : true
end
def main_language
return if best_analysis.nil? || best_analysis.main_language.nil?
best_analysis.main_language.name
end
def best_analysis
super || NilAnalysis.new
end
def users(query = '', sort = '')
search_term = query.present? ? ['accounts.name iLIKE ?', "%#{query}%"] : nil
orber_by = sort.eql?('name') ? 'accounts.name ASC' : 'people.kudo_position ASC'
Account.select('DISTINCT(accounts.id), accounts.*, people.kudo_position')
.joins([{ stacks: :stack_entries }, :person])
.where(stack_entries: { project_id: id })
.where(search_term)
.order(orber_by)
end
def code_published_in_code_search?
koders_status.try(:ohloh_code_ready) == true
end
def newest_contributions
contributions.sort_by_newest.includes(person: :account, contributor_fact: :primary_language).limit(10)
end
def top_contributions
contributions.sort_by_twelve_month_commits
.includes(person: :account, contributor_fact: :primary_language)
.limit(10)
end
class << self
def search_and_sort(query, sort, page)
sort_by = (sort == 'relevance') ? nil : "by_#{sort}"
tsearch(query, sort_by)
.includes(:best_analysis)
.paginate(page: page, per_page: 20)
end
end
private
def clean_strings_and_urls
self.name = String.clean_string(name)
self.description = String.clean_string(description)
end
def sanitize(sql)
Project.send :sanitize_sql, sql
end
def update_organzation_project_count
org = Organization.where(id: organization_id || organization_id_was).first
return unless org
org.update_attributes(editor_account: editor_account, projects_count: org.projects.count)
end
def remove_people
Person.where(project_id: id).destroy_all
end
end
| 1 | 7,981 | I don't know why this worked before, but we should be explicit on not verifying that a blank string (allowed as a way to remove a url/download_url) is a valid url as it is not. | blackducksoftware-ohloh-ui | rb |
@@ -212,8 +212,7 @@ module Bolt
return unless !stdout.empty? && stdout.to_i < 3
- msg = "Detected PowerShell 2 on controller. PowerShell 2 is deprecated and "\
- "support will be removed in Bolt 3.0."
+ msg = "Detected PowerShell 2 on controller. PowerShell 2 is unsupported."
Bolt::Logger.deprecation_warning("PowerShell 2 controller", msg)
end
end | 1 | # frozen_string_literal: true
# Avoid requiring the CLI from other files. It has side-effects - such as loading r10k -
# that are undesirable when using Bolt as a library.
require 'uri'
require 'benchmark'
require 'json'
require 'io/console'
require 'logging'
require 'optparse'
require 'bolt/analytics'
require 'bolt/bolt_option_parser'
require 'bolt/config'
require 'bolt/error'
require 'bolt/executor'
require 'bolt/inventory'
require 'bolt/logger'
require 'bolt/module_installer'
require 'bolt/outputter'
require 'bolt/pal'
require 'bolt/plan_creator'
require 'bolt/plugin'
require 'bolt/project_manager'
require 'bolt/puppetdb'
require 'bolt/rerun'
require 'bolt/secret'
require 'bolt/target'
require 'bolt/version'
module Bolt
class CLIExit < StandardError; end
class CLI
COMMANDS = {
'command' => %w[run],
'script' => %w[run],
'task' => %w[show run],
'plan' => %w[show run convert new],
'file' => %w[download upload],
'puppetfile' => %w[install show-modules generate-types],
'secret' => %w[encrypt decrypt createkeys],
'inventory' => %w[show],
'group' => %w[show],
'project' => %w[init migrate],
'module' => %w[add generate-types install show],
'apply' => %w[],
'guide' => %w[]
}.freeze
attr_reader :config, :options
def initialize(argv)
Bolt::Logger.initialize_logging
@logger = Bolt::Logger.logger(self)
@argv = argv
@options = {}
end
# Only call after @config has been initialized.
def inventory
@inventory ||= Bolt::Inventory.from_config(config, plugins)
end
private :inventory
def help?(remaining)
# Set the subcommand
options[:subcommand] = remaining.shift
if options[:subcommand] == 'help'
options[:help] = true
options[:subcommand] = remaining.shift
end
# This section handles parsing non-flag options which are
      # subcommand specific rather than part of the config
actions = COMMANDS[options[:subcommand]]
if actions && !actions.empty?
options[:action] = remaining.shift
end
options[:help]
end
private :help?
# Wrapper method that is called by the Bolt executable. Parses the command and
# then loads the project and config. Once config is loaded, it completes the
# setup process by configuring Bolt and logging messages.
#
# This separation is needed since the Bolt::Outputter class that normally handles
# printing errors relies on config being loaded. All setup that happens before
# config is loaded will have errors printed directly to stdout, while all errors
# raised after config is loaded are handled by the outputter.
def parse
parse_command
load_config
finalize_setup
end
# Parses the command and validates options. All errors that are raised here
# are not handled by the outputter, as it relies on config being loaded.
def parse_command
parser = BoltOptionParser.new(options)
# This part aims to handle both `bolt <mode> --help` and `bolt help <mode>`.
remaining = handle_parser_errors { parser.permute(@argv) } unless @argv.empty?
if @argv.empty? || help?(remaining)
# If the subcommand is not enabled, display the default
# help text
options[:subcommand] = nil unless COMMANDS.include?(options[:subcommand])
# Update the parser for the subcommand (or lack thereof)
parser.update
puts parser.help
raise Bolt::CLIExit
end
options[:object] = remaining.shift
# Handle reading a command from a file
if options[:subcommand] == 'command' && options[:object]
options[:object] = Bolt::Util.get_arg_input(options[:object])
end
# Only parse task_options for task or plan
if %w[task plan].include?(options[:subcommand])
task_options, remaining = remaining.partition { |s| s =~ /.+=/ }
if options[:task_options]
unless task_options.empty?
raise Bolt::CLIError,
"Parameters must be specified through either the --params " \
"option or param=value pairs, not both"
end
options[:params_parsed] = true
elsif task_options.any?
options[:params_parsed] = false
options[:task_options] = Hash[task_options.map { |a| a.split('=', 2) }]
else
options[:params_parsed] = true
options[:task_options] = {}
end
end
options[:leftovers] = remaining
# Default to verbose for everything except plans
unless options.key?(:verbose)
options[:verbose] = options[:subcommand] != 'plan'
end
validate(options)
# Deprecation warnings can't be issued until after config is loaded, so
# store them for later.
@parser_deprecations = parser.deprecations
rescue Bolt::Error => e
fatal_error(e)
raise e
end
# Loads the project and configuration. All errors that are raised here are not
# handled by the outputter, as it relies on config being loaded.
def load_config
project = if ENV['BOLT_PROJECT']
Bolt::Project.create_project(ENV['BOLT_PROJECT'], 'environment')
elsif options[:project]
dir = Pathname.new(options[:project])
if (dir + Bolt::Project::BOLTDIR_NAME).directory?
Bolt::Project.create_project(dir + Bolt::Project::BOLTDIR_NAME)
else
Bolt::Project.create_project(dir)
end
else
Bolt::Project.find_boltdir(Dir.pwd)
end
@config = Bolt::Config.from_project(project, options)
rescue Bolt::Error => e
fatal_error(e)
raise e
end
# Completes the setup process by configuring Bolt and log messages
def finalize_setup
Bolt::Logger.configure(config.log, config.color)
Bolt::Logger.analytics = analytics
# Logger must be configured before checking path case and project file, otherwise logs will not display
config.check_path_case('modulepath', config.modulepath)
config.project.check_deprecated_file
# Log messages created during parser and config initialization
config.logs.each { |log| @logger.send(log.keys[0], log.values[0]) }
@parser_deprecations.each { |dep| Bolt::Logger.deprecation_warning(dep[:type], dep[:msg]) }
config.deprecations.each { |dep| Bolt::Logger.deprecation_warning(dep[:type], dep[:msg]) }
if options[:clear_cache] && File.exist?(config.project.plugin_cache_file)
FileUtils.rm(config.project.plugin_cache_file)
end
warn_inventory_overrides_cli(options)
validate_ps_version
options
rescue Bolt::Error => e
outputter.fatal_error(e)
raise e
end
private def validate_ps_version
if Bolt::Util.powershell?
command = "powershell.exe -NoProfile -NonInteractive -NoLogo -ExecutionPolicy "\
"Bypass -Command $PSVersionTable.PSVersion.Major"
stdout, _stderr, _status = Open3.capture3(command)
return unless !stdout.empty? && stdout.to_i < 3
msg = "Detected PowerShell 2 on controller. PowerShell 2 is deprecated and "\
"support will be removed in Bolt 3.0."
Bolt::Logger.deprecation_warning("PowerShell 2 controller", msg)
end
end
def update_targets(options)
target_opts = options.keys.select { |opt| %i[query rerun targets].include?(opt) }
target_string = "'--targets', '--rerun', or '--query'"
if target_opts.length > 1
raise Bolt::CLIError, "Only one targeting option #{target_string} may be specified"
elsif target_opts.empty? && options[:subcommand] != 'plan'
raise Bolt::CLIError, "Command requires a targeting option: #{target_string}"
end
targets = if options[:query]
query_puppetdb_nodes(options[:query])
elsif options[:rerun]
rerun.get_targets(options[:rerun])
else
options[:targets] || []
end
options[:target_args] = targets
options[:targets] = inventory.get_targets(targets)
end
def validate(options)
unless COMMANDS.include?(options[:subcommand])
command = Bolt::Util.powershell? ? 'Get-Command -Module PuppetBolt' : 'bolt help'
raise Bolt::CLIError,
"'#{options[:subcommand]}' is not a Bolt command. See '#{command}'."
end
actions = COMMANDS[options[:subcommand]]
if actions.any?
if options[:action].nil?
raise Bolt::CLIError,
"Expected an action of the form 'bolt #{options[:subcommand]} <action>'"
end
unless actions.include?(options[:action])
raise Bolt::CLIError,
"Expected action '#{options[:action]}' to be one of " \
"#{actions.join(', ')}"
end
end
if %w[task plan].include?(options[:subcommand]) && options[:action] == 'run'
if options[:object].nil?
raise Bolt::CLIError, "Must specify a #{options[:subcommand]} to run"
end
# This may mean that we parsed a parameter as the object
unless options[:object] =~ /\A([a-z][a-z0-9_]*)?(::[a-z][a-z0-9_]*)*\Z/
raise Bolt::CLIError,
"Invalid #{options[:subcommand]} '#{options[:object]}'"
end
end
if options[:subcommand] == 'apply' && (options[:object] && options[:code])
raise Bolt::CLIError, "--execute is unsupported when specifying a manifest file"
end
if options[:subcommand] == 'apply' && (!options[:object] && !options[:code])
raise Bolt::CLIError, "a manifest file or --execute is required"
end
if options[:subcommand] == 'command' && (!options[:object] || options[:object].empty?)
raise Bolt::CLIError, "Must specify a command to run"
end
if options[:subcommand] == 'secret' &&
(options[:action] == 'decrypt' || options[:action] == 'encrypt') &&
!options[:object]
raise Bolt::CLIError, "Must specify a value to #{options[:action]}"
end
if options[:subcommand] == 'plan' && options[:action] == 'new' && !options[:object]
raise Bolt::CLIError, "Must specify a plan name."
end
if options[:subcommand] == 'module' && options[:action] == 'add' && !options[:object]
raise Bolt::CLIError, "Must specify a module name."
end
if options[:subcommand] == 'module' && options[:action] == 'install' && options[:object]
command = Bolt::Util.powershell? ? 'Add-BoltModule -Module' : 'bolt module add'
raise Bolt::CLIError, "Invalid argument '#{options[:object]}'. To add a new module to "\
"the project, run '#{command} #{options[:object]}'."
end
if options[:subcommand] != 'file' && options[:subcommand] != 'script' &&
!options[:leftovers].empty?
raise Bolt::CLIError,
"Unknown argument(s) #{options[:leftovers].join(', ')}"
end
if options[:noop] &&
!(options[:subcommand] == 'task' && options[:action] == 'run') && options[:subcommand] != 'apply'
raise Bolt::CLIError,
"Option '--noop' may only be specified when running a task or applying manifest code"
end
if options[:env_vars]
unless %w[command script].include?(options[:subcommand]) && options[:action] == 'run'
raise Bolt::CLIError,
"Option '--env-var' may only be specified when running a command or script"
end
end
end
def handle_parser_errors
yield
rescue OptionParser::MissingArgument => e
raise Bolt::CLIError, "Option '#{e.args.first}' needs a parameter"
rescue OptionParser::InvalidArgument => e
raise Bolt::CLIError, "Invalid parameter specified for option '#{e.args.first}': #{e.args[1]}"
rescue OptionParser::InvalidOption, OptionParser::AmbiguousOption => e
raise Bolt::CLIError, "Unknown argument '#{e.args.first}'"
end
def puppetdb_client
plugins.puppetdb_client
end
def plugins
@plugins ||= Bolt::Plugin.setup(config, pal, analytics)
end
def query_puppetdb_nodes(query)
puppetdb_client.query_certnames(query)
end
def warn_inventory_overrides_cli(opts)
inventory_source = if ENV[Bolt::Inventory::ENVIRONMENT_VAR]
Bolt::Inventory::ENVIRONMENT_VAR
elsif config.inventoryfile
config.inventoryfile
elsif File.exist?(config.default_inventoryfile)
config.default_inventoryfile
end
inventory_cli_opts = %i[authentication escalation transports].each_with_object([]) do |key, acc|
acc.concat(Bolt::BoltOptionParser::OPTIONS[key])
end
inventory_cli_opts.concat(%w[no-host-key-check no-ssl no-ssl-verify no-tty])
conflicting_options = Set.new(opts.keys.map(&:to_s)).intersection(inventory_cli_opts)
if inventory_source && conflicting_options.any?
@logger.warn("CLI arguments #{conflicting_options.to_a} may be overridden by Inventory: #{inventory_source}")
end
end
def execute(options)
message = nil
handler = Signal.trap :INT do |signo|
@logger.info(
"Exiting after receiving SIG#{Signal.signame(signo)} signal.#{message ? ' ' + message : ''}"
)
exit!
end
# Initialize inventory and targets. Errors here are better to catch early.
# options[:target_args] will contain a string/array version of the targeting options; this is passed to plans
# options[:targets] will contain a resolved set of Target objects
unless %w[guide module project puppetfile secret].include?(options[:subcommand]) ||
%w[convert new show].include?(options[:action])
update_targets(options)
end
screen = "#{options[:subcommand]}_#{options[:action]}"
# submit a different screen for `bolt task show` and `bolt task show foo`
if options[:action] == 'show' && options[:object]
screen += '_object'
end
screen_view_fields = {
output_format: config.format,
# For continuity
boltdir_type: config.project.type
}.merge!(analytics.plan_counts(config.project.plans_path))
# Only include target and inventory info for commands that take a targets
# list. This avoids loading inventory for commands that don't need it.
if options.key?(:targets)
screen_view_fields.merge!(target_nodes: options[:targets].count,
inventory_nodes: inventory.node_names.count,
inventory_groups: inventory.group_names.count,
inventory_version: inventory.version)
end
analytics.screen_view(screen, **screen_view_fields)
case options[:action]
when 'show'
case options[:subcommand]
when 'task'
if options[:object]
show_task(options[:object])
else
list_tasks
end
when 'plan'
if options[:object]
show_plan(options[:object])
else
list_plans
end
when 'inventory'
if options[:detail]
show_targets
else
list_targets
end
when 'group'
list_groups
when 'module'
list_modules
end
return 0
when 'show-modules'
list_modules
return 0
when 'convert'
pal.convert_plan(options[:object])
return 0
end
message = 'There may be processes left executing on some nodes.'
if %w[task plan].include?(options[:subcommand]) && options[:task_options] && !options[:params_parsed] && pal
options[:task_options] = pal.parse_params(options[:subcommand], options[:object], options[:task_options])
end
case options[:subcommand]
when 'guide'
code = if options[:object]
show_guide(options[:object])
else
list_topics
end
when 'project'
case options[:action]
when 'init'
code = Bolt::ProjectManager.new(config, outputter, pal)
.create(Dir.pwd, options[:object], options[:modules])
when 'migrate'
code = Bolt::ProjectManager.new(config, outputter, pal).migrate
end
when 'plan'
case options[:action]
when 'new'
plan_name = options[:object]
# If this passes validation, it will return the path to the plan to create
Bolt::PlanCreator.validate_input(config.project, plan_name)
code = Bolt::PlanCreator.create_plan(config.project.plans_path,
plan_name,
outputter,
options[:puppet])
when 'run'
code = run_plan(options[:object], options[:task_options], options[:target_args], options)
end
when 'module'
case options[:action]
when 'add'
code = add_project_module(options[:object], config.project, config.module_install)
when 'install'
code = install_project_modules(config.project, config.module_install, options[:force], options[:resolve])
when 'generate-types'
code = generate_types
end
when 'puppetfile'
case options[:action]
when 'generate-types'
code = generate_types
when 'install'
code = install_puppetfile(
config.puppetfile_config,
config.puppetfile,
config.modulepath.first
)
end
when 'secret'
code = Bolt::Secret.execute(plugins, outputter, options)
when 'apply'
if options[:object]
validate_file('manifest', options[:object])
options[:code] = File.read(File.expand_path(options[:object]))
end
code = apply_manifest(options[:code], options[:targets], options[:object], options[:noop])
else
executor = Bolt::Executor.new(config.concurrency, analytics, options[:noop], config.modified_concurrency)
targets = options[:targets]
results = nil
outputter.print_head
elapsed_time = Benchmark.realtime do
executor_opts = {}
executor_opts[:env_vars] = options[:env_vars] if options.key?(:env_vars)
executor.subscribe(outputter)
executor.subscribe(log_outputter)
results =
case options[:subcommand]
when 'command'
executor.run_command(targets, options[:object], executor_opts)
when 'script'
script = options[:object]
validate_file('script', script)
executor.run_script(targets, script, options[:leftovers], executor_opts)
when 'task'
pal.run_task(options[:object],
targets,
options[:task_options],
executor,
inventory)
when 'file'
src = options[:object]
dest = options[:leftovers].first
if src.nil?
raise Bolt::CLIError, "A source path must be specified"
end
if dest.nil?
raise Bolt::CLIError, "A destination path must be specified"
end
case options[:action]
when 'download'
dest = File.expand_path(dest, Dir.pwd)
executor.download_file(targets, src, dest, executor_opts)
when 'upload'
validate_file('source file', src, true)
executor.upload_file(targets, src, dest, executor_opts)
end
end
end
executor.shutdown
rerun.update(results)
outputter.print_summary(results, elapsed_time)
code = results.ok ? 0 : 2
end
code
rescue Bolt::Error => e
outputter.fatal_error(e)
raise e
ensure
# restore original signal handler
Signal.trap :INT, handler if handler
analytics&.finish
end
def show_task(task_name)
outputter.print_task_info(pal.get_task(task_name))
end
# Filters a list of content by matching substring.
#
private def filter_content(content, filter)
return content unless content && filter
content.select { |name,| name.include?(filter) }
end
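# A hypothetical illustration (not part of the original file): given a list of
# [name, description] pairs, filter_content keeps the entries whose name
# contains the filter string.
#
#   filter_content([['facts', 'Gather facts'], ['package', 'Manage packages']], 'fact')
#   # => [['facts', 'Gather facts']]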
def list_tasks
tasks = filter_content(pal.list_tasks(filter_content: true), options[:filter])
outputter.print_tasks(tasks, pal.user_modulepath)
end
def show_plan(plan_name)
outputter.print_plan_info(pal.get_plan_info(plan_name))
end
def list_plans
plans = filter_content(pal.list_plans_with_cache(filter_content: true), options[:filter])
outputter.print_plans(plans, pal.user_modulepath)
end
def list_targets
inventoryfile = config.inventoryfile || config.default_inventoryfile
# Retrieve the known group and target names. This needs to be done before
# updating targets, as that will add adhoc targets to the inventory.
known_names = inventory.target_names
update_targets(options)
inventory_targets, adhoc_targets = options[:targets].partition do |target|
known_names.include?(target.name)
end
target_list = {
inventory: inventory_targets,
adhoc: adhoc_targets
}
outputter.print_targets(target_list, inventoryfile)
end
def show_targets
update_targets(options)
outputter.print_target_info(options[:targets])
end
def list_groups
groups = inventory.group_names
outputter.print_groups(groups)
end
def run_plan(plan_name, plan_arguments, nodes, options)
unless nodes.empty?
if plan_arguments['nodes'] || plan_arguments['targets']
key = plan_arguments.include?('nodes') ? 'nodes' : 'targets'
raise Bolt::CLIError,
"A plan's '#{key}' parameter may be specified using the --#{key} option, but in that " \
"case it must not be specified as a separate #{key}=<value> parameter nor included " \
"in the JSON data passed in the --params option"
end
plan_params = pal.get_plan_info(plan_name)['parameters']
target_param = plan_params.dig('targets', 'type') =~ /TargetSpec/
node_param = plan_params.include?('nodes')
if node_param && target_param
msg = "Plan parameters include both 'nodes' and 'targets' with type 'TargetSpec', " \
"neither will populated with the value for --nodes or --targets."
@logger.warn(msg)
elsif node_param
plan_arguments['nodes'] = nodes.join(',')
elsif target_param
plan_arguments['targets'] = nodes.join(',')
end
end
plan_context = { plan_name: plan_name,
params: plan_arguments }
executor = Bolt::Executor.new(config.concurrency, analytics, options[:noop], config.modified_concurrency)
if %w[human rainbow].include?(options.fetch(:format, 'human'))
executor.subscribe(outputter)
else
# Only subscribe to out::message events for JSON outputter
executor.subscribe(outputter, [:message])
end
executor.subscribe(log_outputter)
executor.start_plan(plan_context)
result = pal.run_plan(plan_name, plan_arguments, executor, inventory, puppetdb_client)
# If a non-bolt exception bubbles up the plan won't get finished
executor.finish_plan(result)
executor.shutdown
rerun.update(result)
outputter.print_plan_result(result)
result.ok? ? 0 : 1
end
def apply_manifest(code, targets, filename = nil, noop = false)
Puppet[:tasks] = false
ast = pal.parse_manifest(code, filename)
if defined?(ast.body) &&
(ast.body.is_a?(Puppet::Pops::Model::HostClassDefinition) ||
ast.body.is_a?(Puppet::Pops::Model::ResourceTypeDefinition))
message = "Manifest only contains definitions and will result in no changes on the targets. "\
"Definitions must be declared for their resources to be applied. You can read more "\
"about defining and declaring classes and types in the Puppet documentation at "\
"https://puppet.com/docs/puppet/latest/lang_classes.html and "\
"https://puppet.com/docs/puppet/latest/lang_defined_types.html"
@logger.warn(message)
end
executor = Bolt::Executor.new(config.concurrency, analytics, noop, config.modified_concurrency)
executor.subscribe(outputter) if options.fetch(:format, 'human') == 'human'
executor.subscribe(log_outputter)
# apply logging looks like plan logging, so tell the outputter we're in a
# plan even though we're not
executor.publish_event(type: :plan_start, plan: nil)
results = nil
elapsed_time = Benchmark.realtime do
pal.in_plan_compiler(executor, inventory, puppetdb_client) do |compiler|
compiler.call_function('apply_prep', targets)
end
results = pal.with_bolt_executor(executor, inventory, puppetdb_client) do
Puppet.lookup(:apply_executor).apply_ast(ast, targets, catch_errors: true, noop: noop)
end
end
executor.shutdown
outputter.print_apply_result(results, elapsed_time)
rerun.update(results)
results.ok ? 0 : 1
end
def list_modules
assert_puppetfile_or_module_command(config.project.modules)
outputter.print_module_list(pal.list_modules)
end
def generate_types
assert_puppetfile_or_module_command(config.project.modules)
# generate_types will surface a nice error with a helpful message if it fails
pal.generate_types(cache: true)
0
end
# Installs modules declared in the project configuration file.
#
def install_project_modules(project, config, force, resolve)
assert_project_file(project)
assert_puppetfile_or_module_command(project.modules)
unless project.modules
outputter.print_message "Project configuration file #{project.project_file} does not "\
"specify any module dependencies. Nothing to do."
return 0
end
modules = project.modules || []
installer = Bolt::ModuleInstaller.new(outputter, pal)
ok = outputter.spin do
installer.install(modules,
project.puppetfile,
project.managed_moduledir,
config,
force: force,
resolve: resolve)
end
ok ? 0 : 1
end
# Adds a single module to the project.
#
def add_project_module(name, project, config)
assert_project_file(project)
assert_puppetfile_or_module_command(project.modules)
modules = project.modules || []
installer = Bolt::ModuleInstaller.new(outputter, pal)
ok = outputter.spin do
installer.add(name,
modules,
project.puppetfile,
project.managed_moduledir,
project.project_file,
config)
end
ok ? 0 : 1
end
# Asserts that there is a project configuration file.
#
def assert_project_file(project)
unless project.project_file?
msg = if project.config_file.exist?
command = Bolt::Util.powershell? ? 'Update-BoltProject' : 'bolt project migrate'
"Detected Bolt configuration file #{project.config_file}, unable to install "\
"modules. To update to a project configuration file, run '#{command}'."
else
command = Bolt::Util.powershell? ? 'New-BoltProject' : 'bolt project init'
"Could not find project configuration file #{project.project_file}, unable "\
"to install modules. To create a Bolt project, run '#{command}'."
end
raise Bolt::Error.new(msg, 'bolt/missing-project-config-error')
end
end
# Loads a Puppetfile and installs its modules.
#
def install_puppetfile(puppetfile_config, puppetfile, moduledir)
assert_puppetfile_or_module_command(config.project.modules)
outputter.print_message("Installing modules from Puppetfile")
installer = Bolt::ModuleInstaller.new(outputter, pal)
ok = outputter.spin do
installer.install_puppetfile(puppetfile, moduledir, puppetfile_config)
end
ok ? 0 : 1
end
# Raises an error if the 'puppetfile install' command is deprecated due to
# modules being configured.
#
def assert_puppetfile_or_module_command(modules)
if Bolt::Util.powershell?
case options[:action]
when 'generate-types'
old_command = 'Register-BoltPuppetfileTypes'
new_command = 'Register-BoltModuleTypes'
when 'install'
old_command = 'Install-BoltPuppetfile'
new_command = 'Install-BoltModule'
when 'show', 'show-modules'
old_command = 'Get-BoltPuppetfileModules'
new_command = 'Get-BoltModule'
end
else
old_command = "bolt puppetfile #{options[:action]}"
new_command = if options[:action] == 'show-modules'
'bolt module show'
else
"bolt module #{options[:action]}"
end
end
if modules && options[:subcommand] == 'puppetfile'
raise Bolt::CLIError,
"Unable to use command '#{old_command}' when 'modules' is configured in "\
"bolt-project.yaml. Use '#{new_command}' instead."
elsif modules.nil? && options[:subcommand] == 'puppetfile'
msg = "Command '#{old_command}' is deprecated and will be removed in Bolt 3.0. Update your project to use "\
"the module management feature. For more information, see https://pup.pt/bolt-module-migrate."
Bolt::Logger.deprecation_warning('puppetfile command', msg)
elsif modules.nil? && options[:subcommand] == 'module'
msg = "Unable to use command '#{new_command}' when 'modules' is not configured in "\
"bolt-project.yaml. "
msg += "Use '#{old_command}' instead." if options[:action] != 'add'
raise Bolt::CLIError, msg
end
end
def pal
@pal ||= Bolt::PAL.new(Bolt::Config::Modulepath.new(config.modulepath),
config.hiera_config,
config.project.resource_types,
config.compile_concurrency,
config.trusted_external,
config.apply_settings,
config.project)
end
# Collects the list of Bolt guides and maps them to their topics.
def guides
@guides ||= begin
root_path = File.expand_path(File.join(__dir__, '..', '..', 'guides'))
files = Dir.children(root_path).sort
files.each_with_object({}) do |file, guides|
next if file !~ /\.txt\z/
topic = File.basename(file, '.txt')
guides[topic] = File.join(root_path, file)
end
rescue SystemCallError => e
raise Bolt::FileError.new("#{e.message}: unable to load guides directory", root_path)
end
end
# Display the list of available Bolt guides.
def list_topics
outputter.print_topics(guides.keys)
0
end
# Display a specific Bolt guide.
def show_guide(topic)
if guides[topic]
analytics.event('Guide', 'known_topic', label: topic)
begin
guide = File.read(guides[topic])
rescue SystemCallError => e
raise Bolt::FileError.new("#{e.message}: unable to load guide page", guides[topic])
end
outputter.print_guide(guide, topic)
else
analytics.event('Guide', 'unknown_topic', label: topic)
outputter.print_message("Did not find guide for topic '#{topic}'.\n\n")
list_topics
end
0
end
def validate_file(type, path, allow_dir = false)
if path.nil?
raise Bolt::CLIError, "A #{type} must be specified"
end
Bolt::Util.validate_file(type, path, allow_dir)
end
def rerun
@rerun ||= Bolt::Rerun.new(config.rerunfile, config.save_rerun)
end
def outputter
@outputter ||= Bolt::Outputter.for_format(config.format,
config.color,
options[:verbose],
config.trace,
config.spinner)
end
def log_outputter
@log_outputter ||= Bolt::Outputter::Logger.new(options[:verbose], config.trace)
end
def analytics
@analytics ||= begin
client = Bolt::Analytics.build_client
client.bundled_content = bundled_content
client
end
end
def bundled_content
# If the bundled content directory is empty, Bolt is likely installed as a gem.
if ENV['BOLT_GEM'].nil? && incomplete_install?
msg = <<~MSG.chomp
Bolt may be installed as a gem. To use Bolt reliably and with all of its
dependencies, uninstall the 'bolt' gem and install Bolt as a package:
https://puppet.com/docs/bolt/latest/bolt_installing.html
If you meant to install Bolt as a gem and want to disable this warning,
set the BOLT_GEM environment variable.
MSG
@logger.warn(msg)
end
# We only need to enumerate bundled content when running a task or plan
content = { 'Plan' => [],
'Task' => [],
'Plugin' => Bolt::Plugin::BUILTIN_PLUGINS }
if %w[plan task].include?(options[:subcommand]) && options[:action] == 'run'
default_content = Bolt::PAL.new(Bolt::Config::Modulepath.new([]), nil, nil)
content['Plan'] = default_content.list_plans.each_with_object([]) do |iter, col|
col << iter&.first
end
content['Task'] = default_content.list_tasks.each_with_object([]) do |iter, col|
col << iter&.first
end
end
content
end
# Gem installs include the aggregate, canary, and puppetdb_fact modules, while
# package installs include modules listed in the Bolt repo Puppetfile
def incomplete_install?
(Dir.children(Bolt::Config::Modulepath::MODULES_PATH) - %w[aggregate canary puppetdb_fact secure_env_vars]).empty?
end
# Mimics the output from Outputter::Human#fatal_error. This should be used to print
# errors prior to config being loaded, as the outputter relies on config being loaded.
def fatal_error(error)
if $stdout.isatty
$stdout.puts("\033[31m#{error.message}\033[0m")
else
$stdout.puts(error.message)
end
end
end
end
| 1 | 17,454 | Do we want to raise an error here instead of warning? Or would it be better to just see if Bolt happens to succeed, and let it fail on its own if it fails? I'd lean towards raising an error, but that's different from "removing support". | puppetlabs-bolt | rb
@@ -44,7 +44,12 @@ module Bolt
# Returns options this transport supports
def self.options
- raise NotImplementedError, "self.options() must be implemented by the transport class"
+ raise NotImplementedError,
+ "self.options() or self.filter_options(unfiltered) must be implemented by the transport class"
+ end
+
+ def self.filter_options(unfiltered)
+ unfiltered.select { |k| options.include?(k) }
end
def self.validate(_options) | 1 | # frozen_string_literal: true
require 'logging'
require 'bolt/result'
module Bolt
module Transport
# This class provides the default behavior for Transports. A Transport is
# responsible for uploading files and running commands, scripts, and tasks
# on Targets.
#
# Bolt executes work on the Transport in "batches". To do that, it calls
# the batches() method, which is responsible for dividing the list of
# Targets into batches according to how it wants to handle them. It will
# then call Transport#batch_task, or the corresponding method for another
# operation, passing a list of Targets. The Transport returns a list of
# Bolt::Result objects, one per Target. Each batch is executed on a
# separate thread, controlled by the `concurrency` setting, so many batches
# may be running in parallel.
#
# The default batch implementation splits the list of Targets into batches
# of 1. It then calls run_task(), or a corresponding method for other
# operations, passing in the single Target.
#
# Most Transport implementations, like the SSH and WinRM transports, don't
# need to do their own batching, since they only operate on a single Target
# at a time. Those Transports can implement the run_task() and related
# methods, which will automatically handle running many Targets in
# parallel, and will handle publishing start and finish events for each
# Target.
#
# Transports that need their own batching, like the Orch transport, can
# instead override the batches() method to split Targets into sets that can
# be executed together, and override the batch_task() and related methods
# to execute a batch of nodes. In that case, those Transports should accept
# a block argument and call it with a :node_start event for each Target
# before executing, and a :node_result event for each Target after
# execution.
class Base
STDIN_METHODS = %w[both stdin].freeze
ENVIRONMENT_METHODS = %w[both environment].freeze
attr_reader :logger
# Returns options this transport supports
def self.options
raise NotImplementedError, "self.options() must be implemented by the transport class"
end
def self.validate(_options)
raise NotImplementedError, "self.validate() must be implemented by the transport class"
end
def initialize
@logger = Logging.logger[self]
end
def with_events(target, callback)
callback&.call(type: :node_start, target: target)
result = begin
yield
rescue StandardError, NotImplementedError => ex
Bolt::Result.from_exception(target, ex)
end
callback&.call(type: :node_result, result: result)
result
end
def provided_features
[]
end
def filter_options(target, options)
if target.options['run-as']
options.reject { |k, _v| k == '_run_as' }
else
options
end
end
# Transform a parameter map to an environment variable map, with parameter names prefixed
# with 'PT_' and values transformed to JSON unless they're strings.
def envify_params(params)
params.each_with_object({}) do |(k, v), h|
v = v.to_json unless v.is_a?(String)
h["PT_#{k}"] = v
end
end
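# A small illustration (hypothetical values, not part of the original file) of
# the transformation described above: non-string values become JSON strings and
# every key gains the 'PT_' prefix.
#
#   envify_params('message' => 'hi', 'count' => 3)
#   # => { 'PT_message' => 'hi', 'PT_count' => '3' }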
# Raises an error if more than one target was given in the batch.
#
# The default implementations of batch_* strictly assume the transport is
# using the default batch size of 1. This method ensures that is the
# case and raises an error if it's not.
def assert_batch_size_one(method, targets)
if targets.length > 1
message = "#{self.class.name} must implement #{method} to support batches (got #{targets.length} nodes)"
raise NotImplementedError, message
end
end
# Runs the given task on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_task(targets, task, arguments, options = {}, &callback)
assert_batch_size_one("batch_task()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug { "Running task run '#{task}' on #{target.uri}" }
run_task(target, task, arguments, filter_options(target, options))
end
end
# Runs the given command on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_command(targets, command, options = {}, &callback)
assert_batch_size_one("batch_command()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug("Running command '#{command}' on #{target.uri}")
run_command(target, command, filter_options(target, options))
end
end
# Runs the given script on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_script(targets, script, arguments, options = {}, &callback)
assert_batch_size_one("batch_script()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug { "Running script '#{script}' on #{target.uri}" }
run_script(target, script, arguments, filter_options(target, options))
end
end
# Uploads the given source file to the destination location on a batch of nodes.
#
# The default implementation only supports batches of size 1 and will fail otherwise.
#
# Transports may override this method to implement their own batch processing.
def batch_upload(targets, source, destination, options = {}, &callback)
assert_batch_size_one("batch_upload()", targets)
target = targets.first
with_events(target, callback) do
@logger.debug { "Uploading: '#{source}' to #{destination} on #{target.uri}" }
upload(target, source, destination, filter_options(target, options))
end
end
def batch_connected?(targets)
assert_batch_size_one("connected?()", targets)
connected?(targets.first)
end
# Split the given list of targets into a list of batches. The default
# implementation returns single-node batches.
#
# Transports may override this method, and the corresponding batch_*
# methods, to implement their own batch processing.
def batches(targets)
targets.map { |target| [target] }
end
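# A minimal sketch (not from the Bolt codebase) of the batching override the
# class comment describes: a hypothetical transport that executes ten targets
# per batch. It reuses with_events and filter_options from this class;
# run_command stands for whatever per-target implementation the subclass
# provides.
#
#   class TenAtATimeTransport < Base
#     def batches(targets)
#       targets.each_slice(10).to_a
#     end
#
#     def batch_command(targets, command, options = {}, &callback)
#       targets.map do |target|
#         with_events(target, callback) do
#           run_command(target, command, filter_options(target, options))
#         end
#       end
#     end
#   end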
# Transports should override this method with their own implementation of running a command.
def run_command(*_args)
raise NotImplementedError, "run_command() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of running a script.
def run_script(*_args)
raise NotImplementedError, "run_script() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of running a task.
def run_task(*_args)
raise NotImplementedError, "run_task() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of file upload.
def upload(*_args)
raise NotImplementedError, "upload() must be implemented by the transport class"
end
# Transports should override this method with their own implementation of a connection test.
def connected?(_targets)
raise NotImplementedError, "connected?() must be implemented by the transport class"
end
# Unwraps any Sensitive data in an arguments Hash, so the plain-text is passed
# to the Task/Script.
#
# This works on deeply nested data structures composed of Hashes, Arrays,
# and plain-old data types (int, string, etc).
def unwrap_sensitive_args(arguments)
# Skip this if Puppet isn't loaded
return arguments unless defined?(Puppet::Pops::Types::PSensitiveType::Sensitive)
case arguments
when Array
# iterate over the array, unwrapping all elements
arguments.map { |x| unwrap_sensitive_args(x) }
when Hash
# iterate over the arguments hash and unwrap all keys and values
arguments.each_with_object({}) { |(k, v), h|
h[unwrap_sensitive_args(k)] = unwrap_sensitive_args(v)
}
when Puppet::Pops::Types::PSensitiveType::Sensitive
# this value is Sensitive, unwrap it
unwrap_sensitive_args(arguments.unwrap)
else
# unknown data type, just return it
arguments
end
end
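# An illustration only (assumes Puppet is loaded and uses hypothetical values):
#
#   secret = Puppet::Pops::Types::PSensitiveType::Sensitive.new('opensesame')
#   unwrap_sensitive_args('password' => secret, 'tries' => [secret])
#   # => { 'password' => 'opensesame', 'tries' => ['opensesame'] }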
end
end
end
| 1 | 10,095 | This is kind of confusing with the other `filter_options`. | puppetlabs-bolt | rb |
@@ -160,7 +160,12 @@ func (fs *KBFSOpsStandard) DeleteFavorite(ctx context.Context,
return fs.opsByFav[fav]
}()
if ops != nil {
- return ops.deleteFromFavorites(ctx, fs.favs)
+ err := ops.deleteFromFavorites(ctx, fs.favs)
+ if _, ok := err.(OpsCantHandleFavorite); !ok {
+ return err
+ }
+ // If the ops couldn't handle the delete, fall through to
+ // going directly via Favorites.
}
if isLoggedIn { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"fmt"
"sync"
"time"
"github.com/keybase/client/go/logger"
"golang.org/x/net/context"
)
// KBFSOpsStandard implements the KBFSOps interface, and is go-routine
// safe by forwarding requests to individual per-folder-branch
// handlers that are go-routine-safe.
type KBFSOpsStandard struct {
config Config
log logger.Logger
deferLog logger.Logger
ops map[FolderBranch]*folderBranchOps
opsByFav map[Favorite]*folderBranchOps
opsLock sync.RWMutex
// reIdentifyControlChan controls reidentification.
// Sending a value to this channel forces all fbos
// to be marked for revalidation.
// Closing this channel will shut down the reidentification
// watcher.
reIdentifyControlChan chan struct{}
favs *Favorites
currentStatus kbfsCurrentStatus
}
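// A minimal sketch (not part of the original file) of how the control channel
// documented on the struct above is meant to be driven: sending a value marks
// every fbo for revalidation on the next loop iteration, while closing the
// channel (done once, in Shutdown) stops the watcher goroutine.
func forceReIdentifyAllExample(fs *KBFSOpsStandard) {
fs.reIdentifyControlChan <- struct{}{}
}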
var _ KBFSOps = (*KBFSOpsStandard)(nil)
// NewKBFSOpsStandard constructs a new KBFSOpsStandard object.
func NewKBFSOpsStandard(config Config) *KBFSOpsStandard {
log := config.MakeLogger("")
kops := &KBFSOpsStandard{
config: config,
log: log,
deferLog: log.CloneWithAddedDepth(1),
ops: make(map[FolderBranch]*folderBranchOps),
opsByFav: make(map[Favorite]*folderBranchOps),
reIdentifyControlChan: make(chan struct{}),
favs: NewFavorites(config),
}
kops.currentStatus.Init()
go kops.markForReIdentifyIfNeededLoop()
return kops
}
func (fs *KBFSOpsStandard) markForReIdentifyIfNeededLoop() {
maxValid := fs.config.TLFValidDuration()
// Tests and some users fail to set this properly.
if maxValid <= 10*time.Second || maxValid > 24*365*time.Hour {
maxValid = tlfValidDurationDefault
}
// Tick at ten times the rate of the valid duration, allowing overflows of only +/-10%
ticker := time.NewTicker(maxValid / 10)
for {
var now time.Time
select {
// Normal case: feed the current time from config and mark fbos needing validation.
case <-ticker.C:
now = fs.config.Clock().Now()
// Mark everything for reidentification by leaving now as the zero value, or quit if the channel was closed.
case _, ok := <-fs.reIdentifyControlChan:
if !ok {
ticker.Stop()
return
}
}
fs.markForReIdentifyIfNeeded(now, maxValid)
}
}
func (fs *KBFSOpsStandard) markForReIdentifyIfNeeded(now time.Time, maxValid time.Duration) {
fs.opsLock.Lock()
defer fs.opsLock.Unlock()
for _, fbo := range fs.ops {
fbo.markForReIdentifyIfNeeded(now, maxValid)
}
}
// Shutdown safely shuts down any background goroutines that may have
// been launched by KBFSOpsStandard.
func (fs *KBFSOpsStandard) Shutdown() error {
close(fs.reIdentifyControlChan)
fs.favs.Shutdown()
var errors []error
for _, ops := range fs.ops {
if err := ops.Shutdown(); err != nil {
errors = append(errors, err)
// Continue on and try to shut down the other FBOs.
}
}
if len(errors) == 1 {
return errors[0]
} else if len(errors) > 1 {
// Aggregate errors
return fmt.Errorf("Multiple errors on shutdown: %v", errors)
}
return nil
}
// PushConnectionStatusChange pushes human readable connection status changes.
func (fs *KBFSOpsStandard) PushConnectionStatusChange(service string, newStatus error) {
fs.currentStatus.PushConnectionStatusChange(service, newStatus)
}
// GetFavorites implements the KBFSOps interface for
// KBFSOpsStandard.
func (fs *KBFSOpsStandard) GetFavorites(ctx context.Context) (
[]Favorite, error) {
return fs.favs.Get(ctx)
}
// RefreshCachedFavorites implements the KBFSOps interface for
// KBFSOpsStandard.
func (fs *KBFSOpsStandard) RefreshCachedFavorites(ctx context.Context) {
fs.favs.RefreshCache(ctx)
}
// AddFavorite implements the KBFSOps interface for KBFSOpsStandard.
func (fs *KBFSOpsStandard) AddFavorite(ctx context.Context,
fav Favorite) error {
kbpki := fs.config.KBPKI()
_, _, err := kbpki.GetCurrentUserInfo(ctx)
isLoggedIn := err == nil
if isLoggedIn {
err := fs.favs.Add(ctx, favToAdd{Favorite: fav, created: false})
if err != nil {
return err
}
}
return nil
}
// DeleteFavorite implements the KBFSOps interface for
// KBFSOpsStandard.
func (fs *KBFSOpsStandard) DeleteFavorite(ctx context.Context,
fav Favorite) error {
kbpki := fs.config.KBPKI()
_, _, err := kbpki.GetCurrentUserInfo(ctx)
isLoggedIn := err == nil
// Let this ops remove itself, if we have one available.
ops := func() *folderBranchOps {
fs.opsLock.Lock()
defer fs.opsLock.Unlock()
return fs.opsByFav[fav]
}()
if ops != nil {
return ops.deleteFromFavorites(ctx, fs.favs)
}
if isLoggedIn {
err := fs.favs.Delete(ctx, fav)
if err != nil {
return err
}
}
// TODO: Shut down the running folderBranchOps, if one exists?
// What about open file handles?
return nil
}
func (fs *KBFSOpsStandard) getOpsNoAdd(fb FolderBranch) *folderBranchOps {
fs.opsLock.RLock()
if ops, ok := fs.ops[fb]; ok {
fs.opsLock.RUnlock()
return ops
}
fs.opsLock.RUnlock()
fs.opsLock.Lock()
defer fs.opsLock.Unlock()
// look it up again in case someone else got the lock
ops, ok := fs.ops[fb]
if !ok {
// TODO: add some interface for specifying the type of the
// branch; for now assume online and read-write.
ops = newFolderBranchOps(fs.config, fb, standard)
fs.ops[fb] = ops
}
return ops
}
func (fs *KBFSOpsStandard) getOps(
ctx context.Context, fb FolderBranch) *folderBranchOps {
ops := fs.getOpsNoAdd(fb)
if err := ops.addToFavorites(ctx, fs.favs, false); err != nil {
// Failure to favorite shouldn't cause a failure. Just log
// and move on.
fs.log.CDebugf(ctx, "Couldn't add favorite: %v", err)
}
return ops
}
func (fs *KBFSOpsStandard) getOpsByNode(ctx context.Context,
node Node) *folderBranchOps {
return fs.getOps(ctx, node.GetFolderBranch())
}
func (fs *KBFSOpsStandard) getOpsByHandle(ctx context.Context,
handle *TlfHandle, fb FolderBranch) *folderBranchOps {
ops := fs.getOps(ctx, fb)
fs.opsLock.Lock()
defer fs.opsLock.Unlock()
// Track under its name, so we can later tell it to remove itself
// from the favorites list. TODO: fix this when unresolved
// assertions are allowed and become resolved.
fs.opsByFav[handle.ToFavorite()] = ops
return ops
}
// GetOrCreateRootNode implements the KBFSOps interface for
// KBFSOpsStandard
func (fs *KBFSOpsStandard) GetOrCreateRootNode(
ctx context.Context, h *TlfHandle, branch BranchName) (
node Node, ei EntryInfo, err error) {
fs.log.CDebugf(ctx, "GetOrCreateRootNode(%s, %v)",
h.GetCanonicalPath(), branch)
defer func() { fs.deferLog.CDebugf(ctx, "Done: %#v", err) }()
// Do GetForHandle() unlocked -- no cache lookups, should be fine
mdops := fs.config.MDOps()
// TODO: only do this the first time, cache the folder ID after that
md, err := mdops.GetUnmergedForHandle(ctx, h)
if err != nil {
return nil, EntryInfo{}, err
}
if md == nil {
md, err = mdops.GetForHandle(ctx, h)
if err != nil {
return nil, EntryInfo{}, err
}
}
fb := FolderBranch{Tlf: md.ID, Branch: branch}
// we might not be able to read the metadata if we aren't in the
// key group yet.
if err := md.isReadableOrError(ctx, fs.config); err != nil {
fs.opsLock.Lock()
defer fs.opsLock.Unlock()
// If we already have an FBO for this ID, trigger a rekey
// prompt in the background, if possible.
if ops, ok := fs.ops[fb]; ok {
fs.log.CDebugf(ctx, "Triggering a paper prompt rekey on folder "+
"access due to unreadable MD for %s", h.GetCanonicalPath())
go ops.rekeyWithPrompt()
}
return nil, EntryInfo{}, err
}
ops := fs.getOpsByHandle(ctx, h, fb)
var created bool
if branch == MasterBranch {
// For now, only the master branch can be initialized with a
// brand new MD object.
created, err = ops.CheckForNewMDAndInit(ctx, md)
if err != nil {
return nil, EntryInfo{}, err
}
}
node, ei, _, err = ops.getRootNode(ctx)
if err != nil {
return nil, EntryInfo{}, err
}
if err := ops.addToFavorites(ctx, fs.favs, created); err != nil {
// Failure to favorite shouldn't cause a failure. Just log
// and move on.
fs.log.CDebugf(ctx, "Couldn't add favorite: %v", err)
}
return node, ei, nil
}
// GetDirChildren implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) GetDirChildren(ctx context.Context, dir Node) (
map[string]EntryInfo, error) {
ops := fs.getOpsByNode(ctx, dir)
return ops.GetDirChildren(ctx, dir)
}
// Lookup implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) Lookup(ctx context.Context, dir Node, name string) (
Node, EntryInfo, error) {
ops := fs.getOpsByNode(ctx, dir)
return ops.Lookup(ctx, dir, name)
}
// Stat implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) Stat(ctx context.Context, node Node) (
EntryInfo, error) {
ops := fs.getOpsByNode(ctx, node)
return ops.Stat(ctx, node)
}
// CreateDir implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) CreateDir(
ctx context.Context, dir Node, name string) (Node, EntryInfo, error) {
ops := fs.getOpsByNode(ctx, dir)
return ops.CreateDir(ctx, dir, name)
}
// CreateFile implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) CreateFile(
ctx context.Context, dir Node, name string, isExec bool) (
Node, EntryInfo, error) {
ops := fs.getOpsByNode(ctx, dir)
return ops.CreateFile(ctx, dir, name, isExec)
}
// CreateLink implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) CreateLink(
ctx context.Context, dir Node, fromName string, toPath string) (
EntryInfo, error) {
ops := fs.getOpsByNode(ctx, dir)
return ops.CreateLink(ctx, dir, fromName, toPath)
}
// RemoveDir implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) RemoveDir(
ctx context.Context, dir Node, name string) error {
ops := fs.getOpsByNode(ctx, dir)
return ops.RemoveDir(ctx, dir, name)
}
// RemoveEntry implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) RemoveEntry(
ctx context.Context, dir Node, name string) error {
ops := fs.getOpsByNode(ctx, dir)
return ops.RemoveEntry(ctx, dir, name)
}
// Rename implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) Rename(
ctx context.Context, oldParent Node, oldName string, newParent Node,
newName string) error {
oldFB := oldParent.GetFolderBranch()
newFB := newParent.GetFolderBranch()
// only works for nodes within the same topdir
if oldFB != newFB {
return RenameAcrossDirsError{}
}
ops := fs.getOpsByNode(ctx, oldParent)
return ops.Rename(ctx, oldParent, oldName, newParent, newName)
}
// Read implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) Read(
ctx context.Context, file Node, dest []byte, off int64) (
numRead int64, err error) {
ops := fs.getOpsByNode(ctx, file)
return ops.Read(ctx, file, dest, off)
}
// Write implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) Write(
ctx context.Context, file Node, data []byte, off int64) error {
ops := fs.getOpsByNode(ctx, file)
return ops.Write(ctx, file, data, off)
}
// Truncate implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) Truncate(
ctx context.Context, file Node, size uint64) error {
ops := fs.getOpsByNode(ctx, file)
return ops.Truncate(ctx, file, size)
}
// SetEx implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) SetEx(
ctx context.Context, file Node, ex bool) error {
ops := fs.getOpsByNode(ctx, file)
return ops.SetEx(ctx, file, ex)
}
// SetMtime implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) SetMtime(
ctx context.Context, file Node, mtime *time.Time) error {
ops := fs.getOpsByNode(ctx, file)
return ops.SetMtime(ctx, file, mtime)
}
// Sync implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) Sync(ctx context.Context, file Node) error {
ops := fs.getOpsByNode(ctx, file)
return ops.Sync(ctx, file)
}
// FolderStatus implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) FolderStatus(
ctx context.Context, folderBranch FolderBranch) (
FolderBranchStatus, <-chan StatusUpdate, error) {
ops := fs.getOps(ctx, folderBranch)
return ops.FolderStatus(ctx, folderBranch)
}
// Status implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) Status(ctx context.Context) (
KBFSStatus, <-chan StatusUpdate, error) {
username, _, err := fs.config.KBPKI().GetCurrentUserInfo(ctx)
var usageBytes int64 = -1
var limitBytes int64 = -1
// Don't request the quota info until we're sure we've
// authenticated with our password. TODO: fix this in the
// service/GUI by handling multiple simultaneous passphrase
// requests at once.
if err == nil && fs.config.MDServer().IsConnected() {
quotaInfo, err := fs.config.BlockServer().GetUserQuotaInfo(ctx)
if err == nil {
limitBytes = quotaInfo.Limit
if quotaInfo.Total != nil {
usageBytes = quotaInfo.Total.Bytes[UsageWrite]
} else {
usageBytes = 0
}
}
}
failures, ch := fs.currentStatus.CurrentStatus()
return KBFSStatus{
CurrentUser: username.String(),
IsConnected: fs.config.MDServer().IsConnected(),
UsageBytes: usageBytes,
LimitBytes: limitBytes,
FailingServices: failures,
}, ch, err
}
// UnstageForTesting implements the KBFSOps interface for KBFSOpsStandard
// TODO: remove once we have automatic conflict resolution
func (fs *KBFSOpsStandard) UnstageForTesting(
ctx context.Context, folderBranch FolderBranch) error {
ops := fs.getOps(ctx, folderBranch)
return ops.UnstageForTesting(ctx, folderBranch)
}
// Rekey implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) Rekey(ctx context.Context, id TlfID) error {
// We currently only support rekeys of master branches.
ops := fs.getOpsNoAdd(FolderBranch{Tlf: id, Branch: MasterBranch})
return ops.Rekey(ctx, id)
}
// SyncFromServerForTesting implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) SyncFromServerForTesting(
ctx context.Context, folderBranch FolderBranch) error {
ops := fs.getOps(ctx, folderBranch)
return ops.SyncFromServerForTesting(ctx, folderBranch)
}
// GetUpdateHistory implements the KBFSOps interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) GetUpdateHistory(ctx context.Context,
folderBranch FolderBranch) (history TLFUpdateHistory, err error) {
ops := fs.getOps(ctx, folderBranch)
return ops.GetUpdateHistory(ctx, folderBranch)
}
// Notifier:
var _ Notifier = (*KBFSOpsStandard)(nil)
// RegisterForChanges implements the Notifier interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) RegisterForChanges(
folderBranches []FolderBranch, obs Observer) error {
for _, fb := range folderBranches {
// TODO: add branch parameter to notifier interface
ops := fs.getOpsNoAdd(fb)
return ops.RegisterForChanges(obs)
}
return nil
}
// UnregisterFromChanges implements the Notifier interface for KBFSOpsStandard
func (fs *KBFSOpsStandard) UnregisterFromChanges(
folderBranches []FolderBranch, obs Observer) error {
for _, fb := range folderBranches {
// TODO: add branch parameter to notifier interface
ops := fs.getOpsNoAdd(fb)
return ops.UnregisterFromChanges(obs)
}
return nil
}
| 1 | 11,883 | Could the `deleteFromFavorites` happen when `head == nil` because it's not initialized yet somehow, rather than because the TLF doesn't exist? I'm concerned this could make it possible for favorites to appear deleted on a device even though the deletion hasn't actually happened on the server. | keybase-kbfs | go
@@ -457,7 +457,7 @@ void CmpSeabaseDDL::dropSeabaseSchema(StmtDDLDropSchema * dropSchemaNode)
bool isVolatile = (memcmp(schName.data(),"VOLATILE_SCHEMA",strlen("VOLATILE_SCHEMA")) == 0);
int32_t length = 0;
- int32_t rowCount = 0;
+ Int64 rowCount = 0;
bool someObjectsCouldNotBeDropped = false;
char errorObjs[1010];
Queue * objectsQueue = NULL; | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: CmpSeabaseDDLschema.cpp
* Description: Implements ddl operations for Seabase schemas.
*
*
* Created: 10/30/2014
* Language: C++
*
*
*****************************************************************************
*/
#include "CmpSeabaseDDLincludes.h"
#include "StmtDDLCreateSchema.h"
#include "StmtDDLDropSchema.h"
#include "StmtDDLAlterSchema.h"
#include "StmtDDLGive.h"
#include "ElemDDLColDefault.h"
#include "NumericType.h"
#include "ComUser.h"
#include "keycolumns.h"
#include "ElemDDLColRef.h"
#include "ElemDDLColName.h"
#include "CmpDDLCatErrorCodes.h"
#include "Globals.h"
#include "CmpMain.h"
#include "Context.h"
#include "PrivMgrCommands.h"
#include "PrivMgrObjects.h"
#include <vector>
static bool dropOneTable(
ExeCliInterface & cliInterface,
const char * catalogName,
const char * schemaName,
const char * objectName,
bool isVolatile,
bool ifExists,
bool ddlXns);
static bool transferObjectPrivs(
const char * systemCatalogName,
const char * catalogName,
const char * schemaName,
const int32_t newOwnerID,
const char * newOwnerName);
// *****************************************************************************
// * *
// * Function: CmpSeabaseDDL::addSchemaObject *
// * *
// * Inserts a schema object row into the OBJECTS table. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <cliInterface> ExeCliInterface & In *
// * is a reference to an Executor CLI interface handle. *
// * *
// * <schemaName> const ComSchemaName & In *
// * is a reference to a ComSchemaName instance. The catalog name must be *
// * set. *
// * *
// * <schemaClass> ComSchemaClass In *
// * is the class (private or shared) of the schema to be added. *
// * *
// * <ownerID> Int32 In *
// * is the authorization ID that will own the schema. *
// * *
// * <ignoreIfExists> NABoolean In *
// * do not return an error if schema already exists *
// *****************************************************************************
// * *
// * Returns: status
// * *
// * 0: Schema was added *
// * -1: Schema was not added. A CLI error is put into the diags area. *
// * 1: Schema already exists and ignoreIfExists is specified. *
// * No error is added to the diags area. *
// * *
// *****************************************************************************
int CmpSeabaseDDL::addSchemaObject(
ExeCliInterface & cliInterface,
const ComSchemaName & schemaName,
ComSchemaClass schemaClass,
Int32 ownerID,
NABoolean ignoreIfExists)
{
NAString catalogName = schemaName.getCatalogNamePartAsAnsiString();
ComAnsiNamePart schemaNameAsComAnsi = schemaName.getSchemaNamePart();
NAString schemaNamePart = schemaNameAsComAnsi.getInternalName();
ComObjectName objName(catalogName,schemaNamePart,NAString(SEABASE_SCHEMA_OBJECTNAME),
COM_TABLE_NAME,TRUE);
if (isSeabaseReservedSchema(objName) &&
!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
*CmpCommon::diags() << DgSqlCode(-CAT_RESERVED_METADATA_SCHEMA_NAME)
<< DgSchemaName(schemaName.getExternalName().data());
return -1;
}
NAString objectNamePart = objName.getObjectNamePartAsAnsiString(TRUE);
Lng32 retcode = existsInSeabaseMDTable(&cliInterface,catalogName,schemaNamePart,
objectNamePart, COM_UNKNOWN_OBJECT, FALSE);
if (retcode < 0)
return -1;
if (retcode == 1 ) // already exists
{
if (ignoreIfExists)
return 1;
else
*CmpCommon::diags() << DgSqlCode(-CAT_SCHEMA_ALREADY_EXISTS)
<< DgSchemaName(schemaName.getExternalName().data());
return -1;
}
char buf[4000];
ComUID schemaUID;
schemaUID.make_UID();
Int64 schemaObjectUID = schemaUID.get_value();
Int64 createTime = NA_JulianTimestamp();
NAString quotedSchName;
NAString quotedObjName;
ToQuotedString(quotedSchName,schemaNamePart,FALSE);
ToQuotedString(quotedObjName,NAString(SEABASE_SCHEMA_OBJECTNAME),FALSE);
char schemaObjectLit[3] = {0};
switch (schemaClass)
{
case COM_SCHEMA_CLASS_PRIVATE:
{
strncpy(schemaObjectLit,COM_PRIVATE_SCHEMA_OBJECT_LIT,2);
break;
}
case COM_SCHEMA_CLASS_SHARED:
{
strncpy(schemaObjectLit,COM_SHARED_SCHEMA_OBJECT_LIT,2);
break;
}
case COM_SCHEMA_CLASS_DEFAULT:
default:
{
// Schemas are private by default, but could choose a different
// default class here based on CQD or other attribute.
strncpy(schemaObjectLit,COM_PRIVATE_SCHEMA_OBJECT_LIT,2);
break;
}
}
str_sprintf(buf, "insert into %s.\"%s\".%s values ('%s', '%s', '%s', '%s', %Ld, %Ld, %Ld, '%s', '%s', %d, %d, 0)",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
catalogName.data(), quotedSchName.data(), quotedObjName.data(),
schemaObjectLit,
schemaObjectUID,
createTime,
createTime,
COM_YES_LIT, // valid_def
COM_NO_LIT, // droppable
ownerID,ownerID);
Int32 cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
return 0;
}
//******************* End of CmpSeabaseDDL::addSchemaObject ********************
// *****************************************************************************
// * *
// * Function: CmpSeabaseDDL::createSeabaseSchema *
// * *
// * Implements the CREATE SCHEMA command. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <createSchemaNode> StmtDDLCreateSchema * In *
// * is a pointer to a create schema parser node. *
// * *
// * <currentCatalogName> NAString & In *
// * is the name of the current catalog. *
// * *
// *****************************************************************************
void CmpSeabaseDDL::createSeabaseSchema(
StmtDDLCreateSchema * createSchemaNode,
NAString & currentCatalogName)
{
ComSchemaName schemaName (createSchemaNode->getSchemaName());
if (schemaName.getCatalogNamePart().isEmpty())
schemaName.setCatalogNamePart(currentCatalogName);
NAString catName = schemaName.getCatalogNamePartAsAnsiString();
ComAnsiNamePart schNameAsComAnsi = schemaName.getSchemaNamePart();
NAString schName = schNameAsComAnsi.getInternalName();
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ComSchemaClass schemaClass;
Int32 objectOwner = NA_UserIdDefault;
Int32 schemaOwner = NA_UserIdDefault;
// If creating the hive statistics schema, make owners
// the HIVE_ROLE_ID and skip authorization check.
// Schema is being created as part of an update statistics cmd
if (schName == HIVE_STATS_SCHEMA_NO_QUOTES &&
Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
objectOwner = HIVE_ROLE_ID;
schemaOwner = HIVE_ROLE_ID;
}
else
{
int32_t retCode = verifyDDLCreateOperationAuthorized(&cliInterface,
SQLOperation::CREATE_SCHEMA,
catName,
schName,
schemaClass,
objectOwner,
schemaOwner);
if (retCode != 0)
{
handleDDLCreateAuthorizationError(retCode,catName,schName);
return;
}
}
Int32 schemaOwnerID = NA_UserIdDefault;
// If the AUTHORIZATION clause was not specified, the current user becomes
// the schema owner.
if (createSchemaNode->getAuthorizationID().isNull())
schemaOwnerID = ComUser::getCurrentUser();
else
if (ComUser::getAuthIDFromAuthName(createSchemaNode->getAuthorizationID().data(),
schemaOwnerID) != 0)
{
*CmpCommon::diags() << DgSqlCode(-CAT_AUTHID_DOES_NOT_EXIST_ERROR)
<< DgString0(createSchemaNode->getAuthorizationID().data());
return;
}
if (addSchemaObject(cliInterface,
schemaName,
createSchemaNode->getSchemaClass(),
schemaOwnerID,
createSchemaNode->createIfNotExists()))
return;
// Create histogram tables for schema, if the schema is not volatile and
// not reserved
NAString tableNotCreated;
if (!createSchemaNode->isVolatile() && !ComIsTrafodionReservedSchemaName(schName))
{
if (createHistogramTables(&cliInterface, schemaName.getExternalName(),
FALSE, tableNotCreated))
{
*CmpCommon::diags() << DgSqlCode(-CAT_HISTOGRAM_TABLE_NOT_CREATED)
<< DgTableName(tableNotCreated.data());
return;
}
}
}
//***************** End of CmpSeabaseDDL::createSeabaseSchema ******************
// *****************************************************************************
// * *
// * Function: CmpSeabaseDDL::describeSchema *
// * *
// * Provides text for SHOWDDL SCHEMA command. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <catalogName> const NAString & In *
// * is a reference to a catalog name. *
// * *
// * <schemaName> const NAString & In *
// * is a reference to a schema name. *
// * *
// * <output> NAString & Out *
// * passes back text for the SHOWDDL SCHEMA command, specifically the *
// * command to create the specified schema. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Text returned for specified schema. *
// * false: Could not retrieve information for specified schema. *
// * *
// *****************************************************************************
bool CmpSeabaseDDL::describeSchema(
const NAString & catalogName,
const NAString & schemaName,
NAString & output)
{
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
ComSchemaClass schemaClass;
Int32 objectOwner;
Int32 schemaOwner;
ComObjectType objectType;
CmpSeabaseDDL cmpSBD(STMTHEAP);
if (cmpSBD.switchCompiler())
{
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_RETRIEVE_PRIVS);
return false;
}
Int64 schemaUID = getObjectTypeandOwner(&cliInterface,
catalogName.data(),
schemaName.data(),
SEABASE_SCHEMA_OBJECTNAME,
objectType,
objectOwner);
if (schemaUID < 0)
{
*CmpCommon::diags() << DgSqlCode(-CAT_SCHEMA_DOES_NOT_EXIST_ERROR)
<< DgSchemaName(catalogName + "." + schemaName);
cmpSBD.switchBackCompiler();
return false;
}
char username[MAX_USERNAME_LEN+1];
Int32 lActualLen = 0;
Int16 status = ComUser::getAuthNameFromAuthID(objectOwner,username,
MAX_USERNAME_LEN,lActualLen);
if (status != FEOK)
{
*CmpCommon::diags() << DgSqlCode(-20235) // Error converting user ID.
<< DgInt0(status)
<< DgInt1(objectOwner);
cmpSBD.switchBackCompiler();
return false;
}
// Generate output text
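  // For example (illustrative only), a private schema SALES in catalog
  // TRAFODION owned by DB__ROOT would produce:
  //   CREATE PRIVATE SCHEMA "TRAFODION"."SALES" AUTHORIZATION "DB__ROOT";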
output = "CREATE ";
switch (objectType)
{
case COM_PRIVATE_SCHEMA_OBJECT:
output += "PRIVATE";
break;
case COM_SHARED_SCHEMA_OBJECT:
output += "SHARED";
break;
default:
return false;
}
output += " SCHEMA \"";
output += catalogName.data();
output += "\".\"";
output += schemaName.data();
// AUTHORIZATION clause is rarely used, but include it for replay.
output += "\" AUTHORIZATION \"";
output += username;
output += "\";";
cmpSBD.switchBackCompiler();
return true;
}
//******************* End of CmpSeabaseDDL::describeSchema *********************
// *****************************************************************************
// * *
// * Function: CmpSeabaseDDL::dropSeabaseSchema *
// * *
// * Implements the DROP SCHEMA command. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <dropSchemaNode> StmtDDLDropSchema * In *
// *    is a pointer to a drop schema parser node.                             *
// * *
// *****************************************************************************
void CmpSeabaseDDL::dropSeabaseSchema(StmtDDLDropSchema * dropSchemaNode)
{
Lng32 cliRC = 0;
ComSchemaName schemaName(dropSchemaNode->getSchemaName());
NAString catName = schemaName.getCatalogNamePartAsAnsiString();
ComAnsiNamePart schNameAsComAnsi = schemaName.getSchemaNamePart();
NAString schName = schNameAsComAnsi.getInternalName();
ComObjectName objName(catName,schName,NAString("dummy"),COM_TABLE_NAME,TRUE);
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Int32 objectOwnerID = 0;
Int32 schemaOwnerID = 0;
ComObjectType objectType;
bool isVolatile = (memcmp(schName.data(),"VOLATILE_SCHEMA",strlen("VOLATILE_SCHEMA")) == 0);
int32_t length = 0;
int32_t rowCount = 0;
bool someObjectsCouldNotBeDropped = false;
char errorObjs[1010];
Queue * objectsQueue = NULL;
Queue * otherObjectsQueue = NULL;
NABoolean dirtiedMetadata = FALSE;
errorObjs[0] = 0;
Int64 schemaUID = getObjectTypeandOwner(&cliInterface,catName.data(),schName.data(),
SEABASE_SCHEMA_OBJECTNAME,objectType,schemaOwnerID);
// if schemaUID == -1, then either the schema does not exist or an unexpected error occurred
if (schemaUID == -1)
{
// If an error occurred, return
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) > 0)
goto label_error;
// schema does not exist and IF EXISTS specified, then ignore and continue
if (dropSchemaNode->dropIfExists())
goto label_error;
// A Trafodion schema does not exist if the schema object row is not
// present: CATALOG-NAME.SCHEMA-NAME.__SCHEMA__.
*CmpCommon::diags() << DgSqlCode(-CAT_SCHEMA_DOES_NOT_EXIST_ERROR)
<< DgSchemaName(schemaName.getExternalName().data());
goto label_error;
}
if (!isDDLOperationAuthorized(SQLOperation::DROP_SCHEMA,
schemaOwnerID,schemaOwnerID))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
goto label_error;
}
if ((isSeabaseReservedSchema(objName) ||
(schName == SEABASE_SYSTEM_SCHEMA)) &&
!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
*CmpCommon::diags() << DgSqlCode(-CAT_USER_CANNOT_DROP_SMD_SCHEMA)
<< DgSchemaName(schemaName.getExternalName().data());
goto label_error;
}
// Can't drop a schema whose name begins with VOLATILE_SCHEMA unless the
// keyword VOLATILE was specified in the DROP SCHEMA command.
if (isVolatile && !dropSchemaNode->isVolatile())
{
*CmpCommon::diags() << DgSqlCode(-CAT_RESERVED_METADATA_SCHEMA_NAME)
<< DgTableName(schName);
goto label_error;
}
// Get a list of all objects in the schema, excluding the schema object itself.
char query[4000];
// select objects in the schema to drop, don't return PRIMARY_KEY_CONSTRAINTS,
// they always get removed when the parent table is dropped.
  // Filter out the LOB dependent tables too - they will get dropped when
  // the main LOB table is dropped.
str_sprintf(query,"SELECT distinct TRIM(object_name), TRIM(object_type) "
"FROM %s.\"%s\".%s "
"WHERE catalog_name = '%s' AND schema_name = '%s' AND "
"object_name <> '"SEABASE_SCHEMA_OBJECTNAME"' AND "
"object_type <> 'PK' "
"FOR READ COMMITTED ACCESS",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
(char*)catName.data(),(char*)schName.data());
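  // With the default TRAFODION system catalog this expands to something like
  // (illustrative):
  //   SELECT DISTINCT TRIM(object_name), TRIM(object_type)
  //   FROM TRAFODION."_MD_".OBJECTS
  //   WHERE catalog_name = '<cat>' AND schema_name = '<sch>' AND
  //         object_name <> '__SCHEMA__' AND object_type <> 'PK'
  //   FOR READ COMMITTED ACCESS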
cliRC = cliInterface.fetchAllRows(objectsQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error;
}
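  // Objects are dropped in dependency order below: first routines and views,
  // then libraries, then user tables, then user tables that merely match the
  // LOB dependent-name pattern, then remaining indexes and sequences, then
  // histogram tables, and finally the schema object row itself.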
  // Check to see if any non-histogram objects exist in the schema; if so,
  // the CASCADE drop behavior is required.
if (dropSchemaNode->getDropBehavior() == COM_RESTRICT_DROP_BEHAVIOR)
{
objectsQueue->position();
for (size_t i = 0; i < objectsQueue->numEntries(); i++)
{
OutputInfo * vi = (OutputInfo*)objectsQueue->getNext();
NAString objName = vi->get(0);
if (!isHistogramTable(objName))
{
OutputInfo * oi = (OutputInfo*)objectsQueue->getCurr();
*CmpCommon::diags() << DgSqlCode(-CAT_SCHEMA_IS_NOT_EMPTY)
<< DgTableName(objName.data());
goto label_error;
}
}
}
// Drop procedures (SPJs), UDFs (functions), and views
objectsQueue->position();
for (int idx = 0; idx < objectsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)objectsQueue->getNext();
char * objName = vi->get(0);
NAString objectTypeLit = vi->get(1);
ComObjectType objectType = PrivMgr::ObjectLitToEnum(objectTypeLit.data());
char buf[1000];
NAString objectTypeString;
NAString cascade = " ";
switch (objectType)
{
// These object types are handled later and can be ignored for now.
case COM_BASE_TABLE_OBJECT:
case COM_INDEX_OBJECT:
case COM_CHECK_CONSTRAINT_OBJECT:
case COM_NOT_NULL_CONSTRAINT_OBJECT:
case COM_REFERENTIAL_CONSTRAINT_OBJECT:
case COM_SEQUENCE_GENERATOR_OBJECT:
case COM_UNIQUE_CONSTRAINT_OBJECT:
case COM_LIBRARY_OBJECT:
{
continue;
}
      // If the library where procedures and functions reside is dropped
      // before its procedures and routines, those objects may no longer
      // exist; use IF EXISTS to prevent the drop from raising errors.
case COM_STORED_PROCEDURE_OBJECT:
{
objectTypeString = "PROCEDURE IF EXISTS ";
break;
}
case COM_USER_DEFINED_ROUTINE_OBJECT:
{
objectTypeString = "FUNCTION IF EXISTS ";
cascade = "CASCADE";
break;
}
case COM_VIEW_OBJECT:
{
objectTypeString = "VIEW";
cascade = "CASCADE";
break;
}
// These object types should not be seen.
case COM_MV_OBJECT:
case COM_MVRG_OBJECT:
case COM_TRIGGER_OBJECT:
case COM_LOB_TABLE_OBJECT:
case COM_TRIGGER_TABLE_OBJECT:
case COM_SYNONYM_OBJECT:
case COM_PRIVATE_SCHEMA_OBJECT:
case COM_SHARED_SCHEMA_OBJECT:
case COM_EXCEPTION_TABLE_OBJECT:
case COM_LOCK_OBJECT:
case COM_MODULE_OBJECT:
default:
SEABASEDDL_INTERNAL_ERROR("Unrecognized object type in schema");
goto label_error;
}
dirtiedMetadata = TRUE;
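      // Build and execute the DROP statement for this object, e.g.
      // (illustrative): drop VIEW "TRAFODION"."SALES"."CUSTOMER_VW" CASCADE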
str_sprintf(buf, "drop %s \"%s\".\"%s\".\"%s\" %s",
objectTypeString.data(),(char*)catName.data(),(char*)schName.data(),
objName,cascade.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0 && cliRC != -CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
{
appendErrorObjName(errorObjs, objName);
if (dropSchemaNode->ddlXns())
goto label_error;
else
someObjectsCouldNotBeDropped = true;
}
}
// Drop libraries in the schema
objectsQueue->position();
for (int idx = 0; idx < objectsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)objectsQueue->getNext();
char * objName = vi->get(0);
NAString objType = vi->get(1);
if (objType == COM_LIBRARY_OBJECT_LIT)
{
char buf [1000];
dirtiedMetadata = TRUE;
str_sprintf(buf, "DROP LIBRARY \"%s\".\"%s\".\"%s\" CASCADE",
(char*)catName.data(), (char*)schName.data(), objName);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0 && cliRC != -CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
{
appendErrorObjName(errorObjs, objName);
if (dropSchemaNode->ddlXns())
goto label_error;
else
someObjectsCouldNotBeDropped = true;
}
}
}
// Drop all tables in the schema. This will also drop any associated constraints.
objectsQueue->position();
for (int idx = 0; idx < objectsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)objectsQueue->getNext();
NAString objName = vi->get(0);
NAString objType = vi->get(1);
// drop user objects first
if (objType == COM_BASE_TABLE_OBJECT_LIT)
{
// Histogram tables are dropped later. Sample tables
// are dropped when their corresponding tables are dropped
// so we don't need to drop them directly. Also,
// avoid any tables that match LOB dependent tablenames
// (there is no special type for these tables).
if (!isHistogramTable(objName) &&
!isSampleTable(objName) &&
!isLOBDependentNameMatch(objName))
{
dirtiedMetadata = TRUE;
if (dropOneTable(cliInterface,(char*)catName.data(),
(char*)schName.data(),(char*)objName.data(),
isVolatile, FALSE,dropSchemaNode->ddlXns()))
{
appendErrorObjName(errorObjs, objName.data());
if (dropSchemaNode->ddlXns())
goto label_error;
else
someObjectsCouldNotBeDropped = true;
}
}
}
}
  // If there are any user tables having the LOB dependent name pattern, they
  // will still be around. Drop those. The real LOB dependent tables would
  // have been dropped in the previous step.
objectsQueue->position();
for (int idx = 0; idx < objectsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)objectsQueue->getNext();
NAString objName = vi->get(0);
NAString objType = vi->get(1);
if (objType == COM_BASE_TABLE_OBJECT_LIT)
{
if (!isHistogramTable(objName) && isLOBDependentNameMatch(objName))
{
dirtiedMetadata = TRUE;
// Pass in TRUE for "ifExists" since the lobDependent tables
// would have already been dropped and we don't want those to
// raise errors. We just want to catch any user tables that
// happen to have the same name patterns.
if (dropOneTable(cliInterface,(char*)catName.data(),
(char*)schName.data(),(char*)objName.data(),
isVolatile,TRUE, dropSchemaNode->ddlXns()))
{
appendErrorObjName(errorObjs, objName.data());
if (dropSchemaNode->ddlXns())
goto label_error;
else
someObjectsCouldNotBeDropped = true;
}
}
}
}
// Drop any remaining indexes.
str_sprintf(query,"SELECT TRIM(object_name), TRIM(object_type) "
"FROM %s.\"%s\".%s "
"WHERE catalog_name = '%s' AND "
" schema_name = '%s' AND "
" object_type = '%s' "
"FOR READ COMMITTED ACCESS ",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
(char*)catName.data(),(char*)schName.data(),
COM_INDEX_OBJECT_LIT);
cliRC = cliInterface.fetchAllRows(otherObjectsQueue,query,0,FALSE,FALSE,TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error;
}
otherObjectsQueue->position();
for (int idx = 0; idx < otherObjectsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)otherObjectsQueue->getNext();
char * objName = vi->get(0);
NAString objType = vi->get(1);
if (objType == COM_INDEX_OBJECT_LIT)
{
char buf [1000];
dirtiedMetadata = TRUE;
str_sprintf(buf, "DROP INDEX \"%s\".\"%s\".\"%s\" CASCADE",
(char*)catName.data(), (char*)schName.data(), objName);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0 && cliRC != -CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
{
appendErrorObjName(errorObjs, objName);
if (dropSchemaNode->ddlXns())
goto label_error;
else
someObjectsCouldNotBeDropped = true;
}
}
}
// Drop any remaining sequences.
str_sprintf(query,"SELECT TRIM(object_name), TRIM(object_type) "
"FROM %s.\"%s\".%s "
"WHERE catalog_name = '%s' AND "
" schema_name = '%s' AND "
" object_type = '%s' "
"FOR READ COMMITTED ACCESS ",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
(char*)catName.data(),(char*)schName.data(),
COM_SEQUENCE_GENERATOR_OBJECT_LIT);
cliRC = cliInterface.fetchAllRows(otherObjectsQueue,query,0,FALSE,FALSE,TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error;
}
otherObjectsQueue->position();
for (int idx = 0; idx < otherObjectsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)otherObjectsQueue->getNext();
char * objName = vi->get(0);
NAString objType = vi->get(1);
if (objType == COM_SEQUENCE_GENERATOR_OBJECT_LIT)
{
char buf [1000];
dirtiedMetadata = TRUE;
str_sprintf(buf, "DROP SEQUENCE \"%s\".\"%s\".\"%s\"",
(char*)catName.data(), (char*)schName.data(), objName);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0 && cliRC != -CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
{
appendErrorObjName(errorObjs, objName);
if (dropSchemaNode->ddlXns())
goto label_error;
else
someObjectsCouldNotBeDropped = true;
}
}
}
// Drop histogram tables last
objectsQueue->position();
for (size_t i = 0; i < objectsQueue->numEntries(); i++)
{
OutputInfo * vi = (OutputInfo*)objectsQueue->getNext();
NAString objName = vi->get(0);
if (isHistogramTable(objName))
{
dirtiedMetadata = TRUE;
if (dropOneTable(cliInterface,(char*)catName.data(),
(char*)schName.data(),(char*)objName.data(),
isVolatile, FALSE, dropSchemaNode->ddlXns()))
{
appendErrorObjName(errorObjs, objName.data());
if (dropSchemaNode->ddlXns())
goto label_error;
else
someObjectsCouldNotBeDropped = true;
}
}
}
if (someObjectsCouldNotBeDropped)
{
NAString reason;
reason = "Reason: Some objects could not be dropped in schema "
+ schName + ". ObjectsInSchema: "
+ errorObjs;
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_DROP_SCHEMA)
<< DgSchemaName(catName + "." + schName)
<< DgString0(reason);
goto label_error;
}
// For volatile schemas, sometimes only the objects get dropped.
// If the dropObjectsOnly flag is set, just exit now, we are done.
if (dropSchemaNode->dropObjectsOnly())
return;
// Verify all objects in the schema have been dropped.
str_sprintf(query,"SELECT COUNT(*) "
"FROM %s.\"%s\".%s "
"WHERE catalog_name = '%s' AND schema_name = '%s' AND "
"object_name <> '"SEABASE_SCHEMA_OBJECTNAME"'"
"FOR READ COMMITTED ACCESS",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
(char*)catName.data(),(char*)schName.data());
cliRC = cliInterface.executeImmediate(query,(char*)&rowCount,&length,NULL);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error;
}
if (rowCount > 0)
{
CmpCommon::diags()->clear();
str_sprintf(query,"SELECT TRIM(object_name) "
"FROM %s.\"%s\".%s "
"WHERE catalog_name = '%s' AND schema_name = '%s' AND "
"object_name <> '"SEABASE_SCHEMA_OBJECTNAME"' AND "
"object_type <> 'PK' "
"FOR READ COMMITTED ACCESS",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
(char*)catName.data(),(char*)schName.data());
cliRC = cliInterface.fetchAllRows(objectsQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error;
}
for (int i = 0; i < objectsQueue->numEntries(); i++)
{
OutputInfo * vi = (OutputInfo*)objectsQueue->getNext();
NAString objName = vi->get(0);
appendErrorObjName(errorObjs, objName.data());
}
NAString reason;
reason = "Reason: schema "
+ schName + " is not empty. ObjectsInSchema: "
+ errorObjs;
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_DROP_SCHEMA)
<< DgSchemaName(catName + "." + schName)
<< DgString0(reason);
goto label_error;
}
// After all objects in the schema have been dropped, drop the schema object itself.
char buf [1000];
dirtiedMetadata = TRUE;
str_sprintf(buf,"DELETE FROM %s.\"%s\".%s "
"WHERE CATALOG_NAME = '%s' AND SCHEMA_NAME = '%s' AND "
"OBJECT_NAME = '"SEABASE_SCHEMA_OBJECTNAME"'",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
(char*)catName.data(),(char*)schName.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0 && cliRC != -CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
{
NAString reason;
reason = "Reason: Delete of object " +
NAString(SEABASE_SCHEMA_OBJECTNAME) + " failed.";
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_DROP_SCHEMA)
<< DgSchemaName(catName + "." + schName)
<< DgString0(reason);
goto label_error;
}
// Everything succeeded, return
return;
label_error:
// If metadata has not been changed, just return
if (!dirtiedMetadata)
{
return;
}
// Add an error asking for user to cleanup schema
*CmpCommon::diags() << DgSqlCode(-CAT_ATTEMPT_CLEANUP_SCHEMA)
<< DgSchemaName(catName + "." + schName);
return;
}
//****************** End of CmpSeabaseDDL::dropSeabaseSchema *******************
// *****************************************************************************
// * *
// * Function: CmpSeabaseDDL::alterSeabaseSchema *
// * *
// * Implements the ALTER SCHEMA command. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <alterSchemaNode> StmtDDLAlterSchema * In *
// *    is a pointer to an alter schema parser node.                           *
// * *
// *****************************************************************************
void CmpSeabaseDDL::alterSeabaseSchema(StmtDDLAlterSchema * alterSchemaNode)
{
Lng32 cliRC = 0;
ComSchemaName schemaName(alterSchemaNode->getSchemaName());
NAString catName = schemaName.getCatalogNamePartAsAnsiString();
ComAnsiNamePart schNameAsComAnsi = schemaName.getSchemaNamePart();
NAString schName = schNameAsComAnsi.getInternalName();
ComObjectName objName(catName,schName,NAString("dummy"),COM_TABLE_NAME,TRUE);
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Int32 objectOwnerID = 0;
Int32 schemaOwnerID = 0;
ComObjectType objectType;
bool isVolatile = (memcmp(schName.data(),"VOLATILE_SCHEMA",strlen("VOLATILE_SCHEMA")) == 0);
int32_t length = 0;
int32_t rowCount = 0;
bool someObjectsCouldNotBeAltered = false;
char errorObjs[1010];
Queue * objectsQueue = NULL;
Queue * otherObjectsQueue = NULL;
NABoolean dirtiedMetadata = FALSE;
Int32 checkErr = 0;
StmtDDLAlterTableStoredDesc::AlterStoredDescType sdo =
alterSchemaNode->getStoredDescOperation();
errorObjs[0] = 0;
Int64 schemaUID = getObjectTypeandOwner(&cliInterface,catName.data(),schName.data(),
SEABASE_SCHEMA_OBJECTNAME,objectType,schemaOwnerID);
// if schemaUID == -1, then either the schema does not exist or an unexpected error occurred
if (schemaUID == -1)
{
// If an error occurred, return
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) > 0)
goto label_error;
// A Trafodion schema does not exist if the schema object row is not
// present: CATALOG-NAME.SCHEMA-NAME.__SCHEMA__.
*CmpCommon::diags() << DgSqlCode(-CAT_SCHEMA_DOES_NOT_EXIST_ERROR)
<< DgSchemaName(schemaName.getExternalName().data());
goto label_error;
}
if (!isDDLOperationAuthorized(SQLOperation::ALTER_SCHEMA,
schemaOwnerID,schemaOwnerID))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
goto label_error;
}
if ((isSeabaseReservedSchema(objName) ||
(schName == SEABASE_SYSTEM_SCHEMA)) &&
!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
*CmpCommon::diags() << DgSqlCode(-CAT_USER_CANNOT_DROP_SMD_SCHEMA)
<< DgSchemaName(schemaName.getExternalName().data());
goto label_error;
}
// Can't alter a schema whose name begins with VOLATILE_SCHEMA unless the
// keyword VOLATILE was specified in the ALTER SCHEMA command.
if (isVolatile && !alterSchemaNode->isVolatile())
{
*CmpCommon::diags() << DgSqlCode(-CAT_RESERVED_METADATA_SCHEMA_NAME)
<< DgTableName(schName);
goto label_error;
}
if (alterSchemaNode->isDropAllTables())
{
// should not reach here, is transformed to DropSchema during parsing
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Should not reach here. Should have been transformed to DropSchema during parsing.");
goto label_error;
}
if (alterSchemaNode->isRenameSchema())
{
// Not yet supported
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Cannot rename a schema.");
goto label_error;
}
if (NOT alterSchemaNode->isAlterStoredDesc())
{
// unsupported option
*CmpCommon::diags() << DgSqlCode(-3242)
<< DgString0("Unsupported option specified.");
goto label_error;
}
// Get a list of all objects in the schema, excluding the schema object itself.
char query[4000];
// select objects in the schema to alter
str_sprintf(query,"SELECT distinct TRIM(object_name), TRIM(object_type), object_uid "
"FROM %s.\"%s\".%s "
"WHERE catalog_name = '%s' AND schema_name = '%s' AND "
"object_name <> '"SEABASE_SCHEMA_OBJECTNAME"' AND "
"(object_type = 'BT' OR "
" object_type = 'VI') "
"FOR READ COMMITTED ACCESS",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
(char*)catName.data(),(char*)schName.data());
cliRC = cliInterface.fetchAllRows(objectsQueue, query, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
goto label_error;
}
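  // For each base table or view found, apply the requested stored descriptor
  // operation: GENERATE regenerates the descriptor, DELETE removes it from
  // the TEXT table, ENABLE/DISABLE update the MD_OBJECTS_DISABLE_STORED_DESC
  // flag, and CHECK validates the stored descriptor against the current
  // object definition.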
objectsQueue->position();
for (int idx = 0; idx < objectsQueue->numEntries(); idx++)
{
OutputInfo * vi = (OutputInfo*)objectsQueue->getNext();
NAString objName = vi->get(0);
NAString objType = vi->get(1);
Int64 objUID = *(Int64*)vi->get(2);
if (sdo == StmtDDLAlterTableStoredDesc::GENERATE)
{
cliRC =
updateObjectRedefTime(&cliInterface,
getSystemCatalog(), schName, objName,
COM_BASE_TABLE_OBJECT_LIT,
-1, objUID, TRUE);
if (cliRC < 0)
{
// append error and move on to next one
appendErrorObjName(errorObjs, objName);
someObjectsCouldNotBeAltered = true;
CmpCommon::diags()->clear();
}
}
else if (sdo == StmtDDLAlterTableStoredDesc::DELETE)
{
cliRC = deleteFromTextTable
(&cliInterface, objUID, COM_STORED_DESC_TEXT, 0);
if (cliRC < 0)
{
// append error and move on to next one
appendErrorObjName(errorObjs, objName);
someObjectsCouldNotBeAltered = true;
CmpCommon::diags()->clear();
}
} // delete stored desc
else if (sdo == StmtDDLAlterTableStoredDesc::ENABLE)
{
Int64 flags = MD_OBJECTS_DISABLE_STORED_DESC;
cliRC = updateObjectFlags(&cliInterface, objUID, flags, TRUE);
if (cliRC < 0)
{
appendErrorObjName(errorObjs, objName);
someObjectsCouldNotBeAltered = true;
CmpCommon::diags()->clear();
}
}
else if (sdo == StmtDDLAlterTableStoredDesc::DISABLE)
{
Int64 flags = MD_OBJECTS_DISABLE_STORED_DESC;
cliRC = updateObjectFlags(&cliInterface, objUID, flags, FALSE);
if (cliRC < 0)
{
appendErrorObjName(errorObjs, objName);
someObjectsCouldNotBeAltered = true;
CmpCommon::diags()->clear();
}
}
else if (sdo == StmtDDLAlterTableStoredDesc::CHECK)
{
cliRC = checkAndGetStoredObjectDesc(&cliInterface, objUID, NULL);
CmpCommon::diags()->clear();
if (cliRC < 0)
{
checkErr = cliRC;
appendErrorObjName(errorObjs, objName);
someObjectsCouldNotBeAltered = true;
}
}
} // for
if (someObjectsCouldNotBeAltered)
{
NAString reason;
if (sdo == StmtDDLAlterTableStoredDesc::CHECK)
{
reason = "Reason: Following objects failed stored descriptor check";
if (checkErr == -1)
reason += " (object could not be accessed) ";
else if (checkErr == -2)
reason += " (object does not exist) ";
else if (checkErr == -3)
reason += " (change in stored structures) ";
reason += ": ";
reason += errorObjs;
}
else
reason = "Reason: Some objects could not be accessed in schema "
+ schName + ". ObjectsInSchema: "
+ errorObjs;
*CmpCommon::diags() << DgSqlCode(-CAT_UNABLE_TO_ALTER_SCHEMA)
<< DgSchemaName(catName + "." + schName)
<< DgString0(reason);
goto label_error;
}
// Everything succeeded, return
return;
label_error:
return;
}
//****************** End of CmpSeabaseDDL::alterSeabaseSchema *****************
// *****************************************************************************
// * *
// * Function: CmpSeabaseDDL::giveSeabaseSchema *
// * *
// * Implements the GIVE SCHEMA command. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <giveSchemaNode> StmtDDLGiveSchema * In *
// *    is a pointer to a give schema parser node.                             *
// * *
// * <currentCatalogName> NAString & In *
// * is the name of the current catalog. *
// * *
// *****************************************************************************
void CmpSeabaseDDL::giveSeabaseSchema(
StmtDDLGiveSchema * giveSchemaNode,
NAString & currentCatalogName)
{
ComDropBehavior dropBehavior = giveSchemaNode->getDropBehavior();
NAString catalogName = giveSchemaNode->getCatalogName();
NAString schemaName = giveSchemaNode->getSchemaName();
if (catalogName.isNull())
catalogName = currentCatalogName;
ExeCliInterface cliInterface(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
Int32 objectOwnerID = 0;
Int32 schemaOwnerID = 0;
ComObjectType objectType;
Int64 schemaUID = getObjectTypeandOwner(&cliInterface,catalogName.data(),
schemaName.data(),SEABASE_SCHEMA_OBJECTNAME,
objectType,schemaOwnerID);
if (schemaUID == -1)
{
// A Trafodion schema does not exist if the schema object row is not
// present: CATALOG-NAME.SCHEMA-NAME.__SCHEMA__.
*CmpCommon::diags() << DgSqlCode(-CAT_SCHEMA_DOES_NOT_EXIST_ERROR)
<< DgSchemaName(schemaName.data());
return;
}
// *****************************************************************************
// * *
// * A schema owner can give their own schema to another authID, but they *
// * cannot give the objects in a shared schema to another authID. Only *
// * DB__ROOT or a user with the ALTER_SCHEMA privilege can change the owners *
// * of objects in a shared schema. So if the schema is private, or if only *
// * the schema is being given, we do standard authentication checking. But *
// * if giving all the objects in a shared schema, we change the check ID to *
// * the default user to force the ALTER_SCHEMA privilege check. *
// * *
// *****************************************************************************
int32_t checkID = schemaOwnerID;
if (objectType == COM_SHARED_SCHEMA_OBJECT &&
dropBehavior == COM_CASCADE_DROP_BEHAVIOR)
checkID = NA_UserIdDefault;
if (!isDDLOperationAuthorized(SQLOperation::ALTER_SCHEMA,checkID,checkID))
{
*CmpCommon::diags() << DgSqlCode(-CAT_NOT_AUTHORIZED);
return;
}
ComObjectName objName(catalogName,schemaName,NAString("dummy"),COM_TABLE_NAME,TRUE);
if (isSeabaseReservedSchema(objName) &&
!Get_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL))
{
*CmpCommon::diags() << DgSqlCode(-CAT_USER_CANNOT_DROP_SMD_SCHEMA)
<< DgSchemaName(schemaName.data());
return;
}
bool isVolatile = (memcmp(schemaName.data(),"VOLATILE_SCHEMA",strlen("VOLATILE_SCHEMA")) == 0);
// Can't give a schema whose name begins with VOLATILE_SCHEMA.
if (isVolatile)
{
*CmpCommon::diags() << DgSqlCode(-CAT_RESERVED_METADATA_SCHEMA_NAME)
<< DgTableName(schemaName);
return;
}
int32_t newOwnerID = -1;
if (ComUser::getAuthIDFromAuthName(giveSchemaNode->getAuthID().data(),
newOwnerID) != 0)
{
*CmpCommon::diags() << DgSqlCode(-CAT_AUTHID_DOES_NOT_EXIST_ERROR)
<< DgString0(giveSchemaNode->getAuthID().data());
return;
}
// *****************************************************************************
// * *
// * Drop behavior is only relevant for shared schemas. For shared schemas, *
// * ownership of the schema OR the schema and all its objects may be given to *
// * another authorization ID. For private schemas, all objects are owned by *
// * the schema owner, so the drop behavior is always CASCADE. *
// * *
// * NOTE: The syntax for drop behavior always defaults to RESTRICT; for *
// * private schemas this is simply ignored, as opposed to requiring *
// *          users to always specify CASCADE.                                 *
// * *
// *****************************************************************************
Lng32 cliRC = 0;
char buf[4000];
if (objectType == COM_SHARED_SCHEMA_OBJECT &&
dropBehavior == COM_RESTRICT_DROP_BEHAVIOR)
{
str_sprintf(buf,"UPDATE %s.\"%s\".%s "
"SET object_owner = %d "
"WHERE object_UID = %Ld",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
newOwnerID,schemaUID);
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
//
// At this point, we are giving all objects in the schema (as well as the
// schema itself) to the new authorization ID. If authentication is enabled,
// update the privileges first.
//
if (isAuthorizationEnabled())
{
int32_t rc = transferObjectPrivs(getSystemCatalog(),catalogName.data(),
schemaName.data(),newOwnerID,
giveSchemaNode->getAuthID().data());
if (rc != 0)
{
if (CmpCommon::diags()->getNumber(DgSqlCode::ERROR_) == 0)
{
//TODO: add error
}
return;
}
}
// Now update the object owner for all objects in the schema.
str_sprintf(buf,"UPDATE %s.\"%s\".%s "
"SET object_owner = %d "
"WHERE catalog_name = '%s' AND schema_name = '%s'",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
newOwnerID,catalogName.data(),schemaName.data());
cliRC = cliInterface.executeImmediate(buf);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
// Verify all objects in the schema have been given to the new owner.
str_sprintf(buf,"SELECT COUNT(*) "
"FROM %s.\"%s\".%s "
"WHERE catalog_name = '%s' AND schema_name = '%s' AND "
"object_name <> '"SEABASE_SCHEMA_OBJECTNAME"' AND "
"object_owner <> %d "
"FOR READ COMMITTED ACCESS",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
catalogName.data(),schemaName.data(),newOwnerID);
int32_t length = 0;
int32_t rowCount = 0;
cliRC = cliInterface.executeImmediate(buf,(char*)&rowCount,&length,NULL);
if (cliRC < 0)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
return;
}
if (rowCount > 0)
{
SEABASEDDL_INTERNAL_ERROR("Not all objects in schema were given");
return;
}
}
//****************** End of CmpSeabaseDDL::giveSeabaseSchema *******************
// *****************************************************************************
// Private/static functions
// *****************************************************************************
// *****************************************************************************
// * *
// * Function: createHistogramTables *
// * *
// * Creates all the histogram tables *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <cliInterface> ExeCliInterface * In *
// * is a reference to an Executor CLI interface handle. *
// * *
// * <schemaName> NAString & In *
// * is the catalog.schema of the histogram table to create. *
// * *
// * <ignoreIfExists> NABoolean In *
// * do not return an error if table already exists *
// * *
// *  <tableNotCreated>              NAString &                        Out     *
// *    returns the name of the first histogram table that could not be       *
// *    created                                                                *
// * *
// *****************************************************************************
// * *
// * Returns: Int32 *
// * *
// * -mainSQLCODE: Could not create histogram tables. *
// * 0: Create was successful. *
// * *
// *****************************************************************************
short CmpSeabaseDDL::createHistogramTables(
ExeCliInterface *cliInterface,
const NAString &schemaName,
const NABoolean ignoreIfExists,
NAString &tableNotCreated)
{
Int32 cliRC = 0;
tableNotCreated = "";
  // allMDHistInfo (CmpSeabaseDDLmd.h) is the list of all histogram tables;
  // MDTableInfo describes the table attributes. Create each table found in
  // the list.
Int32 numHistTables = sizeof(allMDHistInfo) / sizeof(MDTableInfo);
NAString prefixText = ignoreIfExists ? "IF NOT EXISTS " : "";
for (Int32 i = 0; i < numHistTables; i++)
{
const MDTableInfo &mdh = allMDHistInfo[i];
Int32 qryArraySize = mdh.sizeOfnewDDL / sizeof(QString);
// Concatenate the create table text into a single string
NAString concatenatedQuery;
for (Int32 j = 0; j < qryArraySize; j++)
{
NAString tempStr = mdh.newDDL[j].str;
concatenatedQuery += tempStr.strip(NAString::leading, ' ');
}
// qualify create table text with (optional) "IF NOT EXISTS" & schema name
// and place in front of the table name:
// "create table <textInsertion> hist-table ..."
std::string tableDDL (concatenatedQuery.data());
NAString textInsertion = prefixText + schemaName + ".";
size_t pos = tableDDL.find_first_of(mdh.newName);
if (pos == string::npos)
{
NAString errorText ("Unexpected error occurred while parsing create text for histogram table ");
errorText += mdh.newName;
SEABASEDDL_INTERNAL_ERROR(errorText.data());
tableNotCreated = mdh.newName;
return -CAT_INTERNAL_EXCEPTION_ERROR;
}
tableDDL = tableDDL.insert(pos, textInsertion.data());
// If the caller does not send in cliInterface, instantiate one now
ExeCliInterface cli;
if (cliInterface == NULL)
{
ExeCliInterface newCli(STMTHEAP, NULL, NULL,
CmpCommon::context()->sqlSession()->getParentQid());
cli = newCli;
}
else
cli = *cliInterface;
// Create the table
cliRC = cli.executeImmediate(tableDDL.c_str());
if (cliRC < 0)
{
cli.retrieveSQLDiagnostics(CmpCommon::diags());
tableNotCreated = mdh.newName;
return cliRC;
}
}
return 0;
}
//************************ End of createHistogramTables ************************
// *****************************************************************************
// * *
// * Function: adjustHiveExternalSchemas *
// * *
// * Changes the ownership and privilege grants to DB__HIVEROLE *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <cliInterface> ExeCliInterface * In *
// * is a reference to an Executor CLI interface handle. *
// *****************************************************************************
// * *
// * Returns: Int32 *
// * *
// * 0: Adjustment was successful *
// * -1: Adjustment failed *
// * *
// *****************************************************************************
short CmpSeabaseDDL::adjustHiveExternalSchemas(ExeCliInterface *cliInterface)
{
char buf[sizeof(SEABASE_MD_SCHEMA) +
sizeof(SEABASE_OBJECTS) +
strlen(getSystemCatalog()) + 300];
// get all the objects in special hive schemas
sprintf(buf, "SELECT catalog_name, schema_name, object_name, object_uid, object_type, object_owner "
" from %s.\"%s\".%s WHERE schema_name like '_HV_%c_'",
getSystemCatalog(), SEABASE_MD_SCHEMA, SEABASE_OBJECTS, '%');
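  // The LIKE pattern expands to '_HV_%_', i.e. schema names that follow the
  // Hive external schema naming convention (for example "_HV_HIVE_").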
Queue * objectsQueue = NULL;
Int32 cliRC = cliInterface->fetchAllRows(objectsQueue, buf, 0, FALSE, FALSE, TRUE);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
// adjust owner and privilege information for external hive objects
objectsQueue->position();
for (size_t i = 0; i < objectsQueue->numEntries(); i++)
{
OutputInfo * vi = (OutputInfo*)objectsQueue->getNext();
NAString catName = vi->get(0);
NAString schName = vi->get(1);
NAString objName = vi->get(2);
Int64 objUID = *(Int64*)vi->get(3);
NAString objectTypeLit = vi->get(4);
Int32 objOwner = *(Int32*)vi->get(5);
ComObjectType objType = PrivMgr::ObjectLitToEnum(objectTypeLit.data());
// If object owner is already the HIVE_ROLE_ID, then we are done.
if (objOwner == HIVE_ROLE_ID)
continue;
else
{
// only need to adjust privileges on securable items
if (PrivMgr::isSecurableObject(objType))
{
ComObjectName tblName(catName, schName, objName, COM_TABLE_NAME,
ComAnsiNamePart::INTERNAL_FORMAT, STMTHEAP);
NAString extTblName = tblName.getExternalName(TRUE);
// remove existing privs on object
if (!deletePrivMgrInfo(extTblName, objUID, objType))
return -1;
// add owner privs
if (!insertPrivMgrInfo(objUID, extTblName, objType,
HIVE_ROLE_ID, HIVE_ROLE_ID, ComUser::getCurrentUser()))
return -1;
}
// update schema_owner and objectOwner for object
sprintf(buf,"UPDATE %s.\"%s\".%s SET object_owner = %d "
", schema_owner = %d WHERE object_uid = %ld ",
getSystemCatalog(),SEABASE_MD_SCHEMA,SEABASE_OBJECTS,
HIVE_ROLE_ID, HIVE_ROLE_ID, objUID);
cliRC = cliInterface->executeImmediate(buf);
if (cliRC < 0)
{
cliInterface->retrieveSQLDiagnostics(CmpCommon::diags());
return -1;
}
}
}
return 0;
}
//******************** End of adjustHiveExternalSchemas ************************
// *****************************************************************************
// * *
// * Function: dropOneTable *
// * *
// * Drops a table and all its dependent objects. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <cliInterface> ExeCliInterface & In *
// * is a reference to an Executor CLI interface handle. *
// * *
// * <catalogName> const char * In *
// * is the catalog of the table to drop. *
// * *
// * <schemaName> const char * In *
// * is the schema of the table to drop. *
// * *
// * <objectName> const char * In *
// * is the name of the table to drop. *
// * *
// * <isVolatile> bool In *
// * is true if the object is volatile or part of a volatile schema. *
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Could not drop table or one of its dependent objects. *
// * false: Drop successful or could not set CQD for NATable cache reload. *
// * *
// *****************************************************************************
static bool dropOneTable(
ExeCliInterface & cliInterface,
const char * catalogName,
const char * schemaName,
const char * objectName,
bool isVolatile,
bool ifExists,
bool ddlXns)
{
char buf [1000];
bool someObjectsCouldNotBeDropped = false;
char volatileString[20] = {0};
char ifExistsString[20] = {0};
Lng32 cliRC = 0;
if (isVolatile)
strcpy(volatileString,"VOLATILE");
if (ifExists)
strcpy(ifExistsString,"IF EXISTS");
if (ComIsTrafodionExternalSchemaName(schemaName))
str_sprintf(buf,"DROP EXTERNAL TABLE \"%s\" FOR \"%s\".\"%s\".\"%s\" CASCADE",
objectName,catalogName,schemaName,objectName);
else
str_sprintf(buf,"DROP %s TABLE %s \"%s\".\"%s\".\"%s\" CASCADE",
volatileString, ifExistsString, catalogName,schemaName,objectName);
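  // buf now holds the DROP statement to execute, e.g. (illustrative):
  //   DROP VOLATILE TABLE IF EXISTS "TRAFODION"."SCH"."T1" CASCADE
  // or, for Trafodion external schemas:
  //   DROP EXTERNAL TABLE "T1" FOR "TRAFODION"."SCH"."T1" CASCADE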
ULng32 savedParserFlags = Get_SqlParser_Flags(0xFFFFFFFF);
try
{
Set_SqlParser_Flags(INTERNAL_QUERY_FROM_EXEUTIL);
cliRC = cliInterface.executeImmediate(buf);
}
catch (...)
{
// Restore parser flags settings to what they originally were
Assign_SqlParser_Flags(savedParserFlags);
throw;
}
// Restore parser flags settings to what they originally were
Assign_SqlParser_Flags(savedParserFlags);
if (cliRC < 0 && cliRC != -CAT_OBJECT_DOES_NOT_EXIST_IN_TRAFODION)
{
cliInterface.retrieveSQLDiagnostics(CmpCommon::diags());
someObjectsCouldNotBeDropped = true;
}
// remove NATable entry for this table
CorrName cn(objectName,STMTHEAP,schemaName,catalogName);
ActiveSchemaDB()->getNATableDB()->removeNATable
(cn,
ComQiScope::REMOVE_FROM_ALL_USERS, COM_BASE_TABLE_OBJECT,
ddlXns, FALSE);
return someObjectsCouldNotBeDropped;
}
//**************************** End of dropOneTable *****************************
// *****************************************************************************
// * *
// * Function: transferObjectPrivs *
// * *
// * Transfers object privs from current owner to new owner. *
// * *
// *****************************************************************************
// * *
// * Parameters: *
// * *
// * <systemCatalogName> const char * In *
// * is the location of the system catalog. *
// * *
// * <catalogName> const char * In *
// * is the catalog of the schema whose objects are getting a new owner. *
// * *
// * <schemaName> const char * In *
// * is the schema whose objects are getting a new owner. *
// * *
// * <newOwnerID> const int32_t In *
// * is the ID of the new owner for the objects. *
// * *
// * <newOwnerName const char * In *
// * is the database username or role name of the new owner for the objects.*
// * *
// *****************************************************************************
// * *
// * Returns: bool *
// * *
// * true: Privileges for object(s) transferred to new owner. *
// * false: Privileges for object(s) NOT transferred to new owner. *
// * *
// *****************************************************************************
static bool transferObjectPrivs(
const char * systemCatalogName,
const char * catalogName,
const char * schemaName,
const int32_t newOwnerID,
const char * newOwnerName)
{
PrivStatus privStatus = STATUS_GOOD;
// Initiate the privilege manager interface class
NAString privMgrMDLoc;
CONCAT_CATSCH(privMgrMDLoc,systemCatalogName,SEABASE_PRIVMGR_SCHEMA);
PrivMgrCommands privInterface(std::string(privMgrMDLoc.data()),CmpCommon::diags());
std::vector<UIDAndOwner> objectRows;
std::string whereClause(" WHERE catalog_name = '");
whereClause += catalogName;
whereClause += "' AND schema_name = '";
whereClause += schemaName;
whereClause += "'";
std::string orderByClause(" ORDER BY OBJECT_OWNER");
std::string metadataLocation(systemCatalogName);
metadataLocation += ".\"";
metadataLocation += SEABASE_MD_SCHEMA;
metadataLocation += "\"";
PrivMgrObjects objects(metadataLocation,CmpCommon::diags());
privStatus = objects.fetchUIDandOwner(whereClause,orderByClause,objectRows);
if (privStatus != STATUS_GOOD || objectRows.size() == 0)
return false;
int32_t lastOwner = objectRows[0].ownerID;
std::vector<int64_t> objectUIDs;
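   // objectRows is ordered by OBJECT_OWNER, so objects sharing the same
   // current owner are accumulated in objectUIDs and handed to
   // givePrivForObjects in a single call; the call after the loop flushes
   // the final batch.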
for (size_t i = 0; i < objectRows.size(); i++)
{
if (objectRows[i].ownerID != lastOwner)
{
privStatus = privInterface.givePrivForObjects(lastOwner,
newOwnerID,
newOwnerName,
objectUIDs);
objectUIDs.clear();
}
objectUIDs.push_back(objectRows[i].UID);
lastOwner = objectRows[i].ownerID;
}
privStatus = privInterface.givePrivForObjects(lastOwner,
newOwnerID,
newOwnerName,
objectUIDs);
return true;
}
//************************ End of transferObjectPrivs **************************
| 1 | 16,373 | It doesn't look like this particular "rowCount" variable is used anywhere. I suppose we could delete it. The code change is harmless though. | apache-trafodion | cpp |
@@ -380,6 +380,10 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
status, _ := s.serveHTTP(w, r)
+ if status == 204 {
+ w.WriteHeader(http.StatusNoContent)
+ }
+
// Fallback error response in case error handling wasn't chained in
if status >= 400 {
DefaultErrorFunc(w, r, status) | 1 | // Copyright 2015 Light Code Labs, LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package httpserver implements an HTTP server on top of Caddy.
package httpserver
import (
"context"
"crypto/tls"
"errors"
"fmt"
"log"
"net"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"github.com/lucas-clemente/quic-go/h2quic"
"github.com/mholt/caddy"
"github.com/mholt/caddy/caddyhttp/staticfiles"
"github.com/mholt/caddy/caddytls"
"github.com/mholt/caddy/telemetry"
)
// Server is the HTTP server implementation.
type Server struct {
Server *http.Server
quicServer *h2quic.Server
listener net.Listener
listenerMu sync.Mutex
sites []*SiteConfig
connTimeout time.Duration // max time to wait for a connection before force stop
tlsGovChan chan struct{} // close to stop the TLS maintenance goroutine
vhosts *vhostTrie
}
// ensure it satisfies the interface
var _ caddy.GracefulServer = new(Server)
var defaultALPN = []string{"h2", "http/1.1"}
// makeTLSConfig extracts TLS settings from each site config to
// build a tls.Config usable in Caddy HTTP servers. The returned
// config will be nil if TLS is disabled for these sites.
func makeTLSConfig(group []*SiteConfig) (*tls.Config, error) {
var tlsConfigs []*caddytls.Config
for i := range group {
if HTTP2 && len(group[i].TLS.ALPN) == 0 {
// if no application-level protocol was configured up to now,
// default to HTTP/2, then HTTP/1.1 if necessary
group[i].TLS.ALPN = defaultALPN
}
tlsConfigs = append(tlsConfigs, group[i].TLS)
}
return caddytls.MakeTLSConfig(tlsConfigs)
}
func getFallbacks(sites []*SiteConfig) []string {
fallbacks := []string{}
for _, sc := range sites {
if sc.FallbackSite {
fallbacks = append(fallbacks, sc.Addr.Host)
}
}
return fallbacks
}
// NewServer creates a new Server instance that will listen on addr
// and will serve the sites configured in group.
func NewServer(addr string, group []*SiteConfig) (*Server, error) {
s := &Server{
Server: makeHTTPServerWithTimeouts(addr, group),
vhosts: newVHostTrie(),
sites: group,
connTimeout: GracefulTimeout,
}
s.vhosts.fallbackHosts = append(s.vhosts.fallbackHosts, getFallbacks(group)...)
s.Server = makeHTTPServerWithHeaderLimit(s.Server, group)
s.Server.Handler = s // this is weird, but whatever
// extract TLS settings from each site config to build
// a tls.Config, which will not be nil if TLS is enabled
tlsConfig, err := makeTLSConfig(group)
if err != nil {
return nil, err
}
s.Server.TLSConfig = tlsConfig
// if TLS is enabled, make sure we prepare the Server accordingly
if s.Server.TLSConfig != nil {
// enable QUIC if desired (requires HTTP/2)
if HTTP2 && QUIC {
s.quicServer = &h2quic.Server{Server: s.Server}
s.Server.Handler = s.wrapWithSvcHeaders(s.Server.Handler)
}
// wrap the HTTP handler with a handler that does MITM detection
tlsh := &tlsHandler{next: s.Server.Handler}
s.Server.Handler = tlsh // this needs to be the "outer" handler when Serve() is called, for type assertion
// when Serve() creates the TLS listener later, that listener should
// be adding a reference the ClientHello info to a map; this callback
// will be sure to clear out that entry when the connection closes.
s.Server.ConnState = func(c net.Conn, cs http.ConnState) {
// when a connection closes or is hijacked, delete its entry
// in the map, because we are done with it.
if tlsh.listener != nil {
if cs == http.StateHijacked || cs == http.StateClosed {
tlsh.listener.helloInfosMu.Lock()
delete(tlsh.listener.helloInfos, c.RemoteAddr().String())
tlsh.listener.helloInfosMu.Unlock()
}
}
}
// As of Go 1.7, if the Server's TLSConfig is not nil, HTTP/2 is enabled only
// if TLSConfig.NextProtos includes the string "h2"
if HTTP2 && len(s.Server.TLSConfig.NextProtos) == 0 {
// some experimenting shows that this NextProtos must have at least
// one value that overlaps with the NextProtos of any other tls.Config
// that is returned from GetConfigForClient; if there is no overlap,
// the connection will fail (as of Go 1.8, Feb. 2017).
s.Server.TLSConfig.NextProtos = defaultALPN
}
}
// Compile custom middleware for every site (enables virtual hosting)
for _, site := range group {
stack := Handler(staticfiles.FileServer{Root: http.Dir(site.Root), Hide: site.HiddenFiles, IndexPages: site.IndexPages})
for i := len(site.middleware) - 1; i >= 0; i-- {
stack = site.middleware[i](stack)
}
site.middlewareChain = stack
s.vhosts.Insert(site.Addr.VHost(), site)
}
return s, nil
}
// makeHTTPServerWithHeaderLimit applies the minimum header limit configured within a group to the given http.Server
func makeHTTPServerWithHeaderLimit(s *http.Server, group []*SiteConfig) *http.Server {
var min int64
for _, cfg := range group {
limit := cfg.Limits.MaxRequestHeaderSize
if limit == 0 {
continue
}
// not set yet
if min == 0 {
min = limit
}
// find a better one
if limit < min {
min = limit
}
}
if min > 0 {
s.MaxHeaderBytes = int(min)
}
return s
}
// makeHTTPServerWithTimeouts makes an http.Server from the group of
// configs in a way that configures timeouts (or, if not set, it uses
// the default timeouts) by combining the configuration of each
// SiteConfig in the group. (Timeouts are important for mitigating
// slowloris attacks.)
func makeHTTPServerWithTimeouts(addr string, group []*SiteConfig) *http.Server {
// find the minimum duration configured for each timeout
var min Timeouts
for _, cfg := range group {
if cfg.Timeouts.ReadTimeoutSet &&
(!min.ReadTimeoutSet || cfg.Timeouts.ReadTimeout < min.ReadTimeout) {
min.ReadTimeoutSet = true
min.ReadTimeout = cfg.Timeouts.ReadTimeout
}
if cfg.Timeouts.ReadHeaderTimeoutSet &&
(!min.ReadHeaderTimeoutSet || cfg.Timeouts.ReadHeaderTimeout < min.ReadHeaderTimeout) {
min.ReadHeaderTimeoutSet = true
min.ReadHeaderTimeout = cfg.Timeouts.ReadHeaderTimeout
}
if cfg.Timeouts.WriteTimeoutSet &&
(!min.WriteTimeoutSet || cfg.Timeouts.WriteTimeout < min.WriteTimeout) {
min.WriteTimeoutSet = true
min.WriteTimeout = cfg.Timeouts.WriteTimeout
}
if cfg.Timeouts.IdleTimeoutSet &&
(!min.IdleTimeoutSet || cfg.Timeouts.IdleTimeout < min.IdleTimeout) {
min.IdleTimeoutSet = true
min.IdleTimeout = cfg.Timeouts.IdleTimeout
}
}
// for the values that were not set, use defaults
if !min.ReadTimeoutSet {
min.ReadTimeout = defaultTimeouts.ReadTimeout
}
if !min.ReadHeaderTimeoutSet {
min.ReadHeaderTimeout = defaultTimeouts.ReadHeaderTimeout
}
if !min.WriteTimeoutSet {
min.WriteTimeout = defaultTimeouts.WriteTimeout
}
if !min.IdleTimeoutSet {
min.IdleTimeout = defaultTimeouts.IdleTimeout
}
// set the final values on the server and return it
return &http.Server{
Addr: addr,
ReadTimeout: min.ReadTimeout,
ReadHeaderTimeout: min.ReadHeaderTimeout,
WriteTimeout: min.WriteTimeout,
IdleTimeout: min.IdleTimeout,
}
}
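// wrapWithSvcHeaders wraps previousHandler so that QUIC advertisement
// headers (the Alt-Svc header written by h2quic's SetQuicHeaders) are added
// to every response before the wrapped handler runs.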
func (s *Server) wrapWithSvcHeaders(previousHandler http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
s.quicServer.SetQuicHeaders(w.Header())
previousHandler.ServeHTTP(w, r)
}
}
// Listen creates an active listener for s that can be
// used to serve requests.
func (s *Server) Listen() (net.Listener, error) {
if s.Server == nil {
return nil, fmt.Errorf("Server field is nil")
}
ln, err := net.Listen("tcp", s.Server.Addr)
if err != nil {
var succeeded bool
if runtime.GOOS == "windows" {
// Windows has been known to keep sockets open even after closing the listeners.
// Tests reveal this error case easily because they call Start() then Stop()
// in succession. TODO: Better way to handle this? And why limit this to Windows?
for i := 0; i < 20; i++ {
time.Sleep(100 * time.Millisecond)
ln, err = net.Listen("tcp", s.Server.Addr)
if err == nil {
succeeded = true
break
}
}
}
if !succeeded {
return nil, err
}
}
if tcpLn, ok := ln.(*net.TCPListener); ok {
ln = tcpKeepAliveListener{TCPListener: tcpLn}
}
cln := ln.(caddy.Listener)
for _, site := range s.sites {
for _, m := range site.listenerMiddleware {
cln = m(cln)
}
}
// Very important to return a concrete caddy.Listener
// implementation for graceful restarts.
return cln.(caddy.Listener), nil
}
// ListenPacket creates a UDP connection for QUIC if it is enabled.
func (s *Server) ListenPacket() (net.PacketConn, error) {
if QUIC {
udpAddr, err := net.ResolveUDPAddr("udp", s.Server.Addr)
if err != nil {
return nil, err
}
return net.ListenUDP("udp", udpAddr)
}
return nil, nil
}
// Serve serves requests on ln. It blocks until ln is closed.
func (s *Server) Serve(ln net.Listener) error {
s.listenerMu.Lock()
s.listener = ln
s.listenerMu.Unlock()
if s.Server.TLSConfig != nil {
// Create TLS listener - note that we do not replace s.listener
// with this TLS listener; tls.listener is unexported and does
// not implement the File() method we need for graceful restarts
// on POSIX systems.
// TODO: Is this ^ still relevant anymore? Maybe we can now that it's a net.Listener...
ln = newTLSListener(ln, s.Server.TLSConfig)
if handler, ok := s.Server.Handler.(*tlsHandler); ok {
handler.listener = ln.(*tlsHelloListener)
}
// Rotate TLS session ticket keys
s.tlsGovChan = caddytls.RotateSessionTicketKeys(s.Server.TLSConfig)
}
err := s.Server.Serve(ln)
if err == http.ErrServerClosed {
err = nil // not an error worth reporting since closing a server is intentional
}
if s.quicServer != nil {
s.quicServer.Close()
}
return err
}
// ServePacket serves QUIC requests on pc until it is closed.
func (s *Server) ServePacket(pc net.PacketConn) error {
if s.quicServer != nil {
err := s.quicServer.Serve(pc.(*net.UDPConn))
return fmt.Errorf("serving QUIC connections: %v", err)
}
return nil
}
// ServeHTTP is the entry point of all HTTP requests.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
defer func() {
// We absolutely need to be sure we stay alive up here,
// even though, in theory, the errors middleware does this.
if rec := recover(); rec != nil {
log.Printf("[PANIC] %v", rec)
DefaultErrorFunc(w, r, http.StatusInternalServerError)
}
}()
// record the User-Agent string (with a cap on its length to mitigate attacks)
ua := r.Header.Get("User-Agent")
if len(ua) > 512 {
ua = ua[:512]
}
uaHash := telemetry.FastHash([]byte(ua)) // this is a normalized field
go telemetry.SetNested("http_user_agent", uaHash, ua)
go telemetry.AppendUnique("http_user_agent_count", uaHash)
go telemetry.Increment("http_request_count")
// copy the original, unchanged URL into the context
// so it can be referenced by middlewares
urlCopy := *r.URL
if r.URL.User != nil {
userInfo := new(url.Userinfo)
*userInfo = *r.URL.User
urlCopy.User = userInfo
}
c := context.WithValue(r.Context(), OriginalURLCtxKey, urlCopy)
r = r.WithContext(c)
// Setup a replacer for the request that keeps track of placeholder
// values across plugins.
replacer := NewReplacer(r, nil, "")
c = context.WithValue(r.Context(), ReplacerCtxKey, replacer)
r = r.WithContext(c)
w.Header().Set("Server", caddy.AppName)
status, _ := s.serveHTTP(w, r)
// Fallback error response in case error handling wasn't chained in
if status >= 400 {
DefaultErrorFunc(w, r, status)
}
}
func (s *Server) serveHTTP(w http.ResponseWriter, r *http.Request) (int, error) {
// strip out the port because it's not used in virtual
// hosting; the port is irrelevant because each listener
// is on a different port.
hostname, _, err := net.SplitHostPort(r.Host)
if err != nil {
hostname = r.Host
}
// look up the virtualhost; if no match, serve error
vhost, pathPrefix := s.vhosts.Match(hostname + r.URL.Path)
c := context.WithValue(r.Context(), caddy.CtxKey("path_prefix"), pathPrefix)
r = r.WithContext(c)
if vhost == nil {
// check for ACME challenge even if vhost is nil;
// could be a new host coming online soon
if caddytls.HTTPChallengeHandler(w, r, "localhost") {
return 0, nil
}
// otherwise, log the error and write a message to the client
remoteHost, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
remoteHost = r.RemoteAddr
}
WriteSiteNotFound(w, r) // don't add headers outside of this function
log.Printf("[INFO] %s - No such site at %s (Remote: %s, Referer: %s)",
hostname, s.Server.Addr, remoteHost, r.Header.Get("Referer"))
return 0, nil
}
// we still check for ACME challenge if the vhost exists,
// because we must apply its HTTP challenge config settings
if caddytls.HTTPChallengeHandler(w, r, vhost.ListenHost) {
return 0, nil
}
// trim the path portion of the site address from the beginning of
// the URL path, so a request to example.com/foo/blog on the site
// defined as example.com/foo appears as /blog instead of /foo/blog.
if pathPrefix != "/" {
r.URL = trimPathPrefix(r.URL, pathPrefix)
}
// enforce strict host matching, which ensures that the SNI
// value (if any), matches the Host header; essential for
// sites that rely on TLS ClientAuth sharing a port with
// sites that do not - if mismatched, close the connection
if vhost.StrictHostMatching && r.TLS != nil &&
strings.ToLower(r.TLS.ServerName) != strings.ToLower(hostname) {
r.Close = true
log.Printf("[ERROR] %s - strict host matching: SNI (%s) and HTTP Host (%s) values differ",
vhost.Addr, r.TLS.ServerName, hostname)
return http.StatusForbidden, nil
}
return vhost.middlewareChain.ServeHTTP(w, r)
}
func trimPathPrefix(u *url.URL, prefix string) *url.URL {
// We need to use URL.EscapedPath() when trimming the pathPrefix as
// URL.Path is ambiguous about / or %2f - see docs. See #1927
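	// For example (illustrative), with prefix "/foo" a request path of
	// "/foo/blog%2Fpost" is rewritten to "/blog%2Fpost"; EscapedPath keeps
	// the encoded slash intact instead of decoding it to "/".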
trimmedPath := strings.TrimPrefix(u.EscapedPath(), prefix)
if !strings.HasPrefix(trimmedPath, "/") {
trimmedPath = "/" + trimmedPath
}
	// After trimming the path, reconstruct the URI string with the query before parsing
trimmedURI := trimmedPath
if u.RawQuery != "" || u.ForceQuery == true {
trimmedURI = trimmedPath + "?" + u.RawQuery
}
if u.Fragment != "" {
trimmedURI = trimmedURI + "#" + u.Fragment
}
trimmedURL, err := url.Parse(trimmedURI)
if err != nil {
log.Printf("[ERROR] Unable to parse trimmed URL %s: %v", trimmedURI, err)
return u
}
return trimmedURL
}
// Address returns the address s was assigned to listen on.
func (s *Server) Address() string {
return s.Server.Addr
}
// Stop stops s gracefully (or forcefully after timeout) and
// closes its listener.
func (s *Server) Stop() error {
ctx, cancel := context.WithTimeout(context.Background(), s.connTimeout)
defer cancel()
err := s.Server.Shutdown(ctx)
if err != nil {
return err
}
// signal any TLS governor goroutines to exit
if s.tlsGovChan != nil {
close(s.tlsGovChan)
}
return nil
}
// OnStartupComplete lists the sites served by this server
// and any relevant information, assuming caddy.Quiet == false.
func (s *Server) OnStartupComplete() {
if caddy.Quiet {
return
}
for _, site := range s.sites {
output := site.Addr.String()
if caddy.IsLoopback(s.Address()) && !caddy.IsLoopback(site.Addr.Host) {
output += " (only accessible on this machine)"
}
fmt.Println(output)
log.Println(output)
}
}
// defaultTimeouts stores the default timeout values to use
// if left unset by user configuration. NOTE: Most default
// timeouts are disabled (see issues #1464 and #1733).
var defaultTimeouts = Timeouts{IdleTimeout: 5 * time.Minute}
// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted
// connections. It's used by ListenAndServe and ListenAndServeTLS so
// dead TCP connections (e.g. closing laptop mid-download) eventually
// go away.
//
// Borrowed from the Go standard library.
type tcpKeepAliveListener struct {
*net.TCPListener
}
// Accept accepts the connection with a keep-alive enabled.
func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
tc, err := ln.AcceptTCP()
if err != nil {
return
}
tc.SetKeepAlive(true)
tc.SetKeepAlivePeriod(3 * time.Minute)
return tc, nil
}
// File implements caddy.Listener; it returns the underlying file of the listener.
func (ln tcpKeepAliveListener) File() (*os.File, error) {
return ln.TCPListener.File()
}
// ErrMaxBytesExceeded is the error returned by MaxBytesReader
// when the request body exceeds the limit imposed
var ErrMaxBytesExceeded = errors.New("http: request body too large")
// DefaultErrorFunc responds to an HTTP request with a simple description
// of the specified HTTP status code.
func DefaultErrorFunc(w http.ResponseWriter, r *http.Request, status int) {
WriteTextResponse(w, status, fmt.Sprintf("%d %s\n", status, http.StatusText(status)))
}
const httpStatusMisdirectedRequest = 421 // RFC 7540, 9.1.2
// WriteSiteNotFound writes appropriate error code to w, signaling that
// requested host is not served by Caddy on a given port.
func WriteSiteNotFound(w http.ResponseWriter, r *http.Request) {
status := http.StatusNotFound
if r.ProtoMajor >= 2 {
// TODO: use http.StatusMisdirectedRequest when it gets defined
status = httpStatusMisdirectedRequest
}
WriteTextResponse(w, status, fmt.Sprintf("%d Site %s is not served on this interface\n", status, r.Host))
}
// WriteTextResponse writes body with code status to w. The body will
// be interpreted as plain text.
func WriteTextResponse(w http.ResponseWriter, status int, body string) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.Header().Set("X-Content-Type-Options", "nosniff")
w.WriteHeader(status)
w.Write([]byte(body))
}
// SafePath joins siteRoot and reqPath and converts it to a path that can
// be used to access a path on the local disk. It ensures the path does
// not traverse outside of the site root.
//
// If opening a file, use http.Dir instead.
func SafePath(siteRoot, reqPath string) string {
reqPath = filepath.ToSlash(reqPath)
reqPath = strings.Replace(reqPath, "\x00", "", -1) // NOTE: Go 1.9 checks for null bytes in the syscall package
if siteRoot == "" {
siteRoot = "."
}
return filepath.Join(siteRoot, filepath.FromSlash(path.Clean("/"+reqPath)))
}
// OriginalURLCtxKey is the key for accessing the original, incoming URL on an HTTP request.
const OriginalURLCtxKey = caddy.CtxKey("original_url")
| 1 | 12,491 | Hmm, probably we should write whatever `status` is returned instead of coding a special case. | caddyserver-caddy | go
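A minimal sketch of the direction this review suggests (hypothetical, not the actual patch): serveHTTP would return the appropriate status and let ServeHTTP's existing "if status >= 400" fallback render whatever comes back via DefaultErrorFunc, rather than special-casing the "no such site" response inside serveHTTP. The helper name below is made up for illustration; it reuses httpStatusMisdirectedRequest from this file.
// notFoundStatus mirrors the status selection currently done in WriteSiteNotFound.
func notFoundStatus(r *http.Request) int {
	if r.ProtoMajor >= 2 {
		return httpStatusMisdirectedRequest // 421, RFC 7540 section 9.1.2
	}
	return http.StatusNotFound
}
// In serveHTTP, the vhost == nil branch could then log the miss and simply
// return notFoundStatus(r), nil, leaving the response body to the caller's
// generic fallback.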
@@ -128,6 +128,10 @@ func parsedRuleToProtoRule(in *ParsedRule) *proto.Rule {
}
}
+ if in.HTTPMatch != nil {
+ out.HttpMatch = &proto.HTTPMatch{Methods: in.HTTPMatch.Methods}
+ }
+
// Fill in the ICMP fields. We can't follow the pattern and make a
// convertICMP() function because we can't name the return type of the
// function (it's private to the protobuf package). | 1 | // Copyright (c) 2016-2018 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package calc
import (
"crypto/sha256"
"encoding/base64"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/libcalico-go/lib/net"
"github.com/projectcalico/libcalico-go/lib/numorstring"
)
const (
// Compromise: shorter is better for occupancy and readability. Longer is better for
// collision-resistance. 16 chars gives us 96 bits of entropy, which is fairly collision
// resistant.
RuleIDLength = 16
)
func parsedRulesToProtoRules(in []*ParsedRule, ruleIDSeed string) (out []*proto.Rule) {
out = make([]*proto.Rule, len(in))
for ii, inRule := range in {
out[ii] = parsedRuleToProtoRule(inRule)
}
fillInRuleIDs(out, ruleIDSeed)
return
}
func fillInRuleIDs(rules []*proto.Rule, ruleIDSeed string) {
s := sha256.New224()
s.Write([]byte(ruleIDSeed))
hash := s.Sum(nil)
for ii, rule := range rules {
// Each hash chains in the previous hash, so that its position in the chain and
// the rules before it affect its hash.
s.Reset()
s.Write(hash)
// We need a form of the rule that we can hash. Convert it to the protobuf
// binary representation, which is deterministic, at least for a given rev of the
// library.
// TODO(smc) Can we do better than hashing the protobuf?
rule.RuleId = ""
data, err := rule.Marshal()
if err != nil {
log.WithError(err).WithField("rule", rule).Panic("Failed to marshal rule")
}
s.Write(data)
hash = s.Sum(hash[0:0])
// Encode the hash using a compact character set. We use the URL-safe base64
// variant because it uses '-' and '_', which are more shell-friendly.
ruleID := base64.RawURLEncoding.EncodeToString(hash)[:RuleIDLength]
if log.GetLevel() >= log.DebugLevel {
log.WithFields(log.Fields{
"rule": rule,
"action": rule.Action,
"position": ii,
"seed": ruleIDSeed,
"ruleID": ruleID,
}).Debug("Calculated rule ID")
}
rule.RuleId = ruleID
}
}
func parsedRuleToProtoRule(in *ParsedRule) *proto.Rule {
out := &proto.Rule{
Action: in.Action,
IpVersion: ipVersionToProtoIPVersion(in.IPVersion, in.Protocol),
Protocol: protocolToProtoProtocol(in.Protocol),
SrcNet: ipNetsToProtoStrings(in.SrcNets),
SrcPorts: portsToProtoPorts(in.SrcPorts),
SrcNamedPortIpSetIds: in.SrcNamedPortIPSetIDs,
DstNet: ipNetsToProtoStrings(in.DstNets),
DstPorts: portsToProtoPorts(in.DstPorts),
DstNamedPortIpSetIds: in.DstNamedPortIPSetIDs,
SrcIpSetIds: in.SrcIPSetIDs,
DstIpSetIds: in.DstIPSetIDs,
NotProtocol: protocolToProtoProtocol(in.NotProtocol),
NotSrcNet: ipNetsToProtoStrings(in.NotSrcNets),
NotSrcPorts: portsToProtoPorts(in.NotSrcPorts),
NotSrcNamedPortIpSetIds: in.NotSrcNamedPortIPSetIDs,
NotDstNet: ipNetsToProtoStrings(in.NotDstNets),
NotDstPorts: portsToProtoPorts(in.NotDstPorts),
NotDstNamedPortIpSetIds: in.NotDstNamedPortIPSetIDs,
NotSrcIpSetIds: in.NotSrcIPSetIDs,
NotDstIpSetIds: in.NotDstIPSetIDs,
// Pass through fields for the policy sync API.
OriginalSrcSelector: in.OriginalSrcSelector,
OriginalSrcNamespaceSelector: in.OriginalSrcNamespaceSelector,
OriginalDstSelector: in.OriginalDstSelector,
OriginalDstNamespaceSelector: in.OriginalDstNamespaceSelector,
OriginalNotSrcSelector: in.OriginalNotSrcSelector,
OriginalNotDstSelector: in.OriginalNotDstSelector,
}
if len(in.OriginalSrcServiceAccountNames) > 0 || in.OriginalSrcServiceAccountSelector != "" {
out.SrcServiceAccountMatch = &proto.ServiceAccountMatch{
Selector: in.OriginalSrcServiceAccountSelector,
Names: in.OriginalSrcServiceAccountNames,
}
}
if len(in.OriginalDstServiceAccountNames) > 0 || in.OriginalDstServiceAccountSelector != "" {
out.DstServiceAccountMatch = &proto.ServiceAccountMatch{
Selector: in.OriginalDstServiceAccountSelector,
Names: in.OriginalDstServiceAccountNames,
}
}
// Fill in the ICMP fields. We can't follow the pattern and make a
// convertICMP() function because we can't name the return type of the
// function (it's private to the protobuf package).
if in.ICMPType != nil {
if in.ICMPCode != nil {
out.Icmp = &proto.Rule_IcmpTypeCode{
IcmpTypeCode: &proto.IcmpTypeAndCode{
Type: int32(*in.ICMPType),
Code: int32(*in.ICMPCode),
},
}
} else {
out.Icmp = &proto.Rule_IcmpType{
IcmpType: int32(*in.ICMPType),
}
}
}
if in.NotICMPType != nil {
if in.NotICMPCode != nil {
out.NotIcmp = &proto.Rule_NotIcmpTypeCode{
NotIcmpTypeCode: &proto.IcmpTypeAndCode{
Type: int32(*in.NotICMPType),
Code: int32(*in.NotICMPCode),
},
}
} else {
out.NotIcmp = &proto.Rule_NotIcmpType{
NotIcmpType: int32(*in.NotICMPType),
}
}
}
log.WithFields(log.Fields{
"in": in,
"out": out,
}).Debug("Converted rule to protobuf format.")
return out
}
func ipVersionToProtoIPVersion(in *int, p *numorstring.Protocol) proto.IPVersion {
if in == nil {
// No explicit version, see if we can work out the version from the protocol.
if p == nil {
return proto.IPVersion_ANY
}
switch p.String() {
case "icmp":
return proto.IPVersion_IPV4
case "icmpv6":
return proto.IPVersion_IPV6
default:
return proto.IPVersion_ANY
}
}
switch *in {
case 4:
return proto.IPVersion_IPV4
case 6:
return proto.IPVersion_IPV6
}
return proto.IPVersion_ANY
}
func protocolToProtoProtocol(in *numorstring.Protocol) (out *proto.Protocol) {
if in != nil {
if in.Type == numorstring.NumOrStringNum {
out = &proto.Protocol{
NumberOrName: &proto.Protocol_Number{
Number: int32(in.NumVal),
},
}
} else {
out = &proto.Protocol{
NumberOrName: &proto.Protocol_Name{Name: in.StrVal},
}
}
}
return
}
func ipNetsToProtoStrings(in []*net.IPNet) (out []string) {
for _, n := range in {
if n != nil {
out = append(out, n.String())
}
}
return
}
func portsToProtoPorts(in []numorstring.Port) (out []*proto.PortRange) {
if len(in) == 0 {
return
}
out = make([]*proto.PortRange, len(in))
for ii, port := range in {
out[ii] = portToProtoPort(port)
}
return
}
func portToProtoPort(in numorstring.Port) (out *proto.PortRange) {
out = &proto.PortRange{
First: int32(in.MinPort),
Last: int32(in.MaxPort),
}
return
}
| 1 | 16,363 | Could using the pointer-to-struct approach here cause confusion later? We've had several problems of that kind in the past. What does it mean if the struct is non-nil but its fields are nil? Is that even allowed? (libcalico-go question) Are there any validation requirements for this new rule addition; should it only be allowed if `Protocol == TCP`, for example? | projectcalico-felix | c |
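A small sketch of the kind of validation this review is asking about (a hypothetical helper, not part of the patch; it assumes HTTPMatch must carry at least one method and is only meaningful for TCP rules - both of those are open questions in the review, not settled behaviour). It needs only the standard errors and strings packages.
// validateHTTPMatch illustrates one possible answer to the review questions:
// reject an HTTPMatch whose fields are empty, and require a TCP protocol.
func validateHTTPMatch(methods []string, protocol string) error {
	if len(methods) == 0 {
		return errors.New("HTTPMatch is set but specifies no methods")
	}
	if !strings.EqualFold(protocol, "tcp") {
		return errors.New("HTTPMatch is only supported for TCP rules")
	}
	return nil
}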
@@ -11,6 +11,7 @@ class User < ActiveRecord::Base
has_many :approvals
has_many :observations
+ has_many :observers, through: :observers, source: :user
has_many :comments
# we do not use rolify gem (e.g.) but declare relationship like any other. | 1 | class User < ActiveRecord::Base
has_paper_trail class_name: 'C2Version'
validates :client_slug, inclusion: {
in: ->(_) { Proposal.client_slugs },
message: "'%{value}' is not in Proposal.client_slugs #{Proposal.client_slugs.inspect}",
allow_blank: true
}
validates :email_address, presence: true, uniqueness: true
validates_email_format_of :email_address
has_many :approvals
has_many :observations
has_many :comments
# we do not use rolify gem (e.g.) but declare relationship like any other.
has_many :user_roles
has_many :roles, through: :user_roles
# TODO rename to _delegations, and add relations for the Users
has_many :outgoing_delegates, class_name: 'ApprovalDelegate', foreign_key: 'assigner_id'
has_many :incoming_delegates, class_name: 'ApprovalDelegate', foreign_key: 'assignee_id'
# this is for user_roles specifically, not proposals or any other objects for which
# this user might have roles.
# rubocop:disable Style/PredicateName
def has_role?(name_or_role)
if name_or_role.is_a?(Role)
self.roles.include?(name_or_role)
else
self.roles.exists?(name: name_or_role)
end
end
# rubocop:enable Style/PredicateName
def add_role(name_or_role)
if name_or_role.is_a?(Role)
role = name_or_role
else
role = Role.find_or_create_by!(name: name_or_role)
end
self.user_roles.find_or_create_by!(role: role)
end
def self.with_role(name_or_role)
if name_or_role.is_a?(Role)
name_or_role.users
else
User.joins(:roles).where(roles: { name: name_or_role })
end
end
def self.sql_for_role_slug(role, slug)
self.with_role(role).select(:id).where(client_slug: slug).to_sql
end
def full_name
if first_name.present? && last_name.present?
"#{first_name} #{last_name}"
else
email_address
end
end
def requested_proposals
Proposal.where(requester_id: self.id)
end
def last_requested_proposal
self.requested_proposals.order('created_at DESC').first
end
def add_delegate(other)
self.outgoing_delegates.create!(assignee: other)
end
def delegates_to?(other)
self.outgoing_delegates.exists?(assignee_id: other.id)
end
def client_admin?
self.has_role?('client_admin')
end
def admin?
has_role?('admin')
end
def not_admin?
!admin?
end
def self.for_email(email)
User.find_or_create_by(email_address: email.strip.downcase)
end
def self.from_oauth_hash(auth_hash)
user_data = auth_hash.extra.raw_info.to_hash
self.find_or_create_by(email_address: user_data['email'])
end
def role_on(proposal)
RolePicker.new(self, proposal)
end
end
| 1 | 14,963 | why this recursive relationship definition? | 18F-C2 | rb |
@@ -40,7 +40,8 @@ type Procedure struct {
// HandlerSpec specifiying which handler and rpc type.
HandlerSpec HandlerSpec
- // Encoding of the handler, for introspection.
+ // Encoding of the handler, optional, used for introspection, and used for
+ // routing if present.
Encoding Encoding
// Signature of the handler, for introspection. This should be a snippet of | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package transport
import (
"context"
"go.uber.org/zap/zapcore"
)
// TODO: Until golang/mock#4 is fixed, imports in the generated code have to
// be fixed by hand. They use vendor/* import paths rather than direct.
// Procedure specifies a single handler registered in the RouteTable.
type Procedure struct {
// Name of the procedure.
Name string
// Service or empty to use the default service name.
Service string
// HandlerSpec specifiying which handler and rpc type.
HandlerSpec HandlerSpec
// Encoding of the handler, for introspection.
Encoding Encoding
// Signature of the handler, for introspection. This should be a snippet of
// Go code representing the function definition.
Signature string
}
// MarshalLogObject implements zap.ObjectMarshaler.
func (p Procedure) MarshalLogObject(enc zapcore.ObjectEncoder) error {
// Passing a Procedure as a zap.ObjectMarshaler allocates, so we shouldn't
// do it on the request path.
enc.AddString("name", p.Name)
enc.AddString("service", p.Service)
enc.AddString("encoding", string(p.Encoding))
enc.AddString("signature", p.Signature)
return enc.AddObject("handler", p.HandlerSpec)
}
// Less orders procedures lexicographically on (Service, Name).
func (p Procedure) Less(other Procedure) bool {
if p.Service == other.Service {
return p.Name < other.Name
}
return p.Service < other.Service
}
// Router maintains and provides access to a collection of procedures
type Router interface {
// Procedures returns a list of procedures that
// have been registered so far.
Procedures() []Procedure
// Choose decides a handler based on a context and transport request
// metadata, or returns an UnrecognizedProcedureError if no handler exists
// for the request. This is the interface for use in inbound transports to
// select a handler for a request.
Choose(ctx context.Context, req *Request) (HandlerSpec, error)
}
// RouteTable is a mutable interface for a Router that allows Registering new
// Procedures
type RouteTable interface {
Router
// Registers zero or more procedures with the route table.
Register([]Procedure)
}
| 1 | 13,900 | More sentence, less list please. > Encoding of the handler. This field is optional. We don't need to mention exactly what it's used for because that list can expand (as it has already in this change). | yarpc-yarpc-go | go |
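For reference, a sketch of the Procedure.Encoding field comment rewritten as sentences along the lines the reviewer quotes (wording is a suggestion only, not the merged text):
// Encoding of the handler. This field is optional.
Encoding Encoding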
@@ -66,6 +66,10 @@
// are included because they call NewRangeReader.)
// - NewWriter, from creation until the call to Close.
//
+// It also collects the following metrics:
+// - gocloud.dev/blob/bytes_read: the total number of bytes read, by provider.
+// - gocloud.dev/blob/bytes_written: the total number of bytes written, by provider.
+//
// To enable trace collection in your application, see "Configure Exporter" at
// https://opencensus.io/quickstart/go/tracing.
// To enable metric collection in your application, see "Exporting stats" at | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package blob provides an easy and portable way to interact with blobs
// within a storage location, hereafter called a "bucket".
//
// It supports operations like reading and writing blobs (using standard
// interfaces from the io package), deleting blobs, and listing blobs in a
// bucket.
//
// Subpackages contain distinct implementations of blob for various providers,
// including Cloud and on-prem solutions. For example, "fileblob" supports
// blobs backed by a filesystem. Your application should import one of these
// provider-specific subpackages and use its exported function(s) to create a
// *Bucket; do not use the NewBucket function in this package. For example:
//
// bucket, err := fileblob.OpenBucket("path/to/dir", nil)
// if err != nil {
// return fmt.Errorf("could not open bucket: %v", err)
// }
// buf, err := bucket.ReadAll(context.Background(), "myfile.txt")
// ...
//
// Then, write your application code using the *Bucket type. You can easily
// reconfigure your initialization code to choose a different provider.
// You can develop your application locally using fileblob, or deploy it to
// multiple Cloud providers. You may find http://github.com/google/wire useful
// for managing your initialization code.
//
// Alternatively, you can construct a *Bucket using blob.Open by providing
// a URL that's supported by a blob subpackage that you have linked
// in to your application.
//
//
// Errors
//
// The errors returned from this package can be inspected in several ways:
//
// The Code function from gocloud.dev/gcerrors will return an error code, also
// defined in that package, when invoked on an error.
//
// The Bucket.ErrorAs method can retrieve the driver error underlying the returned
// error.
//
//
// OpenCensus Integration
//
// OpenCensus supports tracing and metric collection for multiple languages and
// backend providers. See https://opencensus.io.
//
// This API collects OpenCensus traces and metrics for the following methods:
// - Attributes
// - Delete
// - NewRangeReader, from creation until the call to Close. (NewReader and ReadAll
// are included because they call NewRangeReader.)
// - NewWriter, from creation until the call to Close.
//
// To enable trace collection in your application, see "Configure Exporter" at
// https://opencensus.io/quickstart/go/tracing.
// To enable metric collection in your application, see "Exporting stats" at
// https://opencensus.io/quickstart/go/metrics.
package blob // import "gocloud.dev/blob"
import (
"bytes"
"context"
"crypto/md5"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"net/url"
"reflect"
"strings"
"sync"
"time"
"gocloud.dev/blob/driver"
"gocloud.dev/internal/gcerr"
"gocloud.dev/internal/oc"
)
// Reader reads bytes from a blob.
// It implements io.ReadCloser, and must be closed after
// reads are finished.
type Reader struct {
b driver.Bucket
r driver.Reader
end func(error) // called at Close to finish trace and metric collection
}
// Read implements io.Reader (https://golang.org/pkg/io/#Reader).
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.r.Read(p)
return n, wrapError(r.b, err)
}
// Close implements io.Closer (https://golang.org/pkg/io/#Closer).
func (r *Reader) Close() error {
err := wrapError(r.b, r.r.Close())
r.end(err)
return err
}
// ContentType returns the MIME type of the blob.
func (r *Reader) ContentType() string {
return r.r.Attributes().ContentType
}
// ModTime returns the time the blob was last modified.
func (r *Reader) ModTime() time.Time {
return r.r.Attributes().ModTime
}
// Size returns the size of the blob content in bytes.
func (r *Reader) Size() int64 {
return r.r.Attributes().Size
}
// As converts i to provider-specific types.
// See Bucket.As for more details.
func (r *Reader) As(i interface{}) bool {
return r.r.As(i)
}
// Attributes contains attributes about a blob.
type Attributes struct {
// CacheControl specifies caching attributes that providers may use
// when serving the blob.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
CacheControl string
// ContentDisposition specifies whether the blob content is expected to be
// displayed inline or as an attachment.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
ContentDisposition string
// ContentEncoding specifies the encoding used for the blob's content, if any.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
ContentEncoding string
// ContentLanguage specifies the language used in the blob's content, if any.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
ContentLanguage string
// ContentType is the MIME type of the blob. It will not be empty.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
ContentType string
// Metadata holds key/value pairs associated with the blob.
// Keys are guaranteed to be in lowercase, even if the backend provider
// has case-sensitive keys (although note that Metadata written via
// this package will always be lowercased). If there are duplicate
// case-insensitive keys (e.g., "foo" and "FOO"), only one value
// will be kept, and it is undefined which one.
Metadata map[string]string
// ModTime is the time the blob was last modified.
ModTime time.Time
// Size is the size of the blob's content in bytes.
Size int64
// MD5 is an MD5 hash of the blob contents or nil if not available.
MD5 []byte
asFunc func(interface{}) bool
}
// As converts i to provider-specific types.
// See Bucket.As for more details.
func (a *Attributes) As(i interface{}) bool {
if a.asFunc == nil {
return false
}
return a.asFunc(i)
}
// Writer writes bytes to a blob.
//
// It implements io.WriteCloser (https://golang.org/pkg/io/#Closer), and must be
// closed after all writes are done.
type Writer struct {
b driver.Bucket
w driver.Writer
end func(error) // called at Close to finish trace and metric collection
cancel func() // cancels the ctx provided to NewTypedWriter if contentMD5 verification fails
contentMD5 []byte
md5hash hash.Hash
// These fields exist only when w is not yet created.
//
// A ctx is stored in the Writer since we need to pass it into NewTypedWriter
// when we finish detecting the content type of the blob and create the
// underlying driver.Writer. This step happens inside Write or Close and
// neither of them take a context.Context as an argument. The ctx is set
// to nil after we have passed it to NewTypedWriter.
ctx context.Context
key string
opts *driver.WriterOptions
buf *bytes.Buffer
}
// sniffLen is the byte size of Writer.buf used to detect content-type.
const sniffLen = 512
// Write implements the io.Writer interface (https://golang.org/pkg/io/#Writer).
//
// Writes may happen asynchronously, so the returned error can be nil
// even if the actual write eventually fails. The write is only guaranteed to
// have succeeded if Close returns no error.
func (w *Writer) Write(p []byte) (n int, err error) {
if len(w.contentMD5) > 0 {
if _, err := w.md5hash.Write(p); err != nil {
return 0, err
}
}
if w.w != nil {
n, err := w.w.Write(p)
return n, wrapError(w.b, err)
}
// If w is not yet created due to no content-type being passed in, try to sniff
// the MIME type based on at most 512 bytes of the blob content of p.
// Detect the content-type directly if the first chunk is at least 512 bytes.
if w.buf.Len() == 0 && len(p) >= sniffLen {
return w.open(p)
}
// Store p in w.buf and detect the content-type when the size of content in
// w.buf is at least 512 bytes.
w.buf.Write(p)
if w.buf.Len() >= sniffLen {
return w.open(w.buf.Bytes())
}
return len(p), nil
}
// Close closes the blob writer. The write operation is not guaranteed to have succeeded until
// Close returns with no error.
// Close may return an error if the context provided to create the Writer is
// canceled or reaches its deadline.
func (w *Writer) Close() (err error) {
defer func() { w.end(err) }()
if len(w.contentMD5) > 0 {
// Verify the MD5 hash of what was written matches the ContentMD5 provided
// by the user.
md5sum := w.md5hash.Sum(nil)
if !bytes.Equal(md5sum, w.contentMD5) {
// No match! Return an error, but first cancel the context and call the
// driver's Close function to ensure the write is aborted.
w.cancel()
if w.w != nil {
_ = w.w.Close()
}
return fmt.Errorf("blob: the ContentMD5 you specified (%X) did not match what was written (%X)", w.contentMD5, md5sum)
}
}
defer w.cancel()
if w.w != nil {
return wrapError(w.b, w.w.Close())
}
if _, err := w.open(w.buf.Bytes()); err != nil {
return err
}
return wrapError(w.b, w.w.Close())
}
// open tries to detect the MIME type of p and write it to the blob.
// The error it returns is wrapped.
func (w *Writer) open(p []byte) (int, error) {
ct := http.DetectContentType(p)
var err error
if w.w, err = w.b.NewTypedWriter(w.ctx, w.key, ct, w.opts); err != nil {
return 0, wrapError(w.b, err)
}
w.buf = nil
w.ctx = nil
w.key = ""
w.opts = nil
n, err := w.w.Write(p)
return n, wrapError(w.b, err)
}
// ListOptions sets options for listing blobs via Bucket.List.
type ListOptions struct {
// Prefix indicates that only blobs with a key starting with this prefix
// should be returned.
Prefix string
// Delimiter sets the delimiter used to define a hierarchical namespace,
// like a filesystem with "directories".
//
// An empty delimiter means that the bucket is treated as a single flat
// namespace.
//
// A non-empty delimiter means that any result with the delimiter in its key
// after Prefix is stripped will be returned with ListObject.IsDir = true,
// ListObject.Key truncated after the delimiter, and zero values for other
// ListObject fields. These results represent "directories". Multiple results
// in a "directory" are returned as a single result.
Delimiter string
// BeforeList is a callback that will be called before each call to the
// underlying provider's list functionality.
// asFunc converts its argument to provider-specific types.
// See Bucket.As for more details.
BeforeList func(asFunc func(interface{}) bool) error
}
// ListIterator iterates over List results.
type ListIterator struct {
b driver.Bucket
opts *driver.ListOptions
page *driver.ListPage
nextIdx int
}
// Next returns a *ListObject for the next blob. It returns (nil, io.EOF) if
// there are no more.
func (i *ListIterator) Next(ctx context.Context) (*ListObject, error) {
if i.page != nil {
// We've already got a page of results.
if i.nextIdx < len(i.page.Objects) {
// Next object is in the page; return it.
dobj := i.page.Objects[i.nextIdx]
i.nextIdx++
return &ListObject{
Key: dobj.Key,
ModTime: dobj.ModTime,
Size: dobj.Size,
MD5: dobj.MD5,
IsDir: dobj.IsDir,
asFunc: dobj.AsFunc,
}, nil
}
if len(i.page.NextPageToken) == 0 {
// Done with current page, and there are no more; return io.EOF.
return nil, io.EOF
}
// We need to load the next page.
i.opts.PageToken = i.page.NextPageToken
}
// Loading a new page.
p, err := i.b.ListPaged(ctx, i.opts)
if err != nil {
return nil, wrapError(i.b, err)
}
i.page = p
i.nextIdx = 0
return i.Next(ctx)
}
// ListObject represents a single blob returned from List.
type ListObject struct {
// Key is the key for this blob.
Key string
// ModTime is the time the blob was last modified.
ModTime time.Time
// Size is the size of the blob's content in bytes.
Size int64
// MD5 is an MD5 hash of the blob contents or nil if not available.
MD5 []byte
// IsDir indicates that this result represents a "directory" in the
// hierarchical namespace, ending in ListOptions.Delimiter. Key can be
// passed as ListOptions.Prefix to list items in the "directory".
// Fields other than Key and IsDir will not be set if IsDir is true.
IsDir bool
asFunc func(interface{}) bool
}
// As converts i to provider-specific types.
// See Bucket.As for more details.
func (o *ListObject) As(i interface{}) bool {
if o.asFunc == nil {
return false
}
return o.asFunc(i)
}
// Bucket provides an easy and portable way to interact with blobs
// within a "bucket", including read, write, and list operations.
// To create a Bucket, use constructors found in provider-specific
// subpackages.
type Bucket struct {
b driver.Bucket
tracer *oc.Tracer
}
const pkgName = "gocloud.dev/blob"
var (
latencyMeasure = oc.LatencyMeasure(pkgName)
// OpenCensusViews are predefined views for OpenCensus metrics.
// The views include counts and latency distributions for API method calls.
// See the example at https://godoc.org/go.opencensus.io/stats/view for usage.
OpenCensusViews = oc.Views(pkgName, latencyMeasure)
)
// NewBucket is intended for use by provider implementations.
var NewBucket = newBucket
// newBucket creates a new *Bucket based on a specific driver implementation.
// End users should use subpackages to construct a *Bucket instead of this
// function; see the package documentation for details.
func newBucket(b driver.Bucket) *Bucket {
return &Bucket{
b: b,
tracer: &oc.Tracer{
Package: pkgName,
Provider: oc.ProviderName(b),
LatencyMeasure: latencyMeasure,
},
}
}
// As converts i to provider-specific types.
//
// This function (and the other As functions in this package) are inherently
// provider-specific, and using them will make that part of your application
// non-portable, so use with care.
//
// See the documentation for the subpackage used to instantiate Bucket to see
// which type(s) are supported.
//
// Usage:
//
// 1. Declare a variable of the provider-specific type you want to access.
//
// 2. Pass a pointer to it to As.
//
// 3. If the type is supported, As will return true and copy the
// provider-specific type into your variable. Otherwise, it will return false.
//
// Provider-specific types that are intended to be mutable will be exposed
// as a pointer to the underlying type.
//
// See
// https://github.com/google/go-cloud/blob/master/internal/docs/design.md#as
// for more background.
func (b *Bucket) As(i interface{}) bool {
if i == nil {
return false
}
return b.b.As(i)
}
// ErrorAs converts i to provider-specific types.
// ErrorAs panics if i is nil or not a pointer.
// See Bucket.As for more details.
func (b *Bucket) ErrorAs(err error, i interface{}) bool {
if i == nil || reflect.TypeOf(i).Kind() != reflect.Ptr {
panic("blob: ErrorAs i must be a non-nil pointer")
}
if e, ok := err.(*gcerr.Error); ok {
return b.b.ErrorAs(e.Unwrap(), i)
}
return b.b.ErrorAs(err, i)
}
// ReadAll is a shortcut for creating a Reader via NewReader with nil
// ReaderOptions, and reading the entire blob.
func (b *Bucket) ReadAll(ctx context.Context, key string) (_ []byte, err error) {
r, err := b.NewReader(ctx, key, nil)
if err != nil {
return nil, err
}
defer r.Close()
return ioutil.ReadAll(r)
}
// List returns a ListIterator that can be used to iterate over blobs in a
// bucket, in lexicographical order of UTF-8 encoded keys. The underlying
// implementation fetches results in pages.
//
// A nil ListOptions is treated the same as the zero value.
//
// List is not guaranteed to include all recently-written blobs;
// some providers are only eventually consistent.
func (b *Bucket) List(opts *ListOptions) *ListIterator {
if opts == nil {
opts = &ListOptions{}
}
dopts := &driver.ListOptions{
Prefix: opts.Prefix,
Delimiter: opts.Delimiter,
BeforeList: opts.BeforeList,
}
return &ListIterator{b: b.b, opts: dopts}
}
// Attributes returns attributes for the blob stored at key.
//
// If the blob does not exist, Attributes returns an error for which
// gcerrors.Code will return gcerrors.NotFound.
func (b *Bucket) Attributes(ctx context.Context, key string) (_ Attributes, err error) {
ctx = b.tracer.Start(ctx, "Attributes")
defer func() { b.tracer.End(ctx, err) }()
a, err := b.b.Attributes(ctx, key)
if err != nil {
return Attributes{}, wrapError(b.b, err)
}
var md map[string]string
if len(a.Metadata) > 0 {
// Providers are inconsistent, but at least some treat keys
// as case-insensitive. To make the behavior consistent, we
// force-lowercase them when writing and reading.
md = make(map[string]string, len(a.Metadata))
for k, v := range a.Metadata {
md[strings.ToLower(k)] = v
}
}
return Attributes{
CacheControl: a.CacheControl,
ContentDisposition: a.ContentDisposition,
ContentEncoding: a.ContentEncoding,
ContentLanguage: a.ContentLanguage,
ContentType: a.ContentType,
Metadata: md,
ModTime: a.ModTime,
Size: a.Size,
MD5: a.MD5,
asFunc: a.AsFunc,
}, nil
}
// NewReader is a shortcut for NewRangedReader with offset=0 and length=-1.
func (b *Bucket) NewReader(ctx context.Context, key string, opts *ReaderOptions) (*Reader, error) {
return b.NewRangeReader(ctx, key, 0, -1, opts)
}
// NewRangeReader returns a Reader to read content from the blob stored at key.
// It reads at most length bytes starting at offset (>= 0).
// If length is negative, it will read till the end of the blob.
//
// If the blob does not exist, NewRangeReader returns an error for which
// gcerrors.Code will return gcerrors.NotFound. Attributes is a lighter-weight way to
// check for existence.
//
// A nil ReaderOptions is treated the same as the zero value.
//
// The caller must call Close on the returned Reader when done reading.
func (b *Bucket) NewRangeReader(ctx context.Context, key string, offset, length int64, opts *ReaderOptions) (_ *Reader, err error) {
if offset < 0 {
return nil, errors.New("blob.NewRangeReader: offset must be non-negative")
}
if opts == nil {
opts = &ReaderOptions{}
}
dopts := &driver.ReaderOptions{}
tctx := b.tracer.Start(ctx, "NewRangeReader")
defer func() {
// If err == nil, we handed the end closure off to the returned *Writer; it
// will be called when the Writer is Closed.
if err != nil {
b.tracer.End(tctx, err)
}
}()
r, err := b.b.NewRangeReader(ctx, key, offset, length, dopts)
if err != nil {
return nil, wrapError(b.b, err)
}
end := func(err error) { b.tracer.End(tctx, err) }
return &Reader{b: b.b, r: r, end: end}, nil
}
// WriteAll is a shortcut for creating a Writer via NewWriter and writing p.
func (b *Bucket) WriteAll(ctx context.Context, key string, p []byte, opts *WriterOptions) (err error) {
w, err := b.NewWriter(ctx, key, opts)
if err != nil {
return err
}
if _, err := w.Write(p); err != nil {
_ = w.Close()
return err
}
return w.Close()
}
// NewWriter returns a Writer that writes to the blob stored at key.
// A nil WriterOptions is treated the same as the zero value.
//
// If a blob with this key already exists, it will be replaced.
// The blob being written is not guaranteed to be readable until Close
// has been called; until then, any previous blob will still be readable.
// Even after Close is called, newly written blobs are not guaranteed to be
// returned from List; some providers are only eventually consistent.
//
// The returned Writer will store ctx for later use in Write and/or Close.
// To abort a write, cancel ctx; otherwise, it must remain open until
// Close is called.
//
// The caller must call Close on the returned Writer, even if the write is
// aborted.
func (b *Bucket) NewWriter(ctx context.Context, key string, opts *WriterOptions) (_ *Writer, err error) {
var dopts *driver.WriterOptions
var w driver.Writer
if opts == nil {
opts = &WriterOptions{}
}
dopts = &driver.WriterOptions{
CacheControl: opts.CacheControl,
ContentDisposition: opts.ContentDisposition,
ContentEncoding: opts.ContentEncoding,
ContentLanguage: opts.ContentLanguage,
ContentMD5: opts.ContentMD5,
BufferSize: opts.BufferSize,
BeforeWrite: opts.BeforeWrite,
}
if len(opts.Metadata) > 0 {
// Providers are inconsistent, but at least some treat keys
// as case-insensitive. To make the behavior consistent, we
// force-lowercase them when writing and reading.
md := make(map[string]string, len(opts.Metadata))
for k, v := range opts.Metadata {
if k == "" {
return nil, errors.New("blob.NewWriter: WriterOptions.Metadata keys may not be empty strings")
}
lowerK := strings.ToLower(k)
if _, found := md[lowerK]; found {
return nil, fmt.Errorf("blob.NewWriter: duplicate case-insensitive metadata key %q", lowerK)
}
md[lowerK] = v
}
dopts.Metadata = md
}
ctx, cancel := context.WithCancel(ctx)
tctx := b.tracer.Start(ctx, "NewWriter")
end := func(err error) { b.tracer.End(tctx, err) }
defer func() {
if err != nil {
end(err)
}
}()
if opts.ContentType != "" {
t, p, err := mime.ParseMediaType(opts.ContentType)
if err != nil {
cancel()
return nil, err
}
ct := mime.FormatMediaType(t, p)
w, err = b.b.NewTypedWriter(ctx, key, ct, dopts)
if err != nil {
cancel()
return nil, wrapError(b.b, err)
}
return &Writer{
b: b.b,
w: w,
end: end,
cancel: cancel,
contentMD5: opts.ContentMD5,
md5hash: md5.New(),
}, nil
}
return &Writer{
ctx: ctx,
cancel: cancel,
b: b.b,
end: end,
key: key,
opts: dopts,
buf: bytes.NewBuffer([]byte{}),
contentMD5: opts.ContentMD5,
md5hash: md5.New(),
}, nil
}
// Delete deletes the blob stored at key.
//
// If the blob does not exist, Delete returns an error for which
// gcerrors.Code will return gcerrors.NotFound.
func (b *Bucket) Delete(ctx context.Context, key string) (err error) {
ctx = b.tracer.Start(ctx, "Delete")
defer func() { b.tracer.End(ctx, err) }()
return wrapError(b.b, b.b.Delete(ctx, key))
}
// SignedURL returns a URL that can be used to GET the blob for the duration
// specified in opts.Expiry.
//
// A nil SignedURLOptions is treated the same as the zero value.
//
// It is valid to call SignedURL for a key that does not exist.
//
// If the provider implementation does not support this functionality, SignedURL
// will return an error for which gcerrors.Code will return gcerrors.Unimplemented.
func (b *Bucket) SignedURL(ctx context.Context, key string, opts *SignedURLOptions) (string, error) {
if opts == nil {
opts = &SignedURLOptions{}
}
if opts.Expiry < 0 {
return "", errors.New("blob.SignedURL: SignedURLOptions.Expiry must be >= 0")
}
if opts.Expiry == 0 {
opts.Expiry = DefaultSignedURLExpiry
}
dopts := driver.SignedURLOptions{
Expiry: opts.Expiry,
}
url, err := b.b.SignedURL(ctx, key, &dopts)
return url, wrapError(b.b, err)
}
// DefaultSignedURLExpiry is the default duration for SignedURLOptions.Expiry.
const DefaultSignedURLExpiry = 1 * time.Hour
// SignedURLOptions sets options for SignedURL.
type SignedURLOptions struct {
// Expiry sets how long the returned URL is valid for.
// Defaults to DefaultSignedURLExpiry.
Expiry time.Duration
}
// ReaderOptions sets options for NewReader and NewRangedReader.
// It is provided for future extensibility.
type ReaderOptions struct{}
// WriterOptions sets options for NewWriter.
type WriterOptions struct {
// BufferSize changes the default size in bytes of the chunks that
// Writer will upload in a single request; larger blobs will be split into
// multiple requests.
//
// This option may be ignored by some provider implementations.
//
// If 0, the provider implementation will choose a reasonable default.
//
// If the Writer is used to do many small writes concurrently, using a
// smaller BufferSize may reduce memory usage.
BufferSize int
// CacheControl specifies caching attributes that providers may use
// when serving the blob.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Cache-Control
CacheControl string
// ContentDisposition specifies whether the blob content is expected to be
// displayed inline or as an attachment.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition
ContentDisposition string
// ContentEncoding specifies the encoding used for the blob's content, if any.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
ContentEncoding string
// ContentLanguage specifies the language used in the blob's content, if any.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Language
ContentLanguage string
// ContentType specifies the MIME type of the blob being written. If not set,
// it will be inferred from the content using the algorithm described at
// http://mimesniff.spec.whatwg.org/.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Type
ContentType string
// ContentMD5 is used as a message integrity check.
// If len(ContentMD5) > 0, the MD5 hash of the bytes written must match
// ContentMD5, or Close will return an error without completing the write.
// https://tools.ietf.org/html/rfc1864
ContentMD5 []byte
// Metadata holds key/value strings to be associated with the blob, or nil.
// Keys may not be empty, and are lowercased before being written.
// Duplicate case-insensitive keys (e.g., "foo" and "FOO") will result in
// an error.
Metadata map[string]string
// BeforeWrite is a callback that will be called exactly once, before
// any data is written (unless NewWriter returns an error, in which case
// it will not be called at all). Note that this is not necessarily during
// or after the first Write call, as providers may buffer bytes before
// sending an upload request.
//
// asFunc converts its argument to provider-specific types.
// See Bucket.As for more details.
BeforeWrite func(asFunc func(interface{}) bool) error
}
// FromURLFunc is intended for use by provider implementations.
// It allows providers to convert a parsed URL from Open to a driver.Bucket.
type FromURLFunc func(context.Context, *url.URL) (driver.Bucket, error)
var (
// registry maps scheme strings to provider-specific instantiation functions.
registry = map[string]FromURLFunc{}
// registryMu protects registry.
registryMu sync.Mutex
)
// Register is for use by provider implementations. It allows providers to
// register an instantiation function for URLs with the given scheme. It is
// expected to be called from the provider implementation's package init
// function.
//
// fn will be called from Open, with a bucket name and options parsed from
// the URL. All option keys will be lowercased.
//
// Register panics if a provider has already registered for scheme.
func Register(scheme string, fn FromURLFunc) {
registryMu.Lock()
defer registryMu.Unlock()
if _, found := registry[scheme]; found {
log.Fatalf("a provider has already registered for scheme %q", scheme)
}
registry[scheme] = fn
}
// fromRegistry looks up the registered function for scheme.
// It returns nil if scheme has not been registered for.
func fromRegistry(scheme string) FromURLFunc {
registryMu.Lock()
defer registryMu.Unlock()
return registry[scheme]
}
// Open creates a *Bucket from a URL.
// See the package documentation in provider-specific subpackages for more
// details on supported scheme(s) and URL parameter(s).
func Open(ctx context.Context, urlstr string) (*Bucket, error) {
u, err := url.Parse(urlstr)
if err != nil {
return nil, err
}
if u.Scheme == "" {
return nil, fmt.Errorf("invalid URL %q, missing scheme", urlstr)
}
fn := fromRegistry(u.Scheme)
if fn == nil {
return nil, fmt.Errorf("no provider registered for scheme %q", u.Scheme)
}
drv, err := fn(ctx, u)
if err != nil {
return nil, err
}
return NewBucket(drv), nil
}
func wrapError(b driver.Bucket, err error) error {
if err == nil {
return nil
}
if gcerr.DoNotWrap(err) {
return err
}
return gcerr.New(b.ErrorCode(err), err, 2, "blob")
}
| 1 | 14,205 | Not part of this PR, but should the section above say what the name of the metric is for each method? | google-go-cloud | go |
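One way the blob package doc could answer this question (a sketch only; the authoritative metric names come from oc.Views via OpenCensusViews, so the wording below deliberately avoids inventing exact view names):
// It also collects the following metrics for each of the methods above:
// counts of completed calls and distributions of their latency, broken
// down by method and by provider. See OpenCensusViews for the exact views.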
@@ -75,13 +75,13 @@ namespace OpenTelemetry.Exporter
case MetricType.LongGauge:
{
- // TODOs
+ valueDisplay = (metric as IGaugeMetric).LastValue.Value.ToString();
break;
}
case MetricType.DoubleGauge:
{
- // TODOs
+ valueDisplay = (metric as IGaugeMetric).LastValue.Value.ToString();
break;
}
| 1 | // <copyright file="ConsoleMetricExporter.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Globalization;
using System.Linq;
using System.Text;
using OpenTelemetry.Metrics;
using OpenTelemetry.Resources;
namespace OpenTelemetry.Exporter
{
public class ConsoleMetricExporter : ConsoleExporter<MetricItem>
{
private Resource resource;
public ConsoleMetricExporter(ConsoleExporterOptions options)
: base(options)
{
}
public override ExportResult Export(in Batch<MetricItem> batch)
{
if (this.resource == null)
{
this.resource = this.ParentProvider.GetResource();
if (this.resource != Resource.Empty)
{
foreach (var resourceAttribute in this.resource.Attributes)
{
if (resourceAttribute.Key.Equals("service.name"))
{
Console.WriteLine("Service.Name" + resourceAttribute.Value);
}
}
}
}
foreach (var metricItem in batch)
{
foreach (var metric in metricItem.Metrics)
{
var tags = metric.Attributes.ToArray().Select(k => $"{k.Key}={k.Value?.ToString()}");
string valueDisplay = string.Empty;
// Switch would be faster than the if/else ladder
// of try and cast.
switch (metric.MetricType)
{
case MetricType.LongSum:
{
valueDisplay = (metric as ISumMetricLong).LongSum.ToString(CultureInfo.InvariantCulture);
break;
}
case MetricType.DoubleSum:
{
valueDisplay = (metric as ISumMetricDouble).DoubleSum.ToString(CultureInfo.InvariantCulture);
break;
}
case MetricType.LongGauge:
{
// TODOs
break;
}
case MetricType.DoubleGauge:
{
// TODOs
break;
}
case MetricType.Histogram:
{
var histogramMetric = metric as IHistogramMetric;
var bucketsBuilder = new StringBuilder();
bucketsBuilder.Append($"Sum: {histogramMetric.PopulationSum} Count: {histogramMetric.PopulationCount} \n");
foreach (var bucket in histogramMetric.Buckets)
{
bucketsBuilder.Append($"({bucket.LowBoundary} - {bucket.HighBoundary}) : {bucket.Count}");
bucketsBuilder.AppendLine();
}
valueDisplay = bucketsBuilder.ToString();
break;
}
case MetricType.Summary:
{
var summaryMetric = metric as ISummaryMetric;
valueDisplay = string.Format("Sum: {0} Count: {1}", summaryMetric.PopulationSum, summaryMetric.PopulationCount);
break;
}
}
string time = $"{metric.StartTimeExclusive.ToLocalTime().ToString("HH:mm:ss.fff")} {metric.EndTimeInclusive.ToLocalTime().ToString("HH:mm:ss.fff")}";
var msg = new StringBuilder($"Export {time} {metric.Name} [{string.Join(";", tags)}] {metric.MetricType}");
if (!string.IsNullOrEmpty(metric.Description))
{
msg.Append($", Description: {metric.Description}");
}
if (!string.IsNullOrEmpty(metric.Unit))
{
msg.Append($", Unit: {metric.Unit}");
}
if (!string.IsNullOrEmpty(metric.Meter.Name))
{
msg.Append($", Meter: {metric.Meter.Name}");
if (!string.IsNullOrEmpty(metric.Meter.Version))
{
msg.Append($"/{metric.Meter.Version}");
}
}
msg.AppendLine();
msg.Append($"Value: {valueDisplay}");
Console.WriteLine(msg);
}
}
return ExportResult.Success;
}
}
}
| 1 | 21,014 | there are upcoming changes to this section. For now, this works (to demo the usage of Gauge); those changes are coming as separate PRs. | open-telemetry-opentelemetry-dotnet | .cs
@@ -95,7 +95,7 @@ func (t *Terminal) run(ctx context.Context) {
for {
select {
case <-ctx.Done():
- if IsProcessBackground() {
+ if IsProcessBackground(t.fd) {
// ignore all messages, do nothing, we are in the background process group
continue
} | 1 | package termstatus
import (
"bufio"
"bytes"
"context"
"fmt"
"io"
"os"
"strings"
"golang.org/x/crypto/ssh/terminal"
)
// Terminal is used to write messages and display status lines which can be
// updated. When the output is redirected to a file, the status lines are not
// printed.
type Terminal struct {
wr *bufio.Writer
fd uintptr
errWriter io.Writer
buf *bytes.Buffer
msg chan message
status chan status
canUpdateStatus bool
// will be closed when the goroutine which runs Run() terminates, so it'll
// yield a default value immediately
closed chan struct{}
clearCurrentLine func(io.Writer, uintptr)
moveCursorUp func(io.Writer, uintptr, int)
}
type message struct {
line string
err bool
}
type status struct {
lines []string
}
type fder interface {
Fd() uintptr
}
// New returns a new Terminal for wr. A goroutine is started to update the
// terminal. It is terminated when ctx is cancelled. When wr is redirected to
// a file (e.g. via shell output redirection) or is just an io.Writer (not the
// open *os.File for stdout), no status lines are printed. The status lines and
// normal output (via Print/Printf) are written to wr, error messages are
// written to errWriter. If disableStatus is set to true, no status messages
// are printed even if the terminal supports it.
func New(wr io.Writer, errWriter io.Writer, disableStatus bool) *Terminal {
t := &Terminal{
wr: bufio.NewWriter(wr),
errWriter: errWriter,
buf: bytes.NewBuffer(nil),
msg: make(chan message),
status: make(chan status),
closed: make(chan struct{}),
}
if disableStatus {
return t
}
if d, ok := wr.(fder); ok && canUpdateStatus(d.Fd()) {
// only use the fancy status code when we're running on a real terminal.
t.canUpdateStatus = true
t.fd = d.Fd()
t.clearCurrentLine = clearCurrentLine(wr, t.fd)
t.moveCursorUp = moveCursorUp(wr, t.fd)
}
return t
}
// Run updates the screen. It should be run in a separate goroutine. When
// ctx is cancelled, the status lines are cleanly removed.
func (t *Terminal) Run(ctx context.Context) {
defer close(t.closed)
if t.canUpdateStatus {
t.run(ctx)
return
}
t.runWithoutStatus(ctx)
}
// run listens on the channels and updates the terminal screen.
func (t *Terminal) run(ctx context.Context) {
var status []string
for {
select {
case <-ctx.Done():
if IsProcessBackground() {
// ignore all messages, do nothing, we are in the background process group
continue
}
t.undoStatus(len(status))
return
case msg := <-t.msg:
if IsProcessBackground() {
// ignore all messages, do nothing, we are in the background process group
continue
}
t.clearCurrentLine(t.wr, t.fd)
var dst io.Writer
if msg.err {
dst = t.errWriter
// assume t.wr and t.errWriter are different, so we need to
// flush clearing the current line
err := t.wr.Flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
} else {
dst = t.wr
}
if _, err := io.WriteString(dst, msg.line); err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
continue
}
t.writeStatus(status)
if err := t.wr.Flush(); err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
case stat := <-t.status:
if IsProcessBackground() {
// ignore all messages, do nothing, we are in the background process group
continue
}
status = status[:0]
status = append(status, stat.lines...)
t.writeStatus(status)
}
}
}
func (t *Terminal) writeStatus(status []string) {
for _, line := range status {
t.clearCurrentLine(t.wr, t.fd)
_, err := t.wr.WriteString(line)
if err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
}
// flush is needed so that the current line is updated
err = t.wr.Flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
}
if len(status) > 0 {
t.moveCursorUp(t.wr, t.fd, len(status)-1)
}
err := t.wr.Flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
}
// runWithoutStatus listens on the channels and just prints out the messages,
// without status lines.
func (t *Terminal) runWithoutStatus(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case msg := <-t.msg:
var flush func() error
var dst io.Writer
if msg.err {
dst = t.errWriter
} else {
dst = t.wr
flush = t.wr.Flush
}
if _, err := io.WriteString(dst, msg.line); err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
}
if flush == nil {
continue
}
if err := flush(); err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
case <-t.status:
// discard status lines
}
}
}
func (t *Terminal) undoStatus(lines int) {
for i := 0; i < lines; i++ {
t.clearCurrentLine(t.wr, t.fd)
_, err := t.wr.WriteRune('\n')
if err != nil {
fmt.Fprintf(os.Stderr, "write failed: %v\n", err)
}
// flush is needed so that the current line is updated
err = t.wr.Flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
}
t.moveCursorUp(t.wr, t.fd, lines)
err := t.wr.Flush()
if err != nil {
fmt.Fprintf(os.Stderr, "flush failed: %v\n", err)
}
}
// Print writes a line to the terminal.
func (t *Terminal) Print(line string) {
// make sure the line ends with a line break
if line[len(line)-1] != '\n' {
line += "\n"
}
select {
case t.msg <- message{line: line}:
case <-t.closed:
}
}
// Printf uses fmt.Sprintf to write a line to the terminal.
func (t *Terminal) Printf(msg string, args ...interface{}) {
s := fmt.Sprintf(msg, args...)
t.Print(s)
}
// Error writes an error to the terminal.
func (t *Terminal) Error(line string) {
// make sure the line ends with a line break
if line[len(line)-1] != '\n' {
line += "\n"
}
select {
case t.msg <- message{line: line, err: true}:
case <-t.closed:
}
}
// Errorf uses fmt.Sprintf to write an error line to the terminal.
func (t *Terminal) Errorf(msg string, args ...interface{}) {
s := fmt.Sprintf(msg, args...)
t.Error(s)
}
// truncate returns a string that has at most maxlen characters. If maxlen is
// negative, the empty string is returned.
func truncate(s string, maxlen int) string {
if maxlen < 0 {
return ""
}
if len(s) < maxlen {
return s
}
return s[:maxlen]
}
// SetStatus updates the status lines.
func (t *Terminal) SetStatus(lines []string) {
if len(lines) == 0 {
return
}
width, _, err := terminal.GetSize(int(t.fd))
if err != nil || width <= 0 {
// use 80 columns by default
width = 80
}
// make sure that all lines have a line break and are not too long
for i, line := range lines {
line = strings.TrimRight(line, "\n")
line = truncate(line, width-2) + "\n"
lines[i] = line
}
// make sure the last line does not have a line break
last := len(lines) - 1
lines[last] = strings.TrimRight(lines[last], "\n")
select {
case t.status <- status{lines: lines}:
case <-t.closed:
}
}
| 1 | 14,270 | This is equivalent to stdout. Why not just replace stdin with stdout in `IsProcessBackground`? | restic-restic | go |
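A rough sketch of the reviewer's alternative (hypothetical and Unix-only; it assumes golang.org/x/sys/unix with the TIOCGPGRP ioctl plus the os package, and checks stdout rather than taking a terminal fd parameter):
// IsProcessBackground reports whether we are running in a background
// process group, by comparing the terminal's foreground process group
// (queried on stdout) with our own process group.
func IsProcessBackground() bool {
	fg, err := unix.IoctlGetInt(int(os.Stdout.Fd()), unix.TIOCGPGRP)
	if err != nil {
		return false // stdout is not a terminal; assume foreground
	}
	return fg != unix.Getpgrp()
}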
@@ -61,7 +61,10 @@ void DataMan::add_stream(json p_jmsg)
man->init(p_jmsg);
this->add_next(method, man);
}
- add_man_to_path("zfp", method);
+ if (p_jmsg["compress_method"] != nullptr)
+ {
+ add_man_to_path(p_jmsg["compress_method"], method);
+ }
}
void DataMan::flush() { flush_next(); } | 1 | /*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* DataMan.cpp
*
* Created on: Apr 12, 2017
* Author: Jason Wang
*/
#include "DataMan.h"
int DataMan::init(json p_jmsg) { return 0; }
int DataMan::put(const void *p_data, std::string p_doid, std::string p_var,
std::string p_dtype, std::vector<size_t> p_putshape,
std::vector<size_t> p_varshape, std::vector<size_t> p_offset,
size_t p_timestep, int p_tolerance, int p_priority)
{
return DataMan::put(p_data, p_doid, p_var, p_dtype, p_putshape, p_varshape,
p_offset, p_timestep, p_tolerance, p_priority);
}
int DataMan::put(const void *p_data, json p_jmsg)
{
put_begin(p_data, p_jmsg);
put_end(p_data, p_jmsg);
return 0;
}
void DataMan::add_file(std::string p_method) {}
void DataMan::add_stream(json p_jmsg)
{
std::string method;
if (p_jmsg["method"] != nullptr)
method = p_jmsg["method"];
logging("Streaming method " + method + " added");
if (m_tolerance.size() < m_num_channels)
{
for (int i = 0; i < m_num_channels; i++)
{
m_tolerance.push_back(0);
}
}
if (m_priority.size() < m_num_channels)
{
for (int i = 0; i < m_num_channels; i++)
{
m_priority.push_back(100 / (i + 1));
}
}
auto man = get_man(method);
if (man)
{
man->init(p_jmsg);
this->add_next(method, man);
}
add_man_to_path("zfp", method);
}
void DataMan::flush() { flush_next(); }
int DataMan::get(void *p_data, json &p_jmsg) { return 0; }
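// Editor's note: the patch above guards the optional "compress_method" key with a
// `!= nullptr` comparison, and the review comment for this row proposes a terser
// truthiness test. A hedged sketch of an explicit presence/null check, assuming the
// nlohmann-style json API used in this file (illustrative only, not part of DataMan):
void add_compressor_if_requested(json p_jmsg, std::string method)
{
    if (p_jmsg.count("compress_method") && !p_jmsg["compress_method"].is_null())
    {
        std::string compress_method = p_jmsg["compress_method"];
        // add_man_to_path(compress_method, method); // hypothetical call into the existing path setup
    }
}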
| 1 | 11,411 | Rather than test for nullptr, just treat the pointer as a bool, i.e. `if(p_jmsg["compress_method"])` | ornladios-ADIOS2 | cpp |
@@ -146,6 +146,17 @@ public class LibraryFeaturePanel extends FeaturePanel<LibraryFeaturePanel, Libra
// get container
// TODO: smarter way using container manager
final String executablePath = shortcutCreationDTO.getExecutable().getAbsolutePath();
+ if (!executablePath.startsWith(getContainersPath())) {
+ Platform.runLater(() -> {
+ final ErrorDialog errorDialog = ErrorDialog.builder()
+ .withMessage(tr("Creating shortcut to executable outside of a container is not supported"))
+ .withOwner(getScene().getWindow())
+ .build();
+
+ errorDialog.showAndWait();
+ });
+ return;
+ }
final String pathInContainers = executablePath.replace(getContainersPath(), "");
final String[] split = pathInContainers.split("/");
final String engineContainer = split[0]; | 1 | package org.phoenicis.javafx.components.library.control;
import com.fasterxml.jackson.databind.ObjectMapper;
import javafx.application.Platform;
import javafx.beans.property.ObjectProperty;
import javafx.beans.property.SimpleObjectProperty;
import javafx.beans.property.SimpleStringProperty;
import javafx.beans.property.StringProperty;
import javafx.collections.FXCollections;
import javafx.collections.ObservableList;
import javafx.scene.control.Tab;
import org.apache.commons.lang.StringUtils;
import org.graalvm.polyglot.Value;
import org.phoenicis.javafx.components.common.panelstates.None;
import org.phoenicis.javafx.components.common.panelstates.OpenDetailsPanel;
import org.phoenicis.javafx.components.common.control.FeaturePanel;
import org.phoenicis.javafx.components.library.skin.LibraryFeaturePanelSkin;
import org.phoenicis.javafx.controller.library.console.ConsoleController;
import org.phoenicis.javafx.dialogs.ErrorDialog;
import org.phoenicis.javafx.dialogs.SimpleConfirmDialog;
import org.phoenicis.javafx.settings.JavaFxSettingsManager;
import org.phoenicis.javafx.views.mainwindow.console.ConsoleTab;
import org.phoenicis.javafx.views.mainwindow.library.LibraryFilter;
import org.phoenicis.library.ShortcutManager;
import org.phoenicis.library.ShortcutRunner;
import org.phoenicis.library.dto.ShortcutCategoryDTO;
import org.phoenicis.library.dto.ShortcutCreationDTO;
import org.phoenicis.library.dto.ShortcutDTO;
import org.phoenicis.scripts.session.InteractiveScriptSession;
import org.phoenicis.scripts.interpreter.ScriptInterpreter;
import java.util.Collections;
import static org.phoenicis.configuration.localisation.Localisation.tr;
/**
* The component shown inside the Phoenicis "Library" tab
*/
public class LibraryFeaturePanel extends FeaturePanel<LibraryFeaturePanel, LibraryFeaturePanelSkin> {
/**
* The name of the application
*/
private final StringProperty applicationName;
/**
* The path leading to the containers
*/
private final StringProperty containersPath;
/**
* The library filter
*/
private final ObjectProperty<LibraryFilter> filter;
/**
* The JavaFX settings manager
*/
private final ObjectProperty<JavaFxSettingsManager> javaFxSettingsManager;
/**
* The script interpreter
*/
private final ObjectProperty<ScriptInterpreter> scriptInterpreter;
/**
* The object mapper
*/
private final ObjectProperty<ObjectMapper> objectMapper;
/**
* A list of all known shortcut categories
*/
private final ObservableList<ShortcutCategoryDTO> categories;
/**
* A list of all opened tabs
*/
private final ObservableList<Tab> tabs;
/**
* The currently open tab
*/
private final ObjectProperty<Tab> selectedTab;
/**
* The console controller
*/
private final ObjectProperty<ConsoleController> consoleController;
/**
* The shortcut runner
*/
private final ObjectProperty<ShortcutRunner> shortcutRunner;
/**
* The shortcut manager
*/
private final ObjectProperty<ShortcutManager> shortcutManager;
/**
* The currently selected shortcut
*/
private final ObjectProperty<ShortcutDTO> selectedShortcut;
/**
* The currently opened details panel
*/
private final ObjectProperty<OpenDetailsPanel> openedDetailsPanel;
/**
* Constructor
*/
public LibraryFeaturePanel() {
super();
this.applicationName = new SimpleStringProperty();
this.containersPath = new SimpleStringProperty();
this.filter = new SimpleObjectProperty<>();
this.javaFxSettingsManager = new SimpleObjectProperty<>();
this.scriptInterpreter = new SimpleObjectProperty<>();
this.objectMapper = new SimpleObjectProperty<>();
this.categories = FXCollections.observableArrayList();
this.tabs = FXCollections.observableArrayList();
this.consoleController = new SimpleObjectProperty<>();
this.shortcutRunner = new SimpleObjectProperty<>();
this.shortcutManager = new SimpleObjectProperty<>();
this.selectedTab = new SimpleObjectProperty<>();
this.selectedShortcut = new SimpleObjectProperty<>();
this.openedDetailsPanel = new SimpleObjectProperty<>(new None());
}
/**
* {@inheritDoc}
*/
@Override
public LibraryFeaturePanelSkin createSkin() {
return new LibraryFeaturePanelSkin(this);
}
/**
* Creates a new shortcut
*
* @param shortcutCreationDTO DTO describing the new shortcut
*/
public void createShortcut(ShortcutCreationDTO shortcutCreationDTO) {
// get container
// TODO: smarter way using container manager
final String executablePath = shortcutCreationDTO.getExecutable().getAbsolutePath();
final String pathInContainers = executablePath.replace(getContainersPath(), "");
final String[] split = pathInContainers.split("/");
final String engineContainer = split[0];
final String engine = StringUtils.capitalize(engineContainer).replace("prefix", "");
// TODO: better way to get engine ID
final String engineId = engine.toLowerCase();
final String container = split[1];
final InteractiveScriptSession interactiveScriptSession = getScriptInterpreter().createInteractiveSession();
final String scriptInclude = "const Shortcut = include(\"engines." + engineId + ".shortcuts." + engineId
+ "\");";
interactiveScriptSession.eval(scriptInclude,
ignored -> interactiveScriptSession.eval("new Shortcut()",
output -> {
final Value shortcutObject = (Value) output;
shortcutObject.invokeMember("name", shortcutCreationDTO.getName());
shortcutObject.invokeMember("category", shortcutCreationDTO.getCategory());
shortcutObject.invokeMember("description", shortcutCreationDTO.getDescription());
shortcutObject.invokeMember("miniature", shortcutCreationDTO.getMiniature());
shortcutObject.invokeMember("search", shortcutCreationDTO.getExecutable().getName());
shortcutObject.invokeMember("prefix", container);
shortcutObject.invokeMember("create");
},
e -> Platform.runLater(() -> {
final ErrorDialog errorDialog = ErrorDialog.builder()
.withMessage(tr("Error while creating shortcut"))
.withException(e)
.withOwner(getScene().getWindow())
.build();
errorDialog.showAndWait();
})),
e -> Platform.runLater(() -> {
final ErrorDialog errorDialog = ErrorDialog.builder()
.withMessage(tr("Error while creating shortcut"))
.withException(e)
.withOwner(getScene().getWindow())
.build();
errorDialog.showAndWait();
}));
}
/**
* Executes/runs a given shortcut
*
* @param shortcut The shortcut to be executed
*/
public void runShortcut(ShortcutDTO shortcut) {
getShortcutRunner().run(shortcut, Collections.emptyList(), e -> Platform.runLater(() -> {
final ErrorDialog errorDialog = ErrorDialog.builder()
.withMessage(tr("Error"))
.withException(e)
.withOwner(getScene().getWindow())
.build();
errorDialog.showAndWait();
}));
}
/**
* Stops the application referenced by a given shortcut
*
* @param shortcut The shortcut of the application to be stopped
*/
public void stopShortcut(ShortcutDTO shortcut) {
getShortcutRunner().stop(shortcut, e -> Platform.runLater(() -> {
final ErrorDialog errorDialog = ErrorDialog.builder()
.withMessage(tr("Error"))
.withException(e)
.withOwner(getScene().getWindow())
.build();
errorDialog.showAndWait();
}));
}
/**
* Removes a given shortcut
*
* @param shortcut The shortcut to be removed
*/
public void uninstallShortcut(ShortcutDTO shortcut) {
final String shortcutName = shortcut.getInfo().getName();
final SimpleConfirmDialog confirmMessage = SimpleConfirmDialog.builder()
.withTitle(tr("Uninstall {0}", shortcutName))
.withMessage(tr("Are you sure you want to uninstall {0}?", shortcutName))
.withOwner(getScene().getWindow())
.withResizable(true)
.withYesCallback(() -> getShortcutManager().uninstallFromShortcut(shortcut, e -> {
final ErrorDialog errorDialog = ErrorDialog.builder()
.withMessage(tr("Error while uninstalling {0}", shortcutName))
.withException(e)
.withOwner(getScene().getWindow())
.build();
errorDialog.showAndWait();
}))
.build();
confirmMessage.showAndCallback();
}
/**
* Opens a new console tab
*/
public void openConsole() {
final ConsoleTab console = getConsoleController().createConsole();
getTabs().add(console);
setSelectedTab(console);
}
public String getApplicationName() {
return this.applicationName.get();
}
public StringProperty applicationNameProperty() {
return this.applicationName;
}
public void setApplicationName(String applicationName) {
this.applicationName.set(applicationName);
}
public String getContainersPath() {
return this.containersPath.get();
}
public StringProperty containersPathProperty() {
return this.containersPath;
}
public void setContainersPath(String containersPath) {
this.containersPath.set(containersPath);
}
public LibraryFilter getFilter() {
return this.filter.get();
}
public ObjectProperty<LibraryFilter> filterProperty() {
return this.filter;
}
public void setFilter(LibraryFilter filter) {
this.filter.set(filter);
}
public JavaFxSettingsManager getJavaFxSettingsManager() {
return this.javaFxSettingsManager.get();
}
public ObjectProperty<JavaFxSettingsManager> javaFxSettingsManagerProperty() {
return this.javaFxSettingsManager;
}
public void setJavaFxSettingsManager(JavaFxSettingsManager javaFxSettingsManager) {
this.javaFxSettingsManager.set(javaFxSettingsManager);
}
public ScriptInterpreter getScriptInterpreter() {
return this.scriptInterpreter.get();
}
public ObjectProperty<ScriptInterpreter> scriptInterpreterProperty() {
return this.scriptInterpreter;
}
public void setScriptInterpreter(ScriptInterpreter scriptInterpreter) {
this.scriptInterpreter.set(scriptInterpreter);
}
public ObjectMapper getObjectMapper() {
return this.objectMapper.get();
}
public ObjectProperty<ObjectMapper> objectMapperProperty() {
return this.objectMapper;
}
public void setObjectMapper(ObjectMapper objectMapper) {
this.objectMapper.set(objectMapper);
}
public ObservableList<ShortcutCategoryDTO> getCategories() {
return this.categories;
}
public ObservableList<Tab> getTabs() {
return this.tabs;
}
public Tab getSelectedTab() {
return this.selectedTab.get();
}
public ObjectProperty<Tab> selectedTabProperty() {
return this.selectedTab;
}
public void setSelectedTab(Tab selectedTab) {
this.selectedTab.set(selectedTab);
}
public ConsoleController getConsoleController() {
return this.consoleController.get();
}
public ObjectProperty<ConsoleController> consoleControllerProperty() {
return this.consoleController;
}
public void setConsoleController(ConsoleController consoleController) {
this.consoleController.set(consoleController);
}
public ShortcutRunner getShortcutRunner() {
return this.shortcutRunner.get();
}
public ObjectProperty<ShortcutRunner> shortcutRunnerProperty() {
return this.shortcutRunner;
}
public void setShortcutRunner(ShortcutRunner shortcutRunner) {
this.shortcutRunner.set(shortcutRunner);
}
public ShortcutManager getShortcutManager() {
return this.shortcutManager.get();
}
public ObjectProperty<ShortcutManager> shortcutManagerProperty() {
return this.shortcutManager;
}
public void setShortcutManager(ShortcutManager shortcutManager) {
this.shortcutManager.set(shortcutManager);
}
public ShortcutDTO getSelectedShortcut() {
return this.selectedShortcut.get();
}
public ObjectProperty<ShortcutDTO> selectedShortcutProperty() {
return this.selectedShortcut;
}
public void setSelectedShortcut(ShortcutDTO selectedShortcut) {
this.selectedShortcut.set(selectedShortcut);
}
public OpenDetailsPanel getOpenedDetailsPanel() {
return this.openedDetailsPanel.get();
}
public ObjectProperty<OpenDetailsPanel> openedDetailsPanelProperty() {
return this.openedDetailsPanel;
}
public void setOpenedDetailsPanel(OpenDetailsPanel openDetailsPanel) {
this.openedDetailsPanel.set(openDetailsPanel);
}
/**
* {@inheritDoc}
*/
@Override
public void closeDetailsPanel() {
// deselect the currently selected shortcut
setSelectedShortcut(null);
// close the details panel
setOpenedDetailsPanel(new None());
}
}
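// Editor's note: the review comment for this file asks whether the container check in
// createShortcut() could avoid the early `return;` by using an if/else and a separate
// method. A hedged sketch of that shape, with hypothetical helper names (illustrative
// only, not part of this class):
//
//     public void createShortcut(ShortcutCreationDTO shortcutCreationDTO) {
//         final String executablePath = shortcutCreationDTO.getExecutable().getAbsolutePath();
//         if (executablePath.startsWith(getContainersPath())) {
//             createShortcutForContainerExecutable(shortcutCreationDTO, executablePath); // hypothetical: the existing creation logic
//         } else {
//             showExecutableOutsideContainerError(); // hypothetical: wraps the ErrorDialog from the patch
//         }
//     }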
| 1 | 14,525 | Can we move the remaining code of this method to a new method? I don't like the `return;` here, we could try to replace it with an `if ... else ...` syntax. What do you think? | PhoenicisOrg-phoenicis | java |
@@ -0,0 +1,14 @@
+from kinto.core.events import ResourceChanged
+
+from .listener import on_resource_changed
+
+
+def includeme(config):
+ config.add_api_capability('quotas',
+ description='Quotas Management on Buckets.',
+ url='https://kinto.readthedocs.io')
+
+ # Listen to every resource (except history)
+ config.add_subscriber(on_resource_changed, ResourceChanged,
+ for_resources=('bucket', 'group',
+ 'collection', 'record')) | 1 | 1 | 9,625 | nitpick: _and collections_ | Kinto-kinto | py |
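# Editor's note (not part of the patch): for context, a plugin module like this is
# typically activated through Pyramid's include mechanism, assuming it lives at
# ``kinto.plugins.quotas``:
#
#     config.include('kinto.plugins.quotas')        # programmatic setup
#
# or via the Kinto settings file:
#
#     kinto.includes = kinto.plugins.quotas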
|
@@ -42,6 +42,8 @@ struct rp_generator_t {
int is_websocket_handshake;
int had_body_error; /* set if an error happened while fetching the body so that we can propagate the error */
h2o_timer_t send_headers_timeout;
+ unsigned req_done : 1;
+ unsigned res_done : 1;
};
struct rp_ws_upgrade_info_t { | 1 | /*
* Copyright (c) 2014,2015 DeNA Co., Ltd., Kazuho Oku, Masahiro Nagano
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include "picohttpparser.h"
#include "h2o.h"
#include "h2o/http1.h"
#include "h2o/httpclient.h"
#include "h2o/tunnel.h"
struct rp_generator_t {
h2o_generator_t super;
h2o_req_t *src_req;
h2o_httpclient_t *client;
struct {
h2o_iovec_t bufs[2]; /* first buf is the request line and headers, the second is the POST content */
int is_head;
} up_req;
h2o_buffer_t *last_content_before_send;
h2o_doublebuffer_t sending;
int is_websocket_handshake;
int had_body_error; /* set if an error happened while fetching the body so that we can propagate the error */
h2o_timer_t send_headers_timeout;
};
struct rp_ws_upgrade_info_t {
h2o_context_t *ctx;
uint64_t timeout;
h2o_socket_t *upstream_sock;
};
static h2o_httpclient_ctx_t *get_client_ctx(h2o_req_t *req)
{
h2o_req_overrides_t *overrides = req->overrides;
if (overrides != NULL && overrides->client_ctx != NULL)
return overrides->client_ctx;
return &req->conn->ctx->proxy.client_ctx;
}
static h2o_iovec_t rewrite_location(h2o_mem_pool_t *pool, const char *location, size_t location_len, h2o_url_t *match,
const h2o_url_scheme_t *req_scheme, h2o_iovec_t req_authority, h2o_iovec_t req_basepath)
{
h2o_url_t loc_parsed;
if (h2o_url_parse(location, location_len, &loc_parsed) != 0)
goto NoRewrite;
if (loc_parsed.scheme != &H2O_URL_SCHEME_HTTP)
goto NoRewrite;
if (!h2o_url_hosts_are_equal(&loc_parsed, match))
goto NoRewrite;
if (h2o_url_get_port(&loc_parsed) != h2o_url_get_port(match))
goto NoRewrite;
if (loc_parsed.path.len < match->path.len)
goto NoRewrite;
if (memcmp(loc_parsed.path.base, match->path.base, match->path.len) != 0)
goto NoRewrite;
return h2o_concat(pool, req_scheme->name, h2o_iovec_init(H2O_STRLIT("://")), req_authority, req_basepath,
h2o_iovec_init(loc_parsed.path.base + match->path.len, loc_parsed.path.len - match->path.len));
NoRewrite:
return (h2o_iovec_t){NULL};
}
static h2o_iovec_t build_request_merge_headers(h2o_mem_pool_t *pool, h2o_iovec_t merged, h2o_iovec_t added, int seperator)
{
if (added.len == 0)
return merged;
if (merged.len == 0)
return added;
size_t newlen = merged.len + 2 + added.len;
char *buf = h2o_mem_alloc_pool(pool, *buf, newlen);
memcpy(buf, merged.base, merged.len);
buf[merged.len] = seperator;
buf[merged.len + 1] = ' ';
memcpy(buf + merged.len + 2, added.base, added.len);
merged.base = buf;
merged.len = newlen;
return merged;
}
/*
* A request with neither a Content-Length nor a Transfer-Encoding header implies a zero-length request body (see 6th rule of RFC 7230
* 3.3.3).
* OTOH, section 3.3.3 states:
*
* A user agent SHOULD send a Content-Length in a request message when
* no Transfer-Encoding is sent and the request method defines a meaning
* for an enclosed payload body. For example, a Content-Length header
* field is normally sent in a POST request even when the value is 0
* (indicating an empty payload body). A user agent SHOULD NOT send a
* Content-Length header field when the request message does not contain
* a payload body and the method semantics do not anticipate such a
* body.
*
* PUT and POST define a meaning for the payload body, let's emit a
* Content-Length header if it doesn't exist already, since the server
* might send a '411 Length Required' response.
*
* see also: ML thread starting at https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0580.html
*/
static int req_requires_content_length(h2o_req_t *req)
{
int is_put_or_post = (req->method.len >= 1 && req->method.base[0] == 'P' &&
(h2o_memis(req->method.base, req->method.len, H2O_STRLIT("POST")) ||
h2o_memis(req->method.base, req->method.len, H2O_STRLIT("PUT"))));
return is_put_or_post && h2o_find_header(&req->res.headers, H2O_TOKEN_TRANSFER_ENCODING, -1) == -1;
}
static h2o_iovec_t build_content_length(h2o_mem_pool_t *pool, size_t cl)
{
h2o_iovec_t cl_buf;
cl_buf.base = h2o_mem_alloc_pool(pool, char, sizeof(H2O_UINT64_LONGEST_STR) - 1);
cl_buf.len = sprintf(cl_buf.base, "%zu", cl);
return cl_buf;
}
static void build_request(h2o_req_t *req, h2o_iovec_t *method, h2o_url_t *url, h2o_headers_t *headers,
h2o_httpclient_properties_t *props, int keepalive, int is_websocket_handshake, int use_proxy_protocol,
int *reprocess_if_too_early, h2o_url_t *origin)
{
size_t remote_addr_len = SIZE_MAX;
char remote_addr[NI_MAXHOST];
struct sockaddr_storage ss;
socklen_t sslen;
h2o_iovec_t cookie_buf = {NULL}, xff_buf = {NULL}, via_buf = {NULL};
int preserve_x_forwarded_proto = req->conn->ctx->globalconf->proxy.preserve_x_forwarded_proto;
int emit_x_forwarded_headers = req->conn->ctx->globalconf->proxy.emit_x_forwarded_headers;
int emit_via_header = req->conn->ctx->globalconf->proxy.emit_via_header;
/* for x-f-f */
if ((sslen = req->conn->callbacks->get_peername(req->conn, (void *)&ss)) != 0)
remote_addr_len = h2o_socket_getnumerichost((void *)&ss, sslen, remote_addr);
if (props->proxy_protocol != NULL && use_proxy_protocol) {
props->proxy_protocol->base = h2o_mem_alloc_pool(&req->pool, char, H2O_PROXY_HEADER_MAX_LENGTH);
props->proxy_protocol->len = h2o_stringify_proxy_header(req->conn, props->proxy_protocol->base);
}
/* method */
*method = h2o_strdup(&req->pool, req->method.base, req->method.len);
/* url */
h2o_url_init(url, origin->scheme, req->authority, h2o_strdup(&req->pool, req->path.base, req->path.len));
if (props->connection_header) {
if (is_websocket_handshake) {
*props->connection_header = h2o_iovec_init(H2O_STRLIT("upgrade"));
h2o_add_header(&req->pool, headers, H2O_TOKEN_UPGRADE, NULL, H2O_STRLIT("websocket"));
} else if (keepalive) {
*props->connection_header = h2o_iovec_init(H2O_STRLIT("keep-alive"));
} else {
*props->connection_header = h2o_iovec_init(H2O_STRLIT("close"));
}
}
/* CL or TE? Depends on whether we're streaming the request body or
not, and if CL was advertised in the original request */
if (req->proceed_req == NULL) {
if (req->entity.base != NULL || req_requires_content_length(req)) {
h2o_iovec_t cl_buf = build_content_length(&req->pool, req->entity.len);
h2o_add_header(&req->pool, headers, H2O_TOKEN_CONTENT_LENGTH, NULL, cl_buf.base, cl_buf.len);
}
} else {
if (req->content_length != SIZE_MAX) {
h2o_iovec_t cl_buf = build_content_length(&req->pool, req->content_length);
h2o_add_header(&req->pool, headers, H2O_TOKEN_CONTENT_LENGTH, NULL, cl_buf.base, cl_buf.len);
} else if (props->chunked != NULL) {
*(props->chunked) = 1;
h2o_add_header(&req->pool, headers, H2O_TOKEN_TRANSFER_ENCODING, NULL, H2O_STRLIT("chunked"));
}
}
/* headers */
/* rewrite headers if necessary */
h2o_headers_t req_headers = req->headers;
if (req->overrides != NULL && req->overrides->headers_cmds != NULL) {
req_headers.entries = NULL;
req_headers.size = 0;
req_headers.capacity = 0;
h2o_headers_command_t *cmd;
h2o_vector_reserve(&req->pool, &req_headers, req->headers.capacity);
memcpy(req_headers.entries, req->headers.entries, sizeof(req->headers.entries[0]) * req->headers.size);
req_headers.size = req->headers.size;
for (cmd = req->overrides->headers_cmds; cmd->cmd != H2O_HEADERS_CMD_NULL; ++cmd)
h2o_rewrite_headers(&req->pool, &req_headers, cmd);
}
{
const h2o_header_t *h, *h_end;
int found_early_data = 0;
for (h = req_headers.entries, h_end = h + req_headers.size; h != h_end; ++h) {
if (h2o_iovec_is_token(h->name)) {
const h2o_token_t *token = (void *)h->name;
if (token->flags.proxy_should_drop_for_req)
continue;
if (token == H2O_TOKEN_COOKIE) {
/* merge the cookie headers; see HTTP/2 8.1.2.5 and HTTP/1 (RFC6265 5.4) */
/* FIXME current algorithm is O(n^2) against the number of cookie headers */
cookie_buf = build_request_merge_headers(&req->pool, cookie_buf, h->value, ';');
continue;
} else if (token == H2O_TOKEN_VIA) {
if (!emit_via_header) {
goto AddHeader;
}
via_buf = build_request_merge_headers(&req->pool, via_buf, h->value, ',');
continue;
} else if (token == H2O_TOKEN_X_FORWARDED_FOR) {
if (!emit_x_forwarded_headers) {
goto AddHeader;
}
xff_buf = build_request_merge_headers(&req->pool, xff_buf, h->value, ',');
continue;
} else if (token == H2O_TOKEN_EARLY_DATA) {
found_early_data = 1;
goto AddHeader;
}
}
if (!preserve_x_forwarded_proto && h2o_lcstris(h->name->base, h->name->len, H2O_STRLIT("x-forwarded-proto")))
continue;
AddHeader:
if (h2o_iovec_is_token(h->name)) {
const h2o_token_t *token = (void *)h->name;
h2o_add_header(&req->pool, headers, token, h->orig_name, h->value.base, h->value.len);
} else {
h2o_add_header_by_str(&req->pool, headers, h->name->base, h->name->len, 0, h->orig_name, h->value.base,
h->value.len);
}
}
if (found_early_data) {
*reprocess_if_too_early = 0;
} else if (*reprocess_if_too_early) {
h2o_add_header(&req->pool, headers, H2O_TOKEN_EARLY_DATA, NULL, H2O_STRLIT("1"));
}
}
if (cookie_buf.len != 0) {
h2o_add_header(&req->pool, headers, H2O_TOKEN_COOKIE, NULL, cookie_buf.base, cookie_buf.len);
}
if (emit_x_forwarded_headers) {
if (!preserve_x_forwarded_proto)
h2o_add_header_by_str(&req->pool, headers, H2O_STRLIT("x-forwarded-proto"), 0, NULL, req->input.scheme->name.base,
req->input.scheme->name.len);
if (remote_addr_len != SIZE_MAX)
xff_buf = build_request_merge_headers(&req->pool, xff_buf, h2o_strdup(&req->pool, remote_addr, remote_addr_len), ',');
if (xff_buf.len != 0)
h2o_add_header(&req->pool, headers, H2O_TOKEN_X_FORWARDED_FOR, NULL, xff_buf.base, xff_buf.len);
}
if (emit_via_header) {
h2o_iovec_t added;
added.base = h2o_mem_alloc_pool(&req->pool, char, sizeof("1.1 ") - 1 + req->input.authority.len);
added.len = 0;
if (req->version < 0x200) {
added.base[added.len++] = '1';
added.base[added.len++] = '.';
added.base[added.len++] = '0' + (0x100 <= req->version && req->version <= 0x109 ? req->version - 0x100 : 0);
} else {
added.base[added.len++] = '2';
}
added.base[added.len++] = ' ';
memcpy(added.base + added.len, req->input.authority.base, req->input.authority.len);
added.len += req->input.authority.len;
via_buf = build_request_merge_headers(&req->pool, via_buf, added, ',');
h2o_add_header(&req->pool, headers, H2O_TOKEN_VIA, NULL, via_buf.base, via_buf.len);
}
}
static void do_close(struct rp_generator_t *self)
{
/**
* This can be called in the following three scenarios:
* 1. Downstream timeout before receiving header from upstream
* dispose callback calls this function, but stop callback doesn't
* 2. Reprocess
* stop callback calls this, but dispose callback does it later (after reprocessed request gets finished)
* 3. Others
* Both the stop and the dispose callbacks call this function, in that order
* Thus, to make sure cleanup always happens, both the dispose and the stop callbacks call this function.
*/
if (self->client != NULL) {
self->client->cancel(self->client);
self->client = NULL;
}
h2o_timer_unlink(&self->send_headers_timeout);
}
static void do_stop(h2o_generator_t *generator, h2o_req_t *req)
{
struct rp_generator_t *self = (void *)generator;
do_close(self);
}
static void do_send(struct rp_generator_t *self)
{
h2o_iovec_t vecs[1];
size_t veccnt;
h2o_send_state_t ststate;
vecs[0] = h2o_doublebuffer_prepare(&self->sending, self->client != NULL ? self->client->buf : &self->last_content_before_send,
self->src_req->preferred_chunk_size);
if (self->client == NULL && vecs[0].len == self->sending.buf->size && self->last_content_before_send->size == 0) {
veccnt = vecs[0].len != 0 ? 1 : 0;
ststate = H2O_SEND_STATE_FINAL;
} else {
if (vecs[0].len == 0)
return;
veccnt = 1;
ststate = H2O_SEND_STATE_IN_PROGRESS;
}
if (self->had_body_error)
ststate = H2O_SEND_STATE_ERROR;
h2o_send(self->src_req, vecs, veccnt, ststate);
}
static void do_proceed(h2o_generator_t *generator, h2o_req_t *req)
{
struct rp_generator_t *self = (void *)generator;
h2o_doublebuffer_consume(&self->sending);
do_send(self);
if (self->client != NULL)
self->client->update_window(self->client);
}
static void on_websocket_upgrade_complete(void *_info, h2o_socket_t *sock, size_t reqsize)
{
struct rp_ws_upgrade_info_t *info = _info;
if (sock != NULL) {
h2o_buffer_consume(&sock->input, reqsize); // It is detached from conn. Let's trash unused data.
h2o_tunnel_establish(info->ctx, sock, info->upstream_sock, info->timeout);
} else {
h2o_socket_close(info->upstream_sock);
}
free(info);
}
static inline void on_websocket_upgrade(struct rp_generator_t *self, uint64_t timeout)
{
h2o_req_t *req = self->src_req;
h2o_socket_t *sock = self->client->steal_socket(self->client);
struct rp_ws_upgrade_info_t *info = h2o_mem_alloc(sizeof(*info));
info->upstream_sock = sock;
info->timeout = timeout;
info->ctx = req->conn->ctx;
h2o_http1_upgrade(req, NULL, 0, on_websocket_upgrade_complete, info);
}
static int on_body(h2o_httpclient_t *client, const char *errstr)
{
struct rp_generator_t *self = client->data;
h2o_timer_unlink(&self->send_headers_timeout);
if (errstr != NULL) {
self->src_req->timestamps.proxy = self->client->timings;
/* detach the content */
self->last_content_before_send = *self->client->buf;
h2o_buffer_init(self->client->buf, &h2o_socket_buffer_prototype);
self->client = NULL;
if (errstr != h2o_httpclient_error_is_eos) {
h2o_req_log_error(self->src_req, "lib/core/proxy.c", "%s", errstr);
self->had_body_error = 1;
}
}
if (!self->sending.inflight)
do_send(self);
return 0;
}
static char compress_hint_to_enum(const char *val, size_t len)
{
if (h2o_lcstris(val, len, H2O_STRLIT("on"))) {
return H2O_COMPRESS_HINT_ENABLE;
}
if (h2o_lcstris(val, len, H2O_STRLIT("off"))) {
return H2O_COMPRESS_HINT_DISABLE;
}
if (h2o_lcstris(val, len, H2O_STRLIT("gzip"))) {
return H2O_COMPRESS_HINT_ENABLE_GZIP;
}
if (h2o_lcstris(val, len, H2O_STRLIT("br"))) {
return H2O_COMPRESS_HINT_ENABLE_BR;
}
return H2O_COMPRESS_HINT_AUTO;
}
static void on_send_headers_timeout(h2o_timer_t *entry)
{
struct rp_generator_t *self = H2O_STRUCT_FROM_MEMBER(struct rp_generator_t, send_headers_timeout, entry);
h2o_doublebuffer_prepare_empty(&self->sending);
h2o_send(self->src_req, NULL, 0, H2O_SEND_STATE_IN_PROGRESS);
}
static h2o_httpclient_body_cb on_head(h2o_httpclient_t *client, const char *errstr, int version, int status, h2o_iovec_t msg,
h2o_header_t *headers, size_t num_headers, int header_requires_dup)
{
struct rp_generator_t *self = client->data;
h2o_req_t *req = self->src_req;
size_t i;
int emit_missing_date_header = req->conn->ctx->globalconf->proxy.emit_missing_date_header;
int seen_date_header = 0;
self->src_req->timestamps.proxy = self->client->timings;
if (errstr != NULL && errstr != h2o_httpclient_error_is_eos) {
self->client = NULL;
h2o_req_log_error(req, "lib/core/proxy.c", "%s", errstr);
if (errstr == h2o_httpclient_error_refused_stream) {
req->upstream_refused = 1;
static h2o_generator_t generator = {NULL, NULL};
h2o_start_response(req, &generator);
h2o_send(req, NULL, 0, H2O_SEND_STATE_ERROR);
} else {
h2o_send_error_502(req, "Gateway Error", errstr, 0);
}
return NULL;
}
/* copy the response (note: all the headers must be copied; http1client discards the input once we return from this callback) */
req->res.status = status;
req->res.reason = h2o_strdup(&req->pool, msg.base, msg.len).base;
for (i = 0; i != num_headers; ++i) {
h2o_iovec_t value = headers[i].value;
if (h2o_iovec_is_token(headers[i].name)) {
const h2o_token_t *token = H2O_STRUCT_FROM_MEMBER(h2o_token_t, buf, headers[i].name);
if (token->flags.proxy_should_drop_for_res)
continue;
if (token == H2O_TOKEN_CONTENT_LENGTH) {
if (req->res.content_length != SIZE_MAX ||
(req->res.content_length = h2o_strtosize(headers[i].value.base, headers[i].value.len)) == SIZE_MAX) {
self->client = NULL;
h2o_req_log_error(req, "lib/core/proxy.c", "%s", "invalid response from upstream (malformed content-length)");
h2o_send_error_502(req, "Gateway Error", "invalid response from upstream", 0);
return NULL;
}
goto Skip;
} else if (token == H2O_TOKEN_LOCATION) {
if (req->res_is_delegated && (300 <= status && status <= 399) && status != 304) {
self->client = NULL;
h2o_iovec_t method = h2o_get_redirect_method(req->method, status);
h2o_send_redirect_internal(req, method, headers[i].value.base, headers[i].value.len, 1);
return NULL;
}
if (req->overrides != NULL && req->overrides->location_rewrite.match != NULL) {
h2o_iovec_t new_value =
rewrite_location(&req->pool, value.base, value.len, req->overrides->location_rewrite.match,
req->input.scheme, req->input.authority, req->overrides->location_rewrite.path_prefix);
if (new_value.base != NULL) {
value = new_value;
goto AddHeader;
}
}
} else if (token == H2O_TOKEN_LINK) {
value = h2o_push_path_in_link_header(req, value.base, value.len);
if (!value.len)
goto Skip;
} else if (token == H2O_TOKEN_SERVER) {
if (!req->conn->ctx->globalconf->proxy.preserve_server_header)
goto Skip;
} else if (token == H2O_TOKEN_X_COMPRESS_HINT) {
req->compress_hint = compress_hint_to_enum(value.base, value.len);
goto Skip;
} else if (token == H2O_TOKEN_DATE) {
seen_date_header = 1;
}
if (header_requires_dup)
value = h2o_strdup(&req->pool, value.base, value.len);
AddHeader:
h2o_add_header(&req->pool, &req->res.headers, token, headers[i].orig_name, value.base, value.len);
Skip:;
} else {
h2o_iovec_t name = *headers[i].name;
if (header_requires_dup) {
name = h2o_strdup(&req->pool, name.base, name.len);
value = h2o_strdup(&req->pool, value.base, value.len);
}
h2o_add_header_by_str(&req->pool, &req->res.headers, name.base, name.len, 0, headers[i].orig_name, value.base,
value.len);
}
}
if (!seen_date_header && emit_missing_date_header)
h2o_resp_add_date_header(req);
if (self->is_websocket_handshake && req->res.status == 101) {
h2o_httpclient_ctx_t *client_ctx = get_client_ctx(req);
assert(client_ctx->websocket_timeout != NULL);
h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_UPGRADE, NULL, H2O_STRLIT("websocket"));
on_websocket_upgrade(self, *client_ctx->websocket_timeout);
self->client = NULL;
return NULL;
}
/* declare the start of the response */
h2o_start_response(req, &self->super);
if (errstr == h2o_httpclient_error_is_eos) {
self->client = NULL;
h2o_send(req, NULL, 0, H2O_SEND_STATE_FINAL);
return NULL;
}
/* if the httpclient has not received any body yet, immediately send just the headers using a zero-delay timer */
h2o_timer_link(req->conn->ctx->loop, 0, &self->send_headers_timeout);
return on_body;
}
static int on_1xx(h2o_httpclient_t *client, int version, int status, h2o_iovec_t msg, h2o_header_t *headers, size_t num_headers)
{
struct rp_generator_t *self = client->data;
size_t i;
for (i = 0; i != num_headers; ++i) {
if (headers[i].name == &H2O_TOKEN_LINK->buf)
h2o_push_path_in_link_header(self->src_req, headers[i].value.base, headers[i].value.len);
}
if (status != 101) {
self->src_req->res.status = status;
self->src_req->res.headers = (h2o_headers_t){headers, num_headers, num_headers};
h2o_send_informational(self->src_req);
}
return 0;
}
static void proceed_request(h2o_httpclient_t *client, size_t written, int is_end_stream)
{
struct rp_generator_t *self = client->data;
if (self->src_req->proceed_req != NULL)
self->src_req->proceed_req(self->src_req, written, is_end_stream);
}
static int write_req(void *ctx, h2o_iovec_t chunk, int is_end_stream)
{
struct rp_generator_t *self = ctx;
if (is_end_stream) {
self->src_req->write_req.cb = NULL;
}
return self->client->write_req(self->client, chunk, is_end_stream);
}
static h2o_httpclient_head_cb on_connect(h2o_httpclient_t *client, const char *errstr, h2o_iovec_t *method, h2o_url_t *url,
const h2o_header_t **headers, size_t *num_headers, h2o_iovec_t *body,
h2o_httpclient_proceed_req_cb *proceed_req_cb, h2o_httpclient_properties_t *props,
h2o_url_t *origin)
{
struct rp_generator_t *self = client->data;
h2o_req_t *req = self->src_req;
int use_proxy_protocol = 0, reprocess_if_too_early = 0;
self->src_req->timestamps.proxy = self->client->timings;
if (errstr != NULL) {
self->client = NULL;
h2o_req_log_error(self->src_req, "lib/core/proxy.c", "%s", errstr);
h2o_send_error_502(self->src_req, "Gateway Error", errstr, 0);
return NULL;
}
assert(origin != NULL);
if (req->overrides != NULL) {
use_proxy_protocol = req->overrides->use_proxy_protocol;
req->overrides->location_rewrite.match = origin;
if (!req->overrides->proxy_preserve_host) {
req->scheme = origin->scheme;
req->authority = origin->authority;
}
h2o_iovec_t append = req->path;
if (origin->path.base[origin->path.len - 1] == '/' && append.base[0] == '/') {
append.base += 1;
append.len -= 1;
}
req->path = h2o_concat(&req->pool, origin->path, append);
req->path_normalized =
h2o_url_normalize_path(&req->pool, req->path.base, req->path.len, &req->query_at, &req->norm_indexes);
}
reprocess_if_too_early = h2o_conn_is_early_data(req->conn);
h2o_headers_t headers_vec = (h2o_headers_t){NULL};
build_request(req, method, url, &headers_vec, props,
!use_proxy_protocol && h2o_socketpool_can_keepalive(client->connpool->socketpool), self->is_websocket_handshake,
use_proxy_protocol, &reprocess_if_too_early, origin);
*headers = headers_vec.entries;
*num_headers = headers_vec.size;
if (reprocess_if_too_early)
req->reprocess_if_too_early = 1;
*body = h2o_iovec_init(NULL, 0);
*proceed_req_cb = NULL;
if (self->src_req->entity.base != NULL) {
*body = self->src_req->entity;
if (self->src_req->proceed_req != NULL) {
*proceed_req_cb = proceed_request;
self->src_req->write_req.cb = write_req;
self->src_req->write_req.ctx = self;
}
}
self->client->informational_cb = on_1xx;
return on_head;
}
static void on_generator_dispose(void *_self)
{
struct rp_generator_t *self = _self;
do_close(self);
h2o_buffer_dispose(&self->last_content_before_send);
h2o_doublebuffer_dispose(&self->sending);
}
static struct rp_generator_t *proxy_send_prepare(h2o_req_t *req)
{
struct rp_generator_t *self = h2o_mem_alloc_shared(&req->pool, sizeof(*self), on_generator_dispose);
h2o_httpclient_ctx_t *client_ctx = get_client_ctx(req);
self->super.proceed = do_proceed;
self->super.stop = do_stop;
self->src_req = req;
if (client_ctx->websocket_timeout != NULL && h2o_lcstris(req->upgrade.base, req->upgrade.len, H2O_STRLIT("websocket"))) {
self->is_websocket_handshake = 1;
} else {
self->is_websocket_handshake = 0;
}
self->had_body_error = 0;
self->up_req.is_head = h2o_memis(req->method.base, req->method.len, H2O_STRLIT("HEAD"));
h2o_buffer_init(&self->last_content_before_send, &h2o_socket_buffer_prototype);
h2o_doublebuffer_init(&self->sending, &h2o_socket_buffer_prototype);
req->timestamps.proxy = (h2o_httpclient_timings_t){{0}};
h2o_timer_init(&self->send_headers_timeout, on_send_headers_timeout);
return self;
}
void h2o__proxy_process_request(h2o_req_t *req)
{
h2o_req_overrides_t *overrides = req->overrides;
h2o_httpclient_ctx_t *client_ctx = get_client_ctx(req);
h2o_url_t target_buf, *target = &target_buf;
h2o_httpclient_connection_pool_t *connpool = &req->conn->ctx->proxy.connpool;
if (overrides != NULL && overrides->connpool != NULL) {
connpool = overrides->connpool;
if (!overrides->proxy_preserve_host)
target = NULL;
}
if (target == &target_buf)
h2o_url_init(&target_buf, req->scheme, req->authority, h2o_iovec_init(H2O_STRLIT("/")));
struct rp_generator_t *self = proxy_send_prepare(req);
/*
When the PROXY protocol is being used (i.e. when overrides->use_proxy_protocol is set), the client needs to establish a new
connection even when there is a pooled connection to the peer, since the header (as defined in
https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) needs to be sent at the beginning of the connection.
However, currently h2o_http1client_connect doesn't provide an interface to enforce establishing a new connection. In other
words, there is a chance that we would use a pool connection here.
OTOH, the probability of seeing such issue is rare; it would only happen if the same destination identified by its host:port is
accessed in both ways (i.e. in one path with use_proxy_protocol set and in the other path without).
So I leave this as it is for the time being.
*/
h2o_httpclient_connect(&self->client, &req->pool, self, client_ctx, connpool, target, on_connect);
}
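/* Editor's note: the review comment for this file says the bitfields added by the patch
 * above (req_done, res_done) need to be explicitly initialized in proxy_send_prepare().
 * A hedged sketch of that initialization, assuming the patched struct layout and that
 * h2o_mem_alloc_shared() does not zero the allocation (illustrative only, not the
 * upstream change): */
static void sketch_init_done_flags(struct rp_generator_t *self)
{
    self->req_done = 0; /* would be set once the request side has completed */
    self->res_done = 0; /* would be set once the response side has completed */
}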
| 1 | 13,511 | I believe these need to be explicitly initialized in `proxy_send_prepare` | h2o-h2o | c |
@@ -160,6 +160,7 @@ class TaskProcess(multiprocessing.Process):
# Need to have different random seeds if running in separate processes
random.seed((os.getpid(), time.time()))
+ t0 = time.time() # Failed task start time
status = FAILED
expl = ''
missing = [] | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The worker communicates with the scheduler and does two things:
1. Sends all tasks that have to be run
2. Gets tasks from the scheduler that should be run
When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.Scheduler` instance.
When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance.
Everything in this module is private to luigi and may change in incompatible
ways between versions. The exception is the exception types and the
:py:class:`worker` config class.
"""
import collections
import getpass
import logging
import multiprocessing
import os
import signal
import subprocess
import sys
try:
import Queue
except ImportError:
import queue as Queue
import random
import socket
import threading
import time
import traceback
import types
from luigi import six
from luigi import notifications
from luigi.event import Event
from luigi.task_register import load_task
from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, UNKNOWN, Scheduler, RetryPolicy
from luigi.scheduler import WORKER_STATE_ACTIVE, WORKER_STATE_DISABLED
from luigi.target import Target
from luigi.task import Task, flatten, getpaths, Config
from luigi.task_register import TaskClassException
from luigi.task_status import RUNNING
from luigi.parameter import FloatParameter, IntParameter, BoolParameter
try:
import simplejson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
# Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex,
# that may not be unlocked in child process, resulting in the process being locked indefinitely.
fork_lock = threading.Lock()
# Why we assert on _WAIT_INTERVAL_EPS:
# multiprocessing.Queue.get() is undefined for timeout=0 it seems:
# https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.get.
# I also tried with really low epsilon, but then ran into the same issue where
# the test case "test_external_dependency_worker_is_patient" got stuck. So I
# unscientifically just set the final value to a floating point number that
# "worked for me".
_WAIT_INTERVAL_EPS = 0.00001
def _is_external(task):
return task.run is None or task.run == NotImplemented
def _get_retry_policy_dict(task):
return RetryPolicy(task.retry_count, task.disable_hard_timeout, task.disable_window_seconds)._asdict()
class TaskException(Exception):
pass
GetWorkResponse = collections.namedtuple('GetWorkResponse', (
'task_id',
'running_tasks',
'n_pending_tasks',
'n_unique_pending',
'n_pending_last_scheduled',
'worker_state',
))
class TaskProcess(multiprocessing.Process):
""" Wrap all task execution in this class.
Mainly for convenience since this is run in a separate process. """
def __init__(self, task, worker_id, result_queue, tracking_url_callback,
status_message_callback, use_multiprocessing=False, worker_timeout=0):
super(TaskProcess, self).__init__()
self.task = task
self.worker_id = worker_id
self.result_queue = result_queue
self.tracking_url_callback = tracking_url_callback
self.status_message_callback = status_message_callback
if task.worker_timeout is not None:
worker_timeout = task.worker_timeout
self.timeout_time = time.time() + worker_timeout if worker_timeout else None
self.use_multiprocessing = use_multiprocessing or self.timeout_time is not None
def _run_get_new_deps(self):
self.task.set_tracking_url = self.tracking_url_callback
self.task.set_status_message = self.status_message_callback
task_gen = self.task.run()
self.task.set_tracking_url = None
self.task.set_status_message = None
if not isinstance(task_gen, types.GeneratorType):
return None
next_send = None
while True:
try:
if next_send is None:
requires = six.next(task_gen)
else:
requires = task_gen.send(next_send)
except StopIteration:
return None
new_req = flatten(requires)
if all(t.complete() for t in new_req):
next_send = getpaths(requires)
else:
new_deps = [(t.task_module, t.task_family, t.to_str_params())
for t in new_req]
return new_deps
def run(self):
logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task)
if self.use_multiprocessing:
# Need to have different random seeds if running in separate processes
random.seed((os.getpid(), time.time()))
status = FAILED
expl = ''
missing = []
new_deps = []
try:
# Verify that all the tasks are fulfilled! For external tasks we
# don't care about unfulfilled dependencies, because we are just
# checking completeness of self.task so outputs of dependencies are
# irrelevant.
if not _is_external(self.task):
missing = [dep.task_id for dep in self.task.deps() if not dep.complete()]
if missing:
deps = 'dependency' if len(missing) == 1 else 'dependencies'
raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing)))
self.task.trigger_event(Event.START, self.task)
t0 = time.time()
status = None
if _is_external(self.task):
# External task
# TODO(erikbern): We should check for task completeness after non-external tasks too!
# This will resolve #814 and make things a lot more consistent
if self.task.complete():
status = DONE
else:
status = FAILED
expl = 'Task is an external data dependency ' \
'and data does not exist (yet?).'
else:
new_deps = self._run_get_new_deps()
status = DONE if not new_deps else PENDING
if new_deps:
logger.info(
'[pid %s] Worker %s new requirements %s',
os.getpid(), self.worker_id, self.task)
elif status == DONE:
self.task.trigger_event(
Event.PROCESSING_TIME, self.task, time.time() - t0)
expl = self.task.on_success()
logger.info('[pid %s] Worker %s done %s', os.getpid(),
self.worker_id, self.task)
self.task.trigger_event(Event.SUCCESS, self.task)
except KeyboardInterrupt:
raise
except BaseException as ex:
status = FAILED
logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task)
self.task.trigger_event(Event.FAILURE, self.task, ex)
raw_error_message = self.task.on_failure(ex)
expl = raw_error_message
finally:
self.result_queue.put(
(self.task.task_id, status, expl, missing, new_deps))
def _recursive_terminate(self):
import psutil
try:
parent = psutil.Process(self.pid)
children = parent.children(recursive=True)
# terminate parent. Give it a chance to clean up
super(TaskProcess, self).terminate()
parent.wait()
# terminate children
for child in children:
try:
child.terminate()
except psutil.NoSuchProcess:
continue
except psutil.NoSuchProcess:
return
def terminate(self):
"""Terminate this process and its subprocesses."""
# default terminate() doesn't cleanup child processes, it orphans them.
try:
return self._recursive_terminate()
except ImportError:
return super(TaskProcess, self).terminate()
class SingleProcessPool(object):
"""
Dummy process pool for using a single processor.
Imitates the api of multiprocessing.Pool using single-processor equivalents.
"""
def apply_async(self, function, args):
return function(*args)
def close(self):
pass
def join(self):
pass
class DequeQueue(collections.deque):
"""
deque wrapper implementing the Queue interface.
"""
def put(self, obj, block=None, timeout=None):
return self.append(obj)
def get(self, block=None, timeout=None):
try:
return self.pop()
except IndexError:
raise Queue.Empty
class AsyncCompletionException(Exception):
"""
Exception indicating that something went wrong with checking complete.
"""
def __init__(self, trace):
self.trace = trace
class TracebackWrapper(object):
"""
Class to wrap tracebacks so we can know they're not just strings.
"""
def __init__(self, trace):
self.trace = trace
def check_complete(task, out_queue):
"""
Checks if task is complete, puts the result to out_queue.
"""
logger.debug("Checking if %s is complete", task)
try:
is_complete = task.complete()
except Exception:
is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete))
class worker(Config):
# NOTE: `section.config-variable` in the config_path argument is deprecated in favor of `worker.config_variable`
ping_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-ping-interval'))
keep_alive = BoolParameter(default=False,
config_path=dict(section='core', name='worker-keep-alive'))
count_uniques = BoolParameter(default=False,
config_path=dict(section='core', name='worker-count-uniques'),
description='worker-count-uniques means that we will keep a '
'worker alive only if it has a unique pending task, as '
'well as having keep-alive true')
count_last_scheduled = BoolParameter(default=False,
description='Keep a worker alive only if there are '
'pending tasks which it was the last to '
'schedule.')
wait_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-wait-interval'))
wait_jitter = FloatParameter(default=5.0)
max_reschedules = IntParameter(default=1,
config_path=dict(section='core', name='worker-max-reschedules'))
timeout = IntParameter(default=0,
config_path=dict(section='core', name='worker-timeout'))
task_limit = IntParameter(default=None,
config_path=dict(section='core', name='worker-task-limit'))
retry_external_tasks = BoolParameter(default=False,
config_path=dict(section='core', name='retry-external-tasks'),
description='If true, incomplete external tasks will be '
'retested for completion while Luigi is running.')
no_install_shutdown_handler = BoolParameter(default=False,
description='If true, the SIGUSR1 shutdown handler will '
'NOT be installed on the worker')
class KeepAliveThread(threading.Thread):
"""
Periodically tell the scheduler that the worker still lives.
"""
def __init__(self, scheduler, worker_id, ping_interval):
super(KeepAliveThread, self).__init__()
self._should_stop = threading.Event()
self._scheduler = scheduler
self._worker_id = worker_id
self._ping_interval = ping_interval
def stop(self):
self._should_stop.set()
def run(self):
while True:
self._should_stop.wait(self._ping_interval)
if self._should_stop.is_set():
logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % self._worker_id)
break
with fork_lock:
try:
self._scheduler.ping(worker=self._worker_id)
except: # httplib.BadStatusLine:
logger.warning('Failed pinging scheduler')
class Worker(object):
"""
Worker object communicates with a scheduler.
Simple class that talks to a scheduler and:
* tells the scheduler what it has to do + its dependencies
* asks for stuff to do (pulls it in a loop and runs it)
"""
def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs):
if scheduler is None:
scheduler = Scheduler()
self.worker_processes = int(worker_processes)
self._worker_info = self._generate_worker_info()
if not worker_id:
worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info])
self._config = worker(**kwargs)
assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, "[worker] wait_interval must be positive"
assert self._config.wait_jitter >= 0.0, "[worker] wait_jitter must be equal or greater than zero"
self._id = worker_id
self._scheduler = scheduler
self._assistant = assistant
self._stop_requesting_work = False
self.host = socket.gethostname()
self._scheduled_tasks = {}
self._suspended_tasks = {}
self._batch_running_tasks = {}
self._batch_families_sent = set()
self._first_task = None
self.add_succeeded = True
self.run_succeeded = True
self.unfulfilled_counts = collections.defaultdict(int)
# note that ``signal.signal(signal.SIGUSR1, fn)`` only works inside the main execution thread, which is why we
# provide the ability to conditionally install the hook.
if not self._config.no_install_shutdown_handler:
try:
signal.signal(signal.SIGUSR1, self.handle_interrupt)
signal.siginterrupt(signal.SIGUSR1, False)
except AttributeError:
pass
# Keep info about what tasks are running (could be in other processes)
self._task_result_queue = multiprocessing.Queue()
self._running_tasks = {}
# Stuff for execution_summary
self._add_task_history = []
self._get_work_response_history = []
def _add_task(self, *args, **kwargs):
"""
Call ``self._scheduler.add_task``, but store the values too so we can
implement :py:func:`luigi.execution_summary.summary`.
"""
task_id = kwargs['task_id']
status = kwargs['status']
runnable = kwargs['runnable']
task = self._scheduled_tasks.get(task_id)
if task:
msg = (task, status, runnable)
self._add_task_history.append(msg)
if task_id in self._batch_running_tasks:
for batch_task in self._batch_running_tasks.pop(task_id):
self._add_task_history.append((batch_task, status, True))
self._scheduler.add_task(*args, **kwargs)
logger.info('Informed scheduler that task %s has status %s', task_id, status)
def __enter__(self):
"""
Start the KeepAliveThread.
"""
self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id, self._config.ping_interval)
self._keep_alive_thread.daemon = True
self._keep_alive_thread.start()
return self
def __exit__(self, type, value, traceback):
"""
Stop the KeepAliveThread and kill still running tasks.
"""
self._keep_alive_thread.stop()
self._keep_alive_thread.join()
for task in self._running_tasks.values():
if task.is_alive():
task.terminate()
return False # Don't suppress exception
def _generate_worker_info(self):
# Generate as much info as possible about the worker
# Some of these calls might not be available on all OS's
args = [('salt', '%09d' % random.randrange(0, 999999999)),
('workers', self.worker_processes)]
try:
args += [('host', socket.gethostname())]
except BaseException:
pass
try:
args += [('username', getpass.getuser())]
except BaseException:
pass
try:
args += [('pid', os.getpid())]
except BaseException:
pass
try:
sudo_user = os.getenv("SUDO_USER")
if sudo_user:
args.append(('sudo_user', sudo_user))
except BaseException:
pass
return args
def _validate_task(self, task):
if not isinstance(task, Task):
raise TaskException('Can not schedule non-task %s' % task)
if not task.initialized():
# we can't get the repr of it since it's not initialized...
raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' % task.__class__.__name__)
def _log_complete_error(self, task, tb):
log_msg = "Will not run {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_dependency_error(self, task, tb):
log_msg = "Will not run {task} or any dependencies due to error in deps() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_unexpected_error(self, task):
logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause
def _email_complete_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not run {task} or any dependencies due to error in complete() method",
)
def _email_dependency_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not run {task} or any dependencies due to error in deps() method",
)
def _email_unexpected_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: Framework error while scheduling {task}. Host: {host}",
headline="Luigi framework error",
)
def _email_task_failure(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} FAILED. Host: {host}",
headline="A task failed when running. Most likely run() raised an exception.",
)
def _email_error(self, task, formatted_traceback, subject, headline):
formatted_subject = subject.format(task=task, host=self.host)
command = subprocess.list2cmdline(sys.argv)
message = notifications.format_task_error(headline, task, command, formatted_traceback)
notifications.send_error_email(formatted_subject, message, task.owner_email)
def _handle_task_load_error(self, exception, task_ids):
msg = 'Cannot find task(s) sent by scheduler: {}'.format(','.join(task_ids))
logger.exception(msg)
subject = 'Luigi: {}'.format(msg)
error_message = notifications.wrap_traceback(exception)
for task_id in task_ids:
self._add_task(
worker=self._id,
task_id=task_id,
status=FAILED,
runnable=False,
expl=error_message,
)
notifications.send_error_email(subject, error_message)
def add(self, task, multiprocess=False):
"""
Add a Task for the worker to check and possibly schedule and run.
Returns True if task and its dependencies were successfully scheduled or completed before.
"""
if self._first_task is None and hasattr(task, 'task_id'):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
# we track queue size ourselves because len(queue) won't work for multiprocessing
queue_size = 1
try:
seen = set([task.task_id])
while queue_size:
current = queue.get()
queue_size -= 1
item, is_complete = current
for next in self._add(item, is_complete):
if next.task_id not in seen:
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (KeyboardInterrupt, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
raise
finally:
pool.close()
pool.join()
return self.add_succeeded
def _add_task_batcher(self, task):
family = task.task_family
if family not in self._batch_families_sent:
task_class = type(task)
batch_param_names = task_class.batch_param_names()
if batch_param_names:
self._scheduler.add_task_batcher(
worker=self._id,
task_family=family,
batched_args=batch_param_names,
max_batch_size=task.max_batch_size,
)
self._batch_families_sent.add(family)
def _add(self, task, is_complete):
if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit:
logger.warning('Will not run %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit)
deps = None
status = UNKNOWN
runnable = False
else:
formatted_traceback = None
try:
self._check_complete_value(is_complete)
except KeyboardInterrupt:
raise
except AsyncCompletionException as ex:
formatted_traceback = ex.trace
except BaseException:
formatted_traceback = traceback.format_exc()
if formatted_traceback is not None:
self.add_succeeded = False
self._log_complete_error(task, formatted_traceback)
task.trigger_event(Event.DEPENDENCY_MISSING, task)
self._email_complete_error(task, formatted_traceback)
deps = None
status = UNKNOWN
runnable = False
elif is_complete:
deps = None
status = DONE
runnable = False
task.trigger_event(Event.DEPENDENCY_PRESENT, task)
elif _is_external(task):
deps = None
status = PENDING
runnable = worker().retry_external_tasks
task.trigger_event(Event.DEPENDENCY_MISSING, task)
logger.warning('Data for %s does not exist (yet?). The task is an '
'external data depedency, so it can not be run from'
' this luigi process.', task)
else:
try:
deps = task.deps()
self._add_task_batcher(task)
except Exception as ex:
formatted_traceback = traceback.format_exc()
self.add_succeeded = False
self._log_dependency_error(task, formatted_traceback)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_dependency_error(task, formatted_traceback)
deps = None
status = UNKNOWN
runnable = False
else:
status = PENDING
runnable = True
if task.disabled:
status = DISABLED
if deps:
for d in deps:
self._validate_dependency(d)
task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d)
yield d # return additional tasks to add
deps = [d.task_id for d in deps]
self._scheduled_tasks[task.task_id] = task
self._add_task(
worker=self._id,
task_id=task.task_id,
status=status,
deps=deps,
runnable=runnable,
priority=task.priority,
resources=task.process_resources(),
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
batchable=task.batchable,
retry_policy_dict=_get_retry_policy_dict(task),
)
def _validate_dependency(self, dependency):
if isinstance(dependency, Target):
raise Exception('requires() can not return Target objects. Wrap it in an ExternalTask class')
elif not isinstance(dependency, Task):
raise Exception('requires() must return Task objects')
def _check_complete_value(self, is_complete):
if is_complete not in (True, False):
if isinstance(is_complete, TracebackWrapper):
raise AsyncCompletionException(is_complete.trace)
raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete)
def _add_worker(self):
self._worker_info.append(('first_task', self._first_task))
self._scheduler.add_worker(self._id, self._worker_info)
def _log_remote_tasks(self, get_work_response):
logger.debug("Done")
logger.debug("There are no more tasks to run at this time")
if get_work_response.running_tasks:
for r in get_work_response.running_tasks:
logger.debug('%s is currently run by worker %s', r['task_id'], r['worker'])
elif get_work_response.n_pending_tasks:
logger.debug(
"There are %s pending tasks possibly being run by other workers",
get_work_response.n_pending_tasks)
if get_work_response.n_unique_pending:
logger.debug(
"There are %i pending tasks unique to this worker",
get_work_response.n_unique_pending)
if get_work_response.n_pending_last_scheduled:
logger.debug(
"There are %i pending tasks last scheduled by this worker",
get_work_response.n_pending_last_scheduled)
def _get_work_task_id(self, get_work_response):
if get_work_response.get('task_id') is not None:
return get_work_response['task_id']
elif 'batch_id' in get_work_response:
try:
task = load_task(
module=get_work_response.get('task_module'),
task_name=get_work_response['task_family'],
params_str=get_work_response['task_params'],
)
except Exception as ex:
self._handle_task_load_error(ex, get_work_response['batch_task_ids'])
self.run_succeeded = False
return None
self._scheduler.add_task(
worker=self._id,
task_id=task.task_id,
module=get_work_response.get('task_module'),
family=get_work_response['task_family'],
params=task.to_str_params(),
status=RUNNING,
batch_id=get_work_response['batch_id'],
)
return task.task_id
else:
return None
def _get_work(self):
if self._stop_requesting_work:
return GetWorkResponse(None, 0, 0, 0, 0, WORKER_STATE_DISABLED)
if self.worker_processes > 0:
logger.debug("Asking scheduler for work...")
r = self._scheduler.get_work(
worker=self._id,
host=self.host,
assistant=self._assistant,
current_tasks=list(self._running_tasks.keys()),
)
else:
logger.debug("Checking if tasks are still pending")
r = self._scheduler.count_pending(worker=self._id)
running_tasks = r['running_tasks']
task_id = self._get_work_task_id(r)
self._get_work_response_history.append({
'task_id': task_id,
'running_tasks': running_tasks,
})
if task_id is not None and task_id not in self._scheduled_tasks:
logger.info('Did not schedule %s, will load it dynamically', task_id)
try:
# TODO: we should obtain the module name from the server!
self._scheduled_tasks[task_id] = \
load_task(module=r.get('task_module'),
task_name=r['task_family'],
params_str=r['task_params'])
except TaskClassException as ex:
self._handle_task_load_error(ex, [task_id])
task_id = None
self.run_succeeded = False
if task_id is not None and 'batch_task_ids' in r:
batch_tasks = filter(None, [
self._scheduled_tasks.get(batch_id) for batch_id in r['batch_task_ids']])
self._batch_running_tasks[task_id] = batch_tasks
return GetWorkResponse(
task_id=task_id,
running_tasks=running_tasks,
n_pending_tasks=r['n_pending_tasks'],
n_unique_pending=r['n_unique_pending'],
# TODO: For a tiny amount of time (a month?) we'll keep forwards compatibility
# That is you can user a newer client than server (Sep 2016)
n_pending_last_scheduled=r.get('n_pending_last_scheduled', 0),
worker_state=r.get('worker_state', WORKER_STATE_ACTIVE),
)
def _run_task(self, task_id):
task = self._scheduled_tasks[task_id]
task_process = self._create_task_process(task)
self._running_tasks[task_id] = task_process
if task_process.use_multiprocessing:
with fork_lock:
task_process.start()
else:
# Run in the same process
task_process.run()
def _create_task_process(self, task):
def update_tracking_url(tracking_url):
self._scheduler.add_task(
task_id=task.task_id,
worker=self._id,
status=RUNNING,
tracking_url=tracking_url,
)
def update_status_message(message):
self._scheduler.set_task_status_message(task.task_id, message)
return TaskProcess(
task, self._id, self._task_result_queue, update_tracking_url, update_status_message,
use_multiprocessing=bool(self.worker_processes > 1),
worker_timeout=self._config.timeout
)
def _purge_children(self):
"""
Find dead children and put a response on the result queue.
:return:
"""
for task_id, p in six.iteritems(self._running_tasks):
if not p.is_alive() and p.exitcode:
error_msg = 'Task {} died unexpectedly with exit code {}'.format(task_id, p.exitcode)
p.task.trigger_event(Event.PROCESS_FAILURE, p.task, error_msg)
elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive():
p.terminate()
error_msg = 'Task {} timed out after {} seconds and was terminated.'.format(task_id, p.task.worker_timeout)
p.task.trigger_event(Event.TIMEOUT, p.task, error_msg)
else:
continue
logger.info(error_msg)
self._task_result_queue.put((task_id, FAILED, error_msg, [], []))
def _handle_next_task(self):
"""
We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.
"""
while True:
self._purge_children() # Deal with subprocess failures
try:
task_id, status, expl, missing, new_requirements = (
self._task_result_queue.get(
timeout=self._config.wait_interval))
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if not task or task_id not in self._running_tasks:
continue
# Not a running task. Probably already removed.
# Maybe it yielded something?
# external task if run not implemented, retry-able if config option is enabled.
external_task_retryable = _is_external(task) and self._config.retry_external_tasks
if status == FAILED and not external_task_retryable:
self._email_task_failure(task, expl)
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params)
for module, name, params in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id,
task_id=task_id,
status=status,
expl=json.dumps(expl),
resources=task.process_resources(),
runnable=None,
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
new_deps=new_deps,
assistant=self._assistant)
self._running_tasks.pop(task_id)
# re-add task to reschedule missing dependencies
if missing:
reschedule = True
# keep out of infinite loops by not rescheduling too many times
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] >
self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)
return
def _sleeper(self):
# TODO is exponential backoff necessary?
while True:
jitter = self._config.wait_jitter
wait_interval = self._config.wait_interval + random.uniform(0, jitter)
logger.debug('Sleeping for %f seconds', wait_interval)
time.sleep(wait_interval)
yield
def _keep_alive(self, get_work_response):
"""
Returns true if a worker should stay alive given.
If worker-keep-alive is not set, this will always return false.
For an assistant, it will always return the value of worker-keep-alive.
Otherwise, it will return true for nonzero n_pending_tasks.
If worker-count-uniques is true, it will also
require that one of the tasks is unique to this worker.
"""
if not self._config.keep_alive:
return False
elif self._assistant:
return True
elif self._config.count_last_scheduled:
return get_work_response.n_pending_last_scheduled > 0
elif self._config.count_uniques:
return get_work_response.n_unique_pending > 0
else:
return get_work_response.n_pending_tasks > 0
def handle_interrupt(self, signum, _):
"""
Stops the assistant from asking for more work on SIGUSR1
"""
if signum == signal.SIGUSR1:
self._start_phasing_out()
def _start_phasing_out(self):
"""
Go into a mode where we dont ask for more work and quit once existing
tasks are done.
"""
self._config.keep_alive = False
self._stop_requesting_work = True
def run(self):
"""
Returns True if all scheduled tasks were executed successfully.
"""
logger.info('Running Worker with %d processes', self.worker_processes)
sleeper = self._sleeper()
self.run_succeeded = True
self._add_worker()
while True:
while len(self._running_tasks) >= self.worker_processes > 0:
logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
self._handle_next_task()
get_work_response = self._get_work()
if get_work_response.worker_state == WORKER_STATE_DISABLED:
self._start_phasing_out()
if get_work_response.task_id is None:
if not self._stop_requesting_work:
self._log_remote_tasks(get_work_response)
if len(self._running_tasks) == 0:
if self._keep_alive(get_work_response):
six.next(sleeper)
continue
else:
break
else:
self._handle_next_task()
continue
# task_id is not None:
logger.debug("Pending tasks: %s", get_work_response.n_pending_tasks)
self._run_task(get_work_response.task_id)
while len(self._running_tasks):
logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
self._handle_next_task()
return self.run_succeeded
| 1 | 16,239 | This is unnecessary. The declaration of `t0` on line 179 is still in scope inside the `except` block. | spotify-luigi | py |
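A minimal, standalone Python sketch of the scoping rule the comment above relies on (the `t0` name echoes the reviewed Luigi code, but `timed_call` and the rest of this snippet are hypothetical and not part of Luigi): a name bound inside a `try` suite is still visible in the matching `except` block, because Python scopes names per function rather than per block, so no extra declaration before the `try` is needed.

```python
import time

def timed_call(fn):
    try:
        t0 = time.time()  # bound inside the try suite
        return fn()
    except Exception:
        # t0 is still in scope here; no declaration outside the try is required
        print("failed after %.3f seconds" % (time.time() - t0))
        raise
```

The only caveat is that the name must actually have been bound before the exception is raised, which holds here because the assignment is the first statement of the `try` block.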
@@ -1,6 +1,15 @@
*/
+/**
+ * The border color when the client is marked.
+ * It has priority over the rest of beautiful border color properties.
+ * Note that only solid colors are supported.
+ * @beautiful beautiful.border_color_marked
+ * @param color
+ * @see request::border
+ */
+
/**
* The fallback border color when the client is floating.
* | 1 | */
/**
* The fallback border color when the client is floating.
*
* @beautiful beautiful.border_color_floating
* @param color
* @see request::border
* @see beautiful.border_color_floating_active
* @see beautiful.border_color_floating_normal
* @see beautiful.border_color_floating_urgent
* @see beautiful.border_color_floating_new
*/
/**
* The fallback border color when the client is mazimized.
*
* @beautiful beautiful.border_color_mazimized
* @param color
* @see request::border
* @see beautiful.border_color_maximized_active
* @see beautiful.border_color_maximized_normal
* @see beautiful.border_color_maximized_urgent
* @see beautiful.border_color_maximized_new
*/
/**
* The border color when the client is active.
*
* @beautiful beautiful.border_color_active
* @param color
* @see request::border
*/
/**
* The border color when the client is not active.
*
* @beautiful beautiful.border_color_normal
* @param color
* @see request::border
*/
/**
* The border color when the client has the urgent property set.
*
* @beautiful beautiful.border_color_urgent
* @param color
* @see request::border
*/
/**
* The border color when the client is not active and new.
*
* @beautiful beautiful.border_color_new
* @param color
* @see request::border
*/
/**
* The border color when the (floating) client is active.
*
* @beautiful beautiful.border_color_floating_active
* @param color
* @see request::border
*/
/**
* The border color when the (floating) client is not active.
*
* @beautiful beautiful.border_color_floating_normal
* @param color
* @see request::border
*/
/**
* The border color when the (floating) client has the urgent property set.
*
* @beautiful beautiful.border_color_floating_urgent
* @param color
* @see request::border
*/
/**
* The border color when the (floating) client is not active and new.
*
* @beautiful beautiful.border_color_floating_new
* @param color
* @see request::border
*/
/**
* The border color when the (maximized) client is active.
*
* @beautiful beautiful.border_color_maximized_active
* @param color
* @see request::border
*/
/**
* The border color when the (maximized) client is not active.
*
* @beautiful beautiful.border_color_maximized_normal
* @param color
* @see request::border
*/
/**
* The border color when the (maximized) client has the urgent property set.
*
* @beautiful beautiful.border_color_maximized_urgent
* @param color
* @see request::border
*/
/**
* The border color when the (maximized) client is not active and new.
*
* @beautiful beautiful.border_color_maximized_new
* @param color
* @see request::border
*/
/**
* The border color when the (fullscreen) client is active.
*
* @beautiful beautiful.border_color_fullscreen_active
* @param color
* @see request::border
*/
/**
* The border color when the (fullscreen) client is not active.
*
* @beautiful beautiful.border_color_fullscreen_normal
* @param color
* @see request::border
*/
/**
* The border color when the (fullscreen) client has the urgent property set.
*
* @beautiful beautiful.border_color_fullscreen_urgent
* @param color
* @see request::border
*/
/**
* The border color when the (fullscreen) client is not active and new.
*
* @beautiful beautiful.border_color_fullscreen_new
* @param color
* @see request::border
*/
/**
* The fallback border width when nothing else is set.
*
* @beautiful beautiful.border_width
* @param integer
* @see request::border
* @see beautiful.border_width_floating
* @see beautiful.border_width_mazimized
* @see beautiful.border_width_floating_active
* @see beautiful.border_width_floating_normal
* @see beautiful.border_width_floating_urgent
* @see beautiful.border_width_floating_new
* @see beautiful.border_width_maximized_active
* @see beautiful.border_width_maximized_normal
* @see beautiful.border_width_maximized_urgent
* @see beautiful.border_width_maximized_new
*/
/**
* The fallback border width when the client is floating.
*
* @beautiful beautiful.border_width_floating
* @param integer
* @see request::border
* @see beautiful.border_width_floating_active
* @see beautiful.border_width_floating_normal
* @see beautiful.border_width_floating_urgent
* @see beautiful.border_width_floating_new
*/
/**
* The fallback border width when the client is mazimized.
*
* @beautiful beautiful.border_width_mazimized
* @param integer
* @see request::border
* @see beautiful.border_width_maximized_active
* @see beautiful.border_width_maximized_normal
* @see beautiful.border_width_maximized_urgent
* @see beautiful.border_width_maximized_new
*/
/**
* The client border width for the normal clients.
*
* @beautiful beautiful.border_width_normal
* @param integer
* @see request::border
*/
/**
* The client border width for the active client.
*
* @beautiful beautiful.border_width_active
* @param integer
* @see request::border
*/
/**
* The client border width for the urgent clients.
*
* @beautiful beautiful.border_width_urgent
* @param integer
* @see request::border
*/
/**
* The client border width for the new clients.
*
* @beautiful beautiful.border_width_new
* @param integer
* @see request::border
*/
/**
* The client border width for the normal floating clients.
*
* @beautiful beautiful.border_width_floating_normal
* @param integer
* @see request::border
*/
/**
* The client border width for the active floating client.
*
* @beautiful beautiful.border_width_floating_active
* @param integer
* @see request::border
*/
/**
* The client border width for the urgent floating clients.
*
* @beautiful beautiful.border_width_floating_urgent
* @param integer
* @see request::border
*/
/**
* The client border width for the new floating clients.
*
* @beautiful beautiful.border_width_floating_new
* @param integer
* @see request::border
*/
/**
* The client border width for the normal maximized clients.
*
* @beautiful beautiful.border_width_maximized_normal
* @param integer
* @see request::border
*/
/**
* The client border width for the active maximized client.
*
* @beautiful beautiful.border_width_maximized_active
* @param integer
* @see request::border
*/
/**
* The client border width for the urgent maximized clients.
*
* @beautiful beautiful.border_width_maximized_urgent
* @param integer
* @see request::border
*/
/**
* The client border width for the new maximized clients.
*
* @beautiful beautiful.border_width_maximized_new
* @param integer
* @see request::border
*/
/**
* The client border width for the normal fullscreen clients.
*
* @beautiful beautiful.border_width_fullscreen_normal
* @param integer
* @see request::border
*/
/**
* The client border width for the active fullscreen client.
*
* @beautiful beautiful.border_width_fullscreen_active
* @param integer
* @see request::border
*/
/**
* The client border width for the urgent fullscreen clients.
*
* @beautiful beautiful.border_width_fullscreen_urgent
* @param integer
* @see request::border
*/
/**
* The client border width for the new fullscreen clients.
*
* @beautiful beautiful.border_width_fullscreen_new
* @param integer
* @see request::border
*/
/**
* The client opacity for the normal clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_normal
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the active client.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_active
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the urgent clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_urgent
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the new clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_new
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the normal floating clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_floating_normal
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the active floating client.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_floating_active
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the urgent floating clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_floating_urgent
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the new floating clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_floating_new
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the normal maximized clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_maximized_normal
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the active maximized client.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_maximized_active
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the urgent maximized clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_maximized_urgent
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the new maximized clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_maximized_new
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the normal fullscreen clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_fullscreen_normal
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the active fullscreen client.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_fullscreen_active
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the urgent fullscreen clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_fullscreen_urgent
* @param[opt=1] number
* @see request::border
*/
/**
* The client opacity for the new fullscreen clients.
*
* A number between 0 and 1.
*
* @beautiful beautiful.opacity_fullscreen_new
* @param[opt=1] number
* @see request::border
*/
/**
* The marked clients border color.
* Note that only solid colors are supported.
* @beautiful beautiful.border_marked
* @param color
*/
/*
| 1 | 18,741 | The `border_color_`... properties in general only support solid colors, not just this one. Maybe `@param solid_color` should be used instead of adding this note to every one of them? | awesomeWM-awesome | c
@@ -0,0 +1,19 @@
+module.exports = {
+ roots: [
+ "<rootDir>/javascript/grid-ui/src"
+ ],
+ testMatch: [
+ "<rootDir>/javascript/grid-ui/src/tests/**/*.test.tsx"
+ ],
+ transform: {
+ "^.+\\.(ts|tsx)$": "ts-jest"
+ },
+ moduleFileExtensions: ["ts", "tsx", "js", "jsx", "json", "node"],
+ snapshotSerializers: ["enzyme-to-json/serializer"],
+ setupFilesAfterEnv: ["<rootDir>/setupTests.ts"],
+ testEnvironment: "jsdom",
+ moduleNameMapper: {
+ ".+\\.(svg|png|jpg|css)$": "identity-obj-proxy",
+ "selenium/javascript/grid-ui/(.*)": "<rootDir>/$1"
+ }
+} | 1 | 1 | 18,309 | We don't need this file, we can use the one that is in the grid-ui directory | SeleniumHQ-selenium | js |
|
@@ -48,6 +48,10 @@ public class BazelIgnoreParser {
try {
for (String path : FileOperationProvider.getInstance().readAllLines(bazelIgnoreFile)) {
+ if (!isEmptyLine(path)) {
+ continue;
+ }
+
if (path.endsWith("/")) {
// .bazelignore allows the "/" path suffix, but WorkspacePath doesn't.
path = path.substring(0, path.length() - 1); | 1 | /*
* Copyright 2019 The Bazel Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.idea.blaze.base.sync.projectview;
import com.google.common.collect.ImmutableList;
import com.google.idea.blaze.base.io.FileOperationProvider;
import com.google.idea.blaze.base.model.primitives.WorkspacePath;
import com.google.idea.blaze.base.model.primitives.WorkspaceRoot;
import com.intellij.openapi.diagnostic.Logger;
import java.io.File;
import java.io.IOException;
/** A parser for .bazelignore files, which tell Bazel which paths to ignore. */
public class BazelIgnoreParser {
private static final Logger logger = Logger.getInstance(BazelIgnoreParser.class);
private final File bazelIgnoreFile;
public BazelIgnoreParser(WorkspaceRoot workspaceRoot) {
this.bazelIgnoreFile = workspaceRoot.fileForPath(new WorkspacePath(".bazelignore"));
}
/**
* Parse a .bazelignore file (if it exists) for workspace relative paths.
*
* @return a list of validated WorkspacePaths.
*/
public ImmutableList<WorkspacePath> getIgnoredPaths() {
if (!FileOperationProvider.getInstance().exists(bazelIgnoreFile)) {
return ImmutableList.of();
}
ImmutableList.Builder<WorkspacePath> ignoredPaths = ImmutableList.builder();
try {
for (String path : FileOperationProvider.getInstance().readAllLines(bazelIgnoreFile)) {
if (path.endsWith("/")) {
// .bazelignore allows the "/" path suffix, but WorkspacePath doesn't.
path = path.substring(0, path.length() - 1);
}
if (!WorkspacePath.isValid(path)) {
logger.warn(
String.format(
"Found %s in .bazelignore, but unable to parse as relative workspace path.",
path));
continue;
}
ignoredPaths.add(new WorkspacePath(path));
}
} catch (IOException e) {
logger.warn(String.format("Unable to read .bazelignore file even though it exists."));
}
return ignoredPaths.build();
}
}
| 1 | 5,478 | FYI: inlined this method in the internal review. | bazelbuild-intellij | java |
@@ -17,10 +17,8 @@ import org.junit.Test;
import org.junit.runner.RunWith;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Date;
import java.util.List;
-import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.core.feed.Feed; | 1 | package de.test.antennapod.service.download;
import android.content.Context;
import android.content.Intent;
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import androidx.core.util.Consumer;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import de.test.antennapod.EspressoTestUtils;
import org.awaitility.Awaitility;
import org.awaitility.core.ConditionTimeoutException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.core.feed.Feed;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.FeedMedia;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.download.DownloadRequest;
import de.danoeh.antennapod.core.service.download.DownloadService;
import de.danoeh.antennapod.core.service.download.DownloadStatus;
import de.danoeh.antennapod.core.service.download.Downloader;
import de.danoeh.antennapod.core.service.download.DownloaderFactory;
import de.danoeh.antennapod.core.service.download.StubDownloader;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.storage.DownloadRequester;
import static de.test.antennapod.util.event.DownloadEventListener.withDownloadEventListener;
import static de.test.antennapod.util.event.FeedItemEventListener.withFeedItemEventListener;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* @see HttpDownloaderTest for the test of actual download (and saving the file).
*/
@RunWith(AndroidJUnit4.class)
public class DownloadServiceTest {
private FeedMedia testMedia11 = null;
private DownloaderFactory origFactory = null;
@Before
public void setUp() throws Exception {
EspressoTestUtils.clearDatabase();
EspressoTestUtils.clearPreferences();
origFactory = DownloadService.getDownloaderFactory();
Feed testFeed = setUpTestFeeds();
testMedia11 = testFeed.getItemAtIndex(0).getMedia();
}
private Feed setUpTestFeeds() throws Exception {
// To avoid complication in case of test failures, leaving behind orphaned
// media files: add a timestamp so that each test run will have its own directory for media files.
Feed feed = new Feed("url", null, "Test Feed title 1 " + System.currentTimeMillis());
List<FeedItem> items = new ArrayList<>();
feed.setItems(items);
FeedItem item1 = new FeedItem(0, "Item 1-1", "Item 1-1", "url", new Date(), FeedItem.NEW, feed);
items.add(item1);
FeedMedia media1 = new FeedMedia(0, item1, 123, 1, 1, "audio/mp3", null, "http://example.com/episode.mp3", false, null, 0, 0);
item1.setMedia(media1);
DBWriter.setFeedItem(item1).get();
return feed;
}
@After
public void tearDown() throws Exception {
DownloadService.setDownloaderFactory(origFactory);
Context context = InstrumentationRegistry.getInstrumentation().getTargetContext();
DownloadRequester.getInstance().cancelAllDownloads(context);
context.stopService(new Intent(context, DownloadService.class));
EspressoTestUtils.tryKillDownloadService();
}
@Test
public void testEventsGeneratedCaseMediaDownloadSuccess_noEnqueue() throws Exception {
doTestEventsGeneratedCaseMediaDownloadSuccess(false, 1);
}
@Test
public void testEventsGeneratedCaseMediaDownloadSuccess_withEnqueue() throws Exception {
// enqueue itself generates additional FeedItem event
doTestEventsGeneratedCaseMediaDownloadSuccess(true, 2);
}
private void doTestEventsGeneratedCaseMediaDownloadSuccess(boolean enqueueDownloaded,
int numEventsExpected)
throws Exception {
// create a stub download that returns successful
//
// OPEN: Ideally, I'd like the download time long enough so that multiple in-progress DownloadEvents
// are generated (to simulate typical download), but it'll make download time quite long (1-2 seconds)
// to do so
DownloadService.setDownloaderFactory(new StubDownloaderFactory(50, DownloadStatus::setSuccessful));
UserPreferences.setEnqueueDownloadedEpisodes(enqueueDownloaded);
withFeedItemEventListener(feedItemEventListener -> {
try {
assertEquals(0, feedItemEventListener.getEvents().size());
assertFalse("The media in test should not yet been downloaded",
DBReader.getFeedMedia(testMedia11.getId()).isDownloaded());
DownloadRequester.getInstance().downloadMedia(false, InstrumentationRegistry
.getInstrumentation().getTargetContext(), true, testMedia11.getItem());
Awaitility.await()
.atMost(5000, TimeUnit.MILLISECONDS)
.until(() -> feedItemEventListener.getEvents().size() >= numEventsExpected);
assertTrue("After media download has completed, FeedMedia object in db should indicate so.",
DBReader.getFeedMedia(testMedia11.getId()).isDownloaded());
assertEquals("The FeedItem should have been " + (enqueueDownloaded ? "" : "not ") + "enqueued",
enqueueDownloaded,
DBReader.getQueueIDList().contains(testMedia11.getItem().getId()));
} catch (ConditionTimeoutException cte) {
fail("The expected FeedItemEvent (for media download complete) has not been posted. "
+ cte.getMessage());
}
});
}
@Test
public void testCancelDownload_UndoEnqueue_Normal() throws Exception {
doTestCancelDownload_UndoEnqueue(false);
}
@Test
public void testCancelDownload_UndoEnqueue_AlreadyInQueue() throws Exception {
doTestCancelDownload_UndoEnqueue(true);
}
private void doTestCancelDownload_UndoEnqueue(boolean itemAlreadyInQueue) throws Exception {
Context context = InstrumentationRegistry.getInstrumentation().getTargetContext();
// let download take longer to ensure the test can cancel the download in time
DownloadService.setDownloaderFactory(
new StubDownloaderFactory(30000, DownloadStatus::setSuccessful));
UserPreferences.setEnqueueDownloadedEpisodes(true);
UserPreferences.setEnableAutodownload(false);
final long item1Id = testMedia11.getItem().getId();
if (itemAlreadyInQueue) {
// simulate item already in queue condition
DBWriter.addQueueItem(context, false, item1Id).get();
assertTrue(DBReader.getQueueIDList().contains(item1Id));
} else {
assertFalse(DBReader.getQueueIDList().contains(item1Id));
}
withFeedItemEventListener(feedItemEventListener -> {
DownloadRequester.getInstance().downloadMedia(false, context, true, testMedia11.getItem());
withDownloadEventListener(downloadEventListener ->
Awaitility.await("download is actually running")
.atMost(5000, TimeUnit.MILLISECONDS)
.until(() -> downloadEventListener.getLatestEvent() != null
&& downloadEventListener.getLatestEvent().update.mediaIds.length > 0
&& downloadEventListener.getLatestEvent().update.mediaIds[0] == testMedia11.getId()));
if (itemAlreadyInQueue) {
assertEquals("download service receives the request - no event is expected before cancel is issued",
0, feedItemEventListener.getEvents().size());
} else {
Awaitility.await("item enqueue event")
.atMost(2000, TimeUnit.MILLISECONDS)
.until(() -> feedItemEventListener.getEvents().size() >= 1);
}
DownloadRequester.getInstance().cancelDownload(context, testMedia11);
final int totalNumEventsExpected = itemAlreadyInQueue ? 1 : 3;
Awaitility.await("item dequeue event + download termination event")
.atMost(2000, TimeUnit.MILLISECONDS)
.until(() -> feedItemEventListener.getEvents().size() >= totalNumEventsExpected);
assertFalse("The download should have been canceled",
DBReader.getFeedMedia(testMedia11.getId()).isDownloaded());
if (itemAlreadyInQueue) {
assertTrue("The FeedItem should still be in the queue after the download is cancelled."
+ " It's there before download.",
DBReader.getQueueIDList().contains(item1Id));
} else {
assertFalse("The FeedItem should not be in the queue after the download is cancelled.",
DBReader.getQueueIDList().contains(item1Id));
}
});
}
private static class StubDownloaderFactory implements DownloaderFactory {
private final long downloadTime;
@NonNull
private final Consumer<DownloadStatus> onDownloadComplete;
StubDownloaderFactory(long downloadTime, @NonNull Consumer<DownloadStatus> onDownloadComplete) {
this.downloadTime = downloadTime;
this.onDownloadComplete = onDownloadComplete;
}
@Nullable
@Override
public Downloader create(@NonNull DownloadRequest request) {
return new StubDownloader(request, downloadTime, onDownloadComplete);
}
}
}
| 1 | 17,830 | Weird, the checksum between this file and the one on branch `develop` is the same; not sure why it's showing a diff. | AntennaPod-AntennaPod | java
@@ -47,7 +47,7 @@ namespace OpenTelemetry.Exporter
foreach (var metric in exporter.Metrics)
{
var builder = new PrometheusMetricBuilder()
- .WithName(metric.Name)
+ .WithName(metric.Meter.Name + metric.Name)
.WithDescription(metric.Description);
switch (metric.MetricType) | 1 | // <copyright file="PrometheusExporterExtensions.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System.Globalization;
using System.IO;
using System.Text;
using OpenTelemetry.Exporter.Prometheus.Implementation;
using OpenTelemetry.Metrics;
using static OpenTelemetry.Exporter.Prometheus.Implementation.PrometheusMetricBuilder;
namespace OpenTelemetry.Exporter
{
/// <summary>
/// Helper to write metrics collection from exporter in Prometheus format.
/// </summary>
public static class PrometheusExporterExtensions
{
private const string PrometheusCounterType = "counter";
private const string PrometheusGaugeType = "gauge";
private const string PrometheusHistogramType = "histogram";
private const string PrometheusHistogramSumPostFix = "_sum";
private const string PrometheusHistogramCountPostFix = "_count";
private const string PrometheusHistogramBucketPostFix = "_bucket";
private const string PrometheusHistogramBucketLabelPositiveInfinity = "+Inf";
private const string PrometheusHistogramBucketLabelLessThan = "le";
/// <summary>
/// Serialize to Prometheus Format.
/// </summary>
/// <param name="exporter">Prometheus Exporter.</param>
/// <param name="writer">StreamWriter to write to.</param>
public static void WriteMetricsCollection(this PrometheusExporter exporter, StreamWriter writer)
{
foreach (var metric in exporter.Metrics)
{
var builder = new PrometheusMetricBuilder()
.WithName(metric.Name)
.WithDescription(metric.Description);
switch (metric.MetricType)
{
case MetricType.LongSum:
{
builder = builder.WithType(PrometheusCounterType);
foreach (ref var metricPoint in metric.GetMetricPoints())
{
var metricValueBuilder = builder.AddValue();
metricValueBuilder = metricValueBuilder.WithValue(metricPoint.LongValue);
metricValueBuilder.AddLabels(metricPoint.Keys, metricPoint.Values);
}
builder.Write(writer);
break;
}
case MetricType.DoubleSum:
{
builder = builder.WithType(PrometheusCounterType);
foreach (ref var metricPoint in metric.GetMetricPoints())
{
var metricValueBuilder = builder.AddValue();
metricValueBuilder = metricValueBuilder.WithValue(metricPoint.DoubleValue);
metricValueBuilder.AddLabels(metricPoint.Keys, metricPoint.Values);
}
builder.Write(writer);
break;
}
case MetricType.LongGauge:
{
builder = builder.WithType(PrometheusGaugeType);
foreach (ref var metricPoint in metric.GetMetricPoints())
{
var metricValueBuilder = builder.AddValue();
metricValueBuilder = metricValueBuilder.WithValue(metricPoint.LongValue);
metricValueBuilder.AddLabels(metricPoint.Keys, metricPoint.Values);
}
builder.Write(writer);
break;
}
case MetricType.DoubleGauge:
{
builder = builder.WithType(PrometheusGaugeType);
foreach (ref var metricPoint in metric.GetMetricPoints())
{
var metricValueBuilder = builder.AddValue();
metricValueBuilder = metricValueBuilder.WithValue(metricPoint.DoubleValue);
metricValueBuilder.AddLabels(metricPoint.Keys, metricPoint.Values);
}
builder.Write(writer);
break;
}
case MetricType.Histogram:
{
/*
* For Histogram we emit one row for Sum, Count and as
* many rows as number of buckets.
* myHistogram_sum{tag1="value1",tag2="value2"} 258330 1629860660991
* myHistogram_count{tag1="value1",tag2="value2"} 355 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="0"} 0 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="5"} 2 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="10"} 4 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="25"} 6 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="50"} 12 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="75"} 19 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="100"} 26 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="250"} 65 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="500"} 128 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="1000"} 241 1629860660991
* myHistogram_bucket{tag1="value1",tag2="value2",le="+Inf"} 355 1629860660991
*/
builder = builder.WithType(PrometheusHistogramType);
foreach (ref var metricPoint in metric.GetMetricPoints())
{
var metricValueBuilderSum = builder.AddValue();
metricValueBuilderSum.WithName(metric.Name + PrometheusHistogramSumPostFix);
metricValueBuilderSum = metricValueBuilderSum.WithValue(metricPoint.DoubleValue);
metricValueBuilderSum.AddLabels(metricPoint.Keys, metricPoint.Values);
var metricValueBuilderCount = builder.AddValue();
metricValueBuilderCount.WithName(metric.Name + PrometheusHistogramCountPostFix);
metricValueBuilderCount = metricValueBuilderCount.WithValue(metricPoint.LongValue);
metricValueBuilderCount.AddLabels(metricPoint.Keys, metricPoint.Values);
long totalCount = 0;
for (int i = 0; i < metricPoint.ExplicitBounds.Length + 1; i++)
{
totalCount += metricPoint.BucketCounts[i];
var metricValueBuilderBuckets = builder.AddValue();
metricValueBuilderBuckets.WithName(metric.Name + PrometheusHistogramBucketPostFix);
metricValueBuilderBuckets = metricValueBuilderBuckets.WithValue(totalCount);
metricValueBuilderBuckets.AddLabels(metricPoint.Keys, metricPoint.Values);
var bucketName = i == metricPoint.ExplicitBounds.Length ?
PrometheusHistogramBucketLabelPositiveInfinity : metricPoint.ExplicitBounds[i].ToString(CultureInfo.InvariantCulture);
metricValueBuilderBuckets.WithLabel(PrometheusHistogramBucketLabelLessThan, bucketName);
}
}
builder.Write(writer);
break;
}
}
}
}
/// <summary>
/// Get Metrics Collection as a string.
/// </summary>
/// <param name="exporter"> Prometheus Exporter. </param>
/// <returns>Metrics serialized to string in Prometheus format.</returns>
public static string GetMetricsCollection(this PrometheusExporter exporter)
{
using var stream = new MemoryStream();
using var writer = new StreamWriter(stream);
WriteMetricsCollection(exporter, writer);
writer.Flush();
return Encoding.UTF8.GetString(stream.ToArray(), 0, (int)stream.Length);
}
private static void AddLabels(this PrometheusMetricValueBuilder valueBuilder, string[] keys, object[] values)
{
if (keys != null)
{
for (int i = 0; i < keys.Length; i++)
{
valueBuilder.WithLabel(keys[i], values[i].ToString());
}
}
}
}
}
| 1 | 21,561 | Prometheus doesn't have a concept of Meter (like OTLP does). Trying to see if using the meter name as a namespace is a good approach to avoid name collisions when the same instrument name is used by instruments from different Meters. | open-telemetry-opentelemetry-dotnet | .cs
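A small, hypothetical sketch in plain Python (not the exporter's C# code; every name below is made up) of the collision described above: two Meters can each define an instrument called `requests`, so keying the exported series by instrument name alone lets one value silently overwrite the other, whereas prefixing with the meter name, as the patch does with `metric.Meter.Name + metric.Name`, keeps the series distinct.

```python
# Each entry stands in for one exported metric stream: meter name, instrument name, value.
metrics = [
    {"meter": "FrontEnd", "name": "requests", "value": 10},
    {"meter": "BackEnd", "name": "requests", "value": 99},
]

def export(metrics, prefix_with_meter):
    out = {}
    for m in metrics:
        key = (m["meter"] + m["name"]) if prefix_with_meter else m["name"]
        out[key] = m["value"]  # on a key clash, later entries overwrite earlier ones
    return out

print(export(metrics, prefix_with_meter=False))  # {'requests': 99} -- the FrontEnd series is lost
print(export(metrics, prefix_with_meter=True))   # {'FrontEndrequests': 10, 'BackEndrequests': 99}
```

Note that the bare concatenation yields names like `FrontEndrequests`; whether a separator (and Prometheus name sanitisation) is wanted on top of the prefix is a separate question.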
@@ -78,10 +78,17 @@ class LibraryCardsController extends AbstractBase
// Connect to the ILS for login drivers:
$catalog = $this->getILS();
+ $config = $this->getConfig();
+ $allowConnectingCards = !empty(
+ $config->Catalog
+ ->auth_based_library_cards
+ ) &&
+ ($this->getAuthManager()->getAuthMethod() == 'Shibboleth');
return $this->createViewModel(
[
'libraryCards' => $user->getLibraryCards(),
- 'multipleTargets' => $catalog->checkCapability('getLoginDrivers')
+ 'multipleTargets' => $catalog->checkCapability('getLoginDrivers'),
+ 'allowConnectingCards' => $allowConnectingCards,
]
);
} | 1 | <?php
/**
* LibraryCards Controller
*
* PHP version 7
*
* Copyright (C) Villanova University 2010.
* Copyright (C) The National Library of Finland 2015-2019.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Controller
* @author Demian Katz <[email protected]>
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
namespace VuFind\Controller;
use VuFind\Exception\ILS as ILSException;
/**
* Controller for the library card functionality.
*
* @category VuFind
* @package Controller
* @author Demian Katz <[email protected]>
* @author Ere Maijala <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Site
*/
class LibraryCardsController extends AbstractBase
{
/**
* Send user's library cards to the view
*
* @return mixed
*/
public function homeAction()
{
if (!($user = $this->getUser())) {
return $this->forceLogin();
}
// Check for "delete card" request; parameter may be in GET or POST depending
// on calling context.
$deleteId = $this->params()->fromPost(
'delete', $this->params()->fromQuery('delete')
);
if ($deleteId) {
// If the user already confirmed the operation, perform the delete now;
// otherwise prompt for confirmation:
$confirm = $this->params()->fromPost(
'confirm', $this->params()->fromQuery('confirm')
);
if ($confirm) {
$success = $this->performDeleteLibraryCard($deleteId);
if ($success !== true) {
return $success;
}
} else {
return $this->confirmDeleteLibraryCard($deleteId);
}
}
// Connect to the ILS for login drivers:
$catalog = $this->getILS();
return $this->createViewModel(
[
'libraryCards' => $user->getLibraryCards(),
'multipleTargets' => $catalog->checkCapability('getLoginDrivers')
]
);
}
/**
* Send user's library card to the edit view
*
* @return mixed
*/
public function editCardAction()
{
// User must be logged in to edit library cards:
$user = $this->getUser();
if ($user == false) {
return $this->forceLogin();
}
// Process email authentication:
if ($this->params()->fromQuery('auth_method') === 'Email'
&& ($hash = $this->params()->fromQuery('hash'))
) {
return $this->processEmailLink($user, $hash);
}
// Process form submission:
if ($this->formWasSubmitted('submit')) {
if ($redirect = $this->processEditLibraryCard($user)) {
return $redirect;
}
}
$id = $this->params()->fromRoute('id', $this->params()->fromQuery('id'));
$card = $user->getLibraryCard($id == 'NEW' ? null : $id);
$target = null;
$username = $card->cat_username;
$loginSettings = $this->getILSLoginSettings();
// Split target and username if multiple login targets are available:
if ($loginSettings['targets'] && strstr($username, '.')) {
list($target, $username) = explode('.', $username, 2);
}
$cardName = $this->params()->fromPost('card_name', $card->card_name);
$username = $this->params()->fromPost('username', $username);
$target = $this->params()->fromPost('target', $target);
// Send the card to the view:
return $this->createViewModel(
[
'card' => $card,
'cardName' => $cardName,
'target' => $target ?: $loginSettings['defaultTarget'],
'username' => $username,
'targets' => $loginSettings['targets'],
'defaultTarget' => $loginSettings['defaultTarget'],
'loginMethod' => $loginSettings['loginMethod'],
'loginMethods' => $loginSettings['loginMethods'],
]
);
}
/**
* Creates a confirmation box to delete or not delete the current list
*
* @return mixed
*/
public function deleteCardAction()
{
// User must be logged in to edit library cards:
$user = $this->getUser();
if ($user == false) {
return $this->forceLogin();
}
// Get requested library card ID:
$cardID = $this->params()
->fromPost('cardID', $this->params()->fromQuery('cardID'));
// Have we confirmed this?
$confirm = $this->params()->fromPost(
'confirm', $this->params()->fromQuery('confirm')
);
if ($confirm) {
$user->deleteLibraryCard($cardID);
// Success Message
$this->flashMessenger()->addMessage('Library Card Deleted', 'success');
// Redirect to MyResearch library cards
return $this->redirect()->toRoute('librarycards-home');
}
// If we got this far, we must display a confirmation message:
return $this->confirm(
'confirm_delete_library_card_brief',
$this->url()->fromRoute('librarycards-deletecard'),
$this->url()->fromRoute('librarycards-home'),
'confirm_delete_library_card_text', ['cardID' => $cardID]
);
}
/**
* When redirecting after selecting a library card, adjust the URL to make
* sure it will work correctly.
*
* @param string $url URL to adjust
*
* @return string
*/
protected function adjustCardRedirectUrl($url)
{
// If there is pagination in the URL, reset it to page 1, since the
// new card may have a different number of pages of data:
return preg_replace('/([&?]page)=[0-9]+/', '$1=1', $url);
}
/**
* Activates a library card
*
* @return \Laminas\Http\Response
*/
public function selectCardAction()
{
$user = $this->getUser();
if ($user == false) {
return $this->forceLogin();
}
$cardID = $this->params()->fromQuery('cardID');
if (null === $cardID) {
return $this->redirect()->toRoute('myresearch-home');
}
$user->activateLibraryCard($cardID);
// Connect to the ILS and check that the credentials are correct:
try {
$catalog = $this->getILS();
$patron = $catalog->patronLogin(
$user->cat_username,
$user->getCatPassword()
);
if (!$patron) {
$this->flashMessenger()
->addMessage('authentication_error_invalid', 'error');
}
} catch (ILSException $e) {
$this->flashMessenger()
->addMessage('authentication_error_technical', 'error');
}
$this->setFollowupUrlToReferer();
if ($url = $this->getFollowupUrl()) {
$this->clearFollowupUrl();
return $this->redirect()->toUrl($this->adjustCardRedirectUrl($url));
}
return $this->redirect()->toRoute('myresearch-home');
}
/**
* Process the "edit library card" submission.
*
* @param \VuFind\Db\Row\User $user Logged in user
*
* @return object|bool Response object if redirect is
* needed, false if form needs to be redisplayed.
*/
protected function processEditLibraryCard($user)
{
$cardName = $this->params()->fromPost('card_name', '');
$target = $this->params()->fromPost('target', '');
$username = $this->params()->fromPost('username', '');
$password = $this->params()->fromPost('password', '');
$id = $this->params()->fromRoute('id', $this->params()->fromQuery('id'));
if (!$username) {
$this->flashMessenger()
->addMessage('authentication_error_blank', 'error');
return false;
}
if ($target) {
$username = "$target.$username";
}
// Check the credentials if the username is changed or a new password is
// entered:
$card = $user->getLibraryCard($id == 'NEW' ? null : $id);
if ($card->cat_username !== $username || trim($password)) {
// Connect to the ILS and check that the credentials are correct:
$loginMethod = $this->getILSLoginMethod($target);
$catalog = $this->getILS();
try {
$patron = $catalog->patronLogin($username, $password);
} catch (ILSException $e) {
$this->flashMessenger()->addErrorMessage('ils_connection_failed');
return false;
}
if ('password' === $loginMethod && !$patron) {
$this->flashMessenger()
->addMessage('authentication_error_invalid', 'error');
return false;
}
if ('email' === $loginMethod) {
if ($patron) {
$info = $patron;
$info['cardID'] = $id;
$info['cardName'] = $cardName;
$emailAuthenticator = $this->serviceLocator
->get(\VuFind\Auth\EmailAuthenticator::class);
$emailAuthenticator->sendAuthenticationLink(
$info['email'],
$info,
['auth_method' => 'Email'],
'editLibraryCard'
);
}
// Don't reveal the result
$this->flashMessenger()->addSuccessMessage('email_login_link_sent');
return $this->redirect()->toRoute('librarycards-home');
}
}
try {
$user->saveLibraryCard(
$id == 'NEW' ? null : $id, $cardName, $username, $password
);
} catch (\VuFind\Exception\LibraryCard $e) {
$this->flashMessenger()->addMessage($e->getMessage(), 'error');
return false;
}
return $this->redirect()->toRoute('librarycards-home');
}
/**
* Process library card addition via an email link
*
* @param User $user User object
* @param string $hash Hash
*
* @return \Laminas\Http\Response Response object
*/
protected function processEmailLink($user, $hash)
{
$emailAuthenticator = $this->serviceLocator
->get(\VuFind\Auth\EmailAuthenticator::class);
try {
$info = $emailAuthenticator->authenticate($hash);
$user->saveLibraryCard(
'NEW' === $info['cardID'] ? null : $info['cardID'],
$info['cardName'],
$info['cat_username'],
' '
);
} catch (\VuFind\Exception\Auth $e) {
$this->flashMessenger()->addErrorMessage($e->getMessage());
} catch (\VuFind\Exception\LibraryCard $e) {
$this->flashMessenger()->addErrorMessage($e->getMessage());
}
return $this->redirect()->toRoute('librarycards-home');
}
}
| 1 | 30,733 | The formatting of this is a bit strange; I'd suggest collapsing this back to a single line, and moving the `&&` to the beginning of the second line to meet the line length restriction. | vufind-org-vufind | php |
@@ -1791,7 +1791,7 @@ create_and_initialize_module_data(app_pc start, app_pc end, app_pc entry_point,
copy->segments[i].end = os_segments[i].end;
copy->segments[i].prot = os_segments[i].prot;
}
- } else
+ } else if (segments != NULL)
memcpy(copy->segments, segments, num_segments*sizeof(module_segment_data_t));
copy->timestamp = timestamp;
# ifdef MACOS | 1 | /* ******************************************************************************
* Copyright (c) 2010-2017 Google, Inc. All rights reserved.
* Copyright (c) 2010-2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2002-2010 VMware, Inc. All rights reserved.
* ******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2002-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2002 Hewlett-Packard Company */
/*
* instrument.c - interface for instrumentation
*/
#include "../globals.h" /* just to disable warning C4206 about an empty file */
#include "instrument.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "instrlist.h"
#include "decode.h"
#include "disassemble.h"
#include "../fragment.h"
#include "../emit.h"
#include "../link.h"
#include "../monitor.h" /* for mark_trace_head */
#include <string.h> /* for strstr */
#include <stdarg.h> /* for varargs */
#include "../nudge.h" /* for nudge_internal() */
#include "../synch.h"
#include "../annotations.h"
#include "../translate.h"
#ifdef UNIX
# include <sys/time.h> /* ITIMER_* */
# include "../unix/module.h" /* redirect_* functions */
#endif
#ifdef CLIENT_INTERFACE
/* in utils.c, not exported to everyone */
extern ssize_t do_file_write(file_t f, const char *fmt, va_list ap);
#ifdef DEBUG
/* case 10450: give messages to clients */
/* we can't undef ASSERT b/c of DYNAMO_OPTION */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
#endif
/* PR 200065: User passes us the shared library, we look up "dr_init"
* or "dr_client_main" and call it. From there, the client can register which events it
* wishes to receive.
*/
#define INSTRUMENT_INIT_NAME_LEGACY "dr_init"
#define INSTRUMENT_INIT_NAME "dr_client_main"
/* PR 250952: version check
* If changing this, don't forget to update:
* - lib/dr_defines.h _USES_DR_VERSION_
* - api/docs/footer.html
*/
#define USES_DR_VERSION_NAME "_USES_DR_VERSION_"
/* Should we expose this for use in samples/tracedump.c?
* Also, if we change this, need to change the symlink generation
* in core/CMakeLists.txt: at that point should share single define.
*/
/* OLDEST_COMPATIBLE_VERSION now comes from configure.h */
/* The 3rd version number, the bugfix/patch number, should not affect
* compatibility, so our version check number simply uses:
* major*100 + minor
* Which gives us room for 100 minor versions per major.
*/
#define NEWEST_COMPATIBLE_VERSION CURRENT_API_VERSION
/* Store the unique not-part-of-version build number (the version
* BUILD_NUMBER is limited to 64K and is not guaranteed to be unique)
* somewhere accessible at a customer site. We could alternatively
* pull it out of our DYNAMORIO_DEFINES string.
*/
DR_API const char *unique_build_number = STRINGIFY(UNIQUE_BUILD_NUMBER);
/* Acquire when registering or unregistering event callbacks
* Also held when invoking events, which happens much more often
* than registration changes, so we use rwlock
*/
DECLARE_CXTSWPROT_VAR(static read_write_lock_t callback_registration_lock,
INIT_READWRITE_LOCK(callback_registration_lock));
/* Structures for maintaining lists of event callbacks */
typedef void (*callback_t)(void);
typedef struct _callback_list_t {
callback_t *callbacks; /* array of callback functions */
size_t num; /* number of callbacks registered */
size_t size; /* allocated space (may be larger than num) */
} callback_list_t;
/* This is a little convoluted. The following is a macro to iterate
* over a list of callbacks and call each function. We use a macro
* instead of a function so we can pass the function type and perform
* a typecast. We need to copy the callback list before iterating to
* support the possibility of one callback unregistering another and
* messing up the list while we're iterating. We'll optimize the case
* for 5 or fewer registered callbacks and stack-allocate the temp
* list. Otherwise, we'll heap-allocate the temp.
*
* We allow the args to use the var "idx" to access the client index.
*
* We consider the first registered callback to have the highest
* priority and call it last. If we gave the last registered callback
* the highest priority, a client could re-register a routine to
* increase its priority. That seems a little weird.
*/
/*
*/
#define FAST_COPY_SIZE 5
#define call_all_ret(ret, retop, postop, vec, type, ...) \
do { \
size_t idx, num; \
/* we will be called even if no callbacks (i.e., (vec).num == 0) */ \
/* we guarantee we're in DR state at all callbacks and clean calls */ \
/* XXX: add CLIENT_ASSERT here */ \
read_lock(&callback_registration_lock); \
num = (vec).num; \
if (num == 0) { \
read_unlock(&callback_registration_lock); \
} \
else if (num <= FAST_COPY_SIZE) { \
callback_t tmp[FAST_COPY_SIZE]; \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
read_unlock(&callback_registration_lock); \
for (idx=0; idx<num; idx++) { \
ret retop (((type)tmp[num-idx-1])(__VA_ARGS__)) postop; \
} \
} \
else { \
callback_t *tmp = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, callback_t, \
num, ACCT_OTHER, UNPROTECTED); \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
read_unlock(&callback_registration_lock); \
for (idx=0; idx<num; idx++) { \
ret retop (((type)tmp[num-idx-1])(__VA_ARGS__)) postop; \
} \
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, tmp, callback_t, num, \
ACCT_OTHER, UNPROTECTED); \
} \
} while (0)
/* It's less error-prone if we just have one call_all macro. We'll
* reuse call_all_ret above for callbacks that don't have a return
* value by assigning to a dummy var. Note that this means we'll
* have to pass an int-returning type to call_all()
*/
#define call_all(vec, type, ...) \
do { \
int dummy; \
call_all_ret(dummy, =, , vec, type, __VA_ARGS__); \
} while (0)
/* Lists of callbacks for each event type. Note that init and nudge
* callback lists are kept in the client_lib_t data structure below.
* We could store all lists on a per-client basis, but we can iterate
* over these lists slightly more efficiently if we store all
* callbacks for a specific event in a single list.
*/
static callback_list_t exit_callbacks = {0,};
static callback_list_t thread_init_callbacks = {0,};
static callback_list_t thread_exit_callbacks = {0,};
#ifdef UNIX
static callback_list_t fork_init_callbacks = {0,};
#endif
static callback_list_t bb_callbacks = {0,};
static callback_list_t trace_callbacks = {0,};
#ifdef CUSTOM_TRACES
static callback_list_t end_trace_callbacks = {0,};
#endif
static callback_list_t fragdel_callbacks = {0,};
static callback_list_t restore_state_callbacks = {0,};
static callback_list_t restore_state_ex_callbacks = {0,};
static callback_list_t module_load_callbacks = {0,};
static callback_list_t module_unload_callbacks = {0,};
static callback_list_t filter_syscall_callbacks = {0,};
static callback_list_t pre_syscall_callbacks = {0,};
static callback_list_t post_syscall_callbacks = {0,};
#ifdef WINDOWS
static callback_list_t exception_callbacks = {0,};
#else
static callback_list_t signal_callbacks = {0,};
#endif
#ifdef PROGRAM_SHEPHERDING
static callback_list_t security_violation_callbacks = {0,};
#endif
static callback_list_t persist_ro_size_callbacks = {0,};
static callback_list_t persist_ro_callbacks = {0,};
static callback_list_t resurrect_ro_callbacks = {0,};
static callback_list_t persist_rx_size_callbacks = {0,};
static callback_list_t persist_rx_callbacks = {0,};
static callback_list_t resurrect_rx_callbacks = {0,};
static callback_list_t persist_rw_size_callbacks = {0,};
static callback_list_t persist_rw_callbacks = {0,};
static callback_list_t resurrect_rw_callbacks = {0,};
static callback_list_t persist_patch_callbacks = {0,};
/* An array of client libraries. We use a static array instead of a
* heap-allocated list so we can load the client libs before
* initializing DR's heap.
*/
typedef struct _client_lib_t {
client_id_t id;
char path[MAXIMUM_PATH];
/* PR 366195: dlopen() handle truly is opaque: != start */
shlib_handle_t lib;
app_pc start;
app_pc end;
/* The raw option string, which after i#1736 contains token-delimiting quotes */
char options[MAX_OPTION_LENGTH];
/* The option string with token-delimiting quotes removed for backward compat */
char legacy_options[MAX_OPTION_LENGTH];
/* The parsed options: */
int argc;
const char **argv;
/* We need to associate nudge events with a specific client so we
* store that list here in the client_lib_t instead of using a
* single global list.
*/
callback_list_t nudge_callbacks;
} client_lib_t;
/* these should only be modified prior to instrument_init(), since no
* readers of the client_libs array (event handlers, etc.) use synch
*/
static client_lib_t client_libs[MAX_CLIENT_LIBS] = {{0,}};
static size_t num_client_libs = 0;
static void *persist_user_data[MAX_CLIENT_LIBS];
#ifdef WINDOWS
/* private kernel32 lib, used to print to console */
static bool print_to_console;
static shlib_handle_t priv_kernel32;
typedef BOOL (WINAPI *kernel32_WriteFile_t)
(HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED);
static kernel32_WriteFile_t kernel32_WriteFile;
static ssize_t dr_write_to_console_varg(bool to_stdout, const char *fmt, ...);
#endif
bool client_requested_exit;
#ifdef WINDOWS
/* used for nudge support */
static bool block_client_nudge_threads = false;
DECLARE_CXTSWPROT_VAR(static int num_client_nudge_threads, 0);
#endif
#ifdef CLIENT_SIDELINE
/* # of sideline threads */
DECLARE_CXTSWPROT_VAR(static int num_client_sideline_threads, 0);
#endif
#if defined(WINDOWS) || defined(CLIENT_SIDELINE)
/* protects block_client_nudge_threads and incrementing num_client_nudge_threads */
DECLARE_CXTSWPROT_VAR(static mutex_t client_thread_count_lock,
INIT_LOCK_FREE(client_thread_count_lock));
#endif
static vm_area_vector_t *client_aux_libs;
#ifdef WINDOWS
DECLARE_CXTSWPROT_VAR(static mutex_t client_aux_lib64_lock,
INIT_LOCK_FREE(client_aux_lib64_lock));
#endif
/****************************************************************************/
/* INTERNAL ROUTINES */
static bool
char_is_quote(char c)
{
return c == '"' || c == '\'' || c == '`';
}
static void
parse_option_array(client_id_t client_id, const char *opstr,
int *argc OUT, const char ***argv OUT,
size_t max_token_size)
{
const char **a;
int cnt;
const char *s;
char *token = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, char, max_token_size,
ACCT_CLIENT, UNPROTECTED);
for (cnt = 0, s = dr_get_token(opstr, token, max_token_size);
s != NULL;
s = dr_get_token(s, token, max_token_size)) {
cnt++;
}
cnt++; /* add 1 so 0 can be "app" */
a = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, const char *, cnt, ACCT_CLIENT, UNPROTECTED);
cnt = 0;
a[cnt] = dr_strdup(dr_get_client_path(client_id) HEAPACCT(ACCT_CLIENT));
cnt++;
for (s = dr_get_token(opstr, token, max_token_size);
s != NULL;
s = dr_get_token(s, token, max_token_size)) {
a[cnt] = dr_strdup(token HEAPACCT(ACCT_CLIENT));
cnt++;
}
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, token, char, max_token_size,
ACCT_CLIENT, UNPROTECTED);
*argc = cnt;
*argv = a;
}
#ifdef DEBUG
static bool
free_option_array(int argc, const char **argv)
{
int i;
for (i = 0; i < argc; i++) {
dr_strfree(argv[i] HEAPACCT(ACCT_CLIENT));
}
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, argv, char *, argc, ACCT_CLIENT, UNPROTECTED);
return true;
}
#endif
static void
add_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
if (func == NULL) {
CLIENT_ASSERT(false, "trying to register a NULL callback");
return;
}
if (standalone_library) {
CLIENT_ASSERT(false, "events not supported in standalone library mode");
return;
}
write_lock(&callback_registration_lock);
/* Although we're receiving a pointer to a callback_list_t, we're
* usually modifying a static var.
*/
if (unprotect) {
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
}
/* We may already have an open slot since we allocate in twos and
* because we don't bother to free the storage when we remove the
* callback. Check and only allocate if necessary.
*/
if (vec->num == vec->size) {
callback_t *tmp = HEAP_ARRAY_ALLOC
(GLOBAL_DCONTEXT, callback_t, vec->size + 2, /* Let's allocate 2 */
ACCT_OTHER, UNPROTECTED);
if (tmp == NULL) {
CLIENT_ASSERT(false, "out of memory: can't register callback");
write_unlock(&callback_registration_lock);
return;
}
if (vec->callbacks != NULL) {
memcpy(tmp, vec->callbacks, vec->num * sizeof(callback_t));
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size,
ACCT_OTHER, UNPROTECTED);
}
vec->callbacks = tmp;
vec->size += 2;
}
vec->callbacks[vec->num] = func;
vec->num++;
if (unprotect) {
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
write_unlock(&callback_registration_lock);
}
static bool
remove_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
size_t i;
bool found = false;
if (func == NULL) {
CLIENT_ASSERT(false, "trying to unregister a NULL callback");
return false;
}
write_lock(&callback_registration_lock);
/* Although we're receiving a pointer to a callback_list_t, we're
* usually modifying a static var.
*/
if (unprotect) {
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
}
for (i=0; i<vec->num; i++) {
if (vec->callbacks[i] == func) {
size_t j;
/* shift down the entries on the tail */
for (j=i; j<vec->num-1; j++) {
vec->callbacks[j] = vec->callbacks[j+1];
}
vec->num -= 1;
found = true;
break;
}
}
if (unprotect) {
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
write_unlock(&callback_registration_lock);
return found;
}
/* This should only be called prior to instrument_init(),
* since no readers of the client_libs array use synch
* and since this routine assumes .data is writable.
*/
static void
add_client_lib(const char *path, const char *id_str, const char *options)
{
client_id_t id;
shlib_handle_t client_lib;
DEBUG_DECLARE(size_t i);
ASSERT(!dynamo_initialized);
/* if ID not specified, we'll default to 0 */
id = (id_str == NULL) ? 0 : strtoul(id_str, NULL, 16);
#ifdef DEBUG
/* Check for conflicting IDs */
for (i=0; i<num_client_libs; i++) {
CLIENT_ASSERT(client_libs[i].id != id, "Clients have the same ID");
}
#endif
if (num_client_libs == MAX_CLIENT_LIBS) {
CLIENT_ASSERT(false, "Max number of clients reached");
return;
}
LOG(GLOBAL, LOG_INTERP, 4, "about to load client library %s\n", path);
client_lib = load_shared_library(path, true/*reachable*/);
if (client_lib == NULL) {
char msg[MAXIMUM_PATH*4];
char err[MAXIMUM_PATH*2];
shared_library_error(err, BUFFER_SIZE_ELEMENTS(err));
snprintf(msg, BUFFER_SIZE_ELEMENTS(msg),
".\n\tError opening instrumentation library %s:\n\t%s",
path, err);
NULL_TERMINATE_BUFFER(msg);
/* PR 232490 - malformed library names or incorrect
* permissions shouldn't blow up an app in release builds as
* they may happen at customer sites with a third party
* client.
*/
#ifdef UNIX
/* PR 408318: 32-vs-64 errors should NOT be fatal to continue
* in debug build across execve chains. Xref i#147.
* XXX: w/ -private_loader, err always equals "error in private loader"
* and so we never match here!
*/
if (strstr(err, "wrong ELF class") == NULL)
#endif
CLIENT_ASSERT(false, msg);
SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4,
get_application_name(), get_application_pid(), path, msg);
}
else {
/* PR 250952: version check */
int *uses_dr_version = (int *)
lookup_library_routine(client_lib, USES_DR_VERSION_NAME);
if (uses_dr_version == NULL ||
*uses_dr_version < OLDEST_COMPATIBLE_VERSION ||
*uses_dr_version > NEWEST_COMPATIBLE_VERSION) {
/* not a fatal usage error since we want release build to continue */
CLIENT_ASSERT(false,
"client library is incompatible with this version of DR");
SYSLOG(SYSLOG_ERROR, CLIENT_VERSION_INCOMPATIBLE, 2,
get_application_name(), get_application_pid());
}
else {
size_t idx = num_client_libs++;
DEBUG_DECLARE(bool ok;)
client_libs[idx].id = id;
client_libs[idx].lib = client_lib;
DEBUG_DECLARE(ok =)
shared_library_bounds(client_lib, (byte *) uses_dr_version, NULL,
&client_libs[idx].start, &client_libs[idx].end);
ASSERT(ok);
LOG(GLOBAL, LOG_INTERP, 1, "loaded %s at "PFX"-"PFX"\n",
path, client_libs[idx].start, client_libs[idx].end);
#ifdef X64
/* Now that we map the client within the constraints, this request
* should always succeed.
*/
request_region_be_heap_reachable(client_libs[idx].start,
client_libs[idx].end -
client_libs[idx].start);
#endif
strncpy(client_libs[idx].path, path,
BUFFER_SIZE_ELEMENTS(client_libs[idx].path));
NULL_TERMINATE_BUFFER(client_libs[idx].path);
if (options != NULL) {
strncpy(client_libs[idx].options, options,
BUFFER_SIZE_ELEMENTS(client_libs[idx].options));
NULL_TERMINATE_BUFFER(client_libs[idx].options);
}
/* We'll look up dr_client_main and call it in instrument_init */
}
}
}
void
instrument_load_client_libs(void)
{
if (CLIENTS_EXIST()) {
char buf[MAX_LIST_OPTION_LENGTH];
char *path;
string_option_read_lock();
strncpy(buf, INTERNAL_OPTION(client_lib), BUFFER_SIZE_ELEMENTS(buf));
string_option_read_unlock();
NULL_TERMINATE_BUFFER(buf);
/* We're expecting path;ID;options triples */
path = buf;
do {
char *id = NULL;
char *options = NULL;
char *next_path = NULL;
id = strstr(path, ";");
if (id != NULL) {
id[0] = '\0';
id++;
options = strstr(id, ";");
if (options != NULL) {
options[0] = '\0';
options++;
next_path = strstr(options, ";");
if (next_path != NULL) {
next_path[0] = '\0';
next_path++;
}
}
}
#ifdef STATIC_LIBRARY
/* We ignore client library paths and allow client code anywhere in the app.
* We have a check in load_shared_library() to avoid loading
* a 2nd copy of the app.
* We do support passing client ID and options via the first -client_lib.
*/
add_client_lib(get_application_name(), id == NULL ? "0" : id,
options == NULL ? "" : options);
break;
#endif
add_client_lib(path, id, options);
path = next_path;
} while (path != NULL);
}
}
static void
init_client_aux_libs(void)
{
if (client_aux_libs == NULL) {
VMVECTOR_ALLOC_VECTOR(client_aux_libs, GLOBAL_DCONTEXT,
VECTOR_SHARED, client_aux_libs);
}
}
void
instrument_init(void)
{
size_t i;
init_client_aux_libs();
if (num_client_libs > 0) {
/* We no longer distinguish in-DR vs in-client crashes, as many crashes in
* the DR lib are really client bugs.
* We expect most end-user tools to call dr_set_client_name() so we
* have generic defaults here:
*/
set_exception_strings("Tool", "your tool's issue tracker");
}
/* Iterate over the client libs and call each init routine */
for (i=0; i<num_client_libs; i++) {
void (*init)(client_id_t, int, const char **) =
(void (*)(client_id_t, int, const char **))
(lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME));
void (*legacy)(client_id_t) = (void (*)(client_id_t))
(lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME_LEGACY));
/* we can't do this in instrument_load_client_libs() b/c vmheap
* is not set up at that point
*/
all_memory_areas_lock();
update_all_memory_areas(client_libs[i].start, client_libs[i].end,
/* FIXME: need to walk the sections: but may be
* better to obfuscate from clients anyway.
* We can't set as MEMPROT_NONE as that leads to
* bugs if the app wants to interpret part of
* its code section (xref PR 504629).
*/
MEMPROT_READ, DR_MEMTYPE_IMAGE);
all_memory_areas_unlock();
/* i#1736: parse the options up front */
parse_option_array(client_libs[i].id, client_libs[i].options,
&client_libs[i].argc, &client_libs[i].argv,
MAX_OPTION_LENGTH);
#ifdef STATIC_LIBRARY
/* We support the app having client code anywhere, so there does not
* have to be an init routine that we call. This means the app
* may have to iterate modules on its own.
*/
#else
/* Since the user has to register all other events, it
* doesn't make sense to provide the -client_lib
* option for a module that doesn't export an init routine.
*/
CLIENT_ASSERT(init != NULL || legacy != NULL,
"client does not export a dr_client_main or dr_init routine");
#endif
if (init != NULL)
(*init)(client_libs[i].id, client_libs[i].argc, client_libs[i].argv);
else if (legacy != NULL)
(*legacy)(client_libs[i].id);
}
/* We now initialize the 1st thread before coming here, so we can
* hand the client a dcontext; so we need to specially generate
* the thread init event now. An alternative is to have
* dr_get_global_drcontext(), but that's extra complexity for no
* real reason.
* We raise the thread init event prior to the module load events
* so the client can access a dcontext in module load events (i#1339).
*/
if (thread_init_callbacks.num > 0) {
instrument_thread_init(get_thread_private_dcontext(), false, false);
}
/* If the client just registered the module-load event, let's
* assume it wants to be informed of *all* modules and tell it
* which modules are already loaded. If the client registers the
* event later, it will need to use the module iterator routines
* to retrieve currently loaded modules. We use the dr_module_iterator
* exposed to the client to avoid locking issues.
*/
if (module_load_callbacks.num > 0) {
dr_module_iterator_t *mi = dr_module_iterator_start();
while (dr_module_iterator_hasnext(mi)) {
module_data_t *data = dr_module_iterator_next(mi);
instrument_module_load(data, true /*already loaded*/);
/* XXX; more efficient to set this flag during dr_module_iterator_start */
os_module_set_flag(data->start, MODULE_LOAD_EVENT);
dr_free_module_data(data);
}
dr_module_iterator_stop(mi);
}
}
#ifdef DEBUG
void
free_callback_list(callback_list_t *vec)
{
if (vec->callbacks != NULL) {
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size,
ACCT_OTHER, UNPROTECTED);
vec->callbacks = NULL;
}
vec->size = 0;
vec->num = 0;
}
void free_all_callback_lists()
{
free_callback_list(&exit_callbacks);
free_callback_list(&thread_init_callbacks);
free_callback_list(&thread_exit_callbacks);
#ifdef UNIX
free_callback_list(&fork_init_callbacks);
#endif
free_callback_list(&bb_callbacks);
free_callback_list(&trace_callbacks);
#ifdef CUSTOM_TRACES
free_callback_list(&end_trace_callbacks);
#endif
free_callback_list(&fragdel_callbacks);
free_callback_list(&restore_state_callbacks);
free_callback_list(&restore_state_ex_callbacks);
free_callback_list(&module_load_callbacks);
free_callback_list(&module_unload_callbacks);
free_callback_list(&filter_syscall_callbacks);
free_callback_list(&pre_syscall_callbacks);
free_callback_list(&post_syscall_callbacks);
#ifdef WINDOWS
free_callback_list(&exception_callbacks);
#else
free_callback_list(&signal_callbacks);
#endif
#ifdef PROGRAM_SHEPHERDING
free_callback_list(&security_violation_callbacks);
#endif
free_callback_list(&persist_ro_size_callbacks);
free_callback_list(&persist_ro_callbacks);
free_callback_list(&resurrect_ro_callbacks);
free_callback_list(&persist_rx_size_callbacks);
free_callback_list(&persist_rx_callbacks);
free_callback_list(&resurrect_rx_callbacks);
free_callback_list(&persist_rw_size_callbacks);
free_callback_list(&persist_rw_callbacks);
free_callback_list(&resurrect_rw_callbacks);
free_callback_list(&persist_patch_callbacks);
}
#endif /* DEBUG */
void
instrument_exit(void)
{
DEBUG_DECLARE(size_t i);
/* Note - currently own initexit lock when this is called (see PR 227619). */
/* support dr_get_mcontext() from the exit event */
if (!standalone_library)
get_thread_private_dcontext()->client_data->mcontext_in_dcontext = true;
call_all(exit_callbacks, int (*)(),
/* It seems the compiler is confused if we pass no var args
* to the call_all macro. Bogus NULL arg */
NULL);
#ifdef DEBUG
/* Unload all client libs and free any allocated storage */
for (i=0; i<num_client_libs; i++) {
free_callback_list(&client_libs[i].nudge_callbacks);
unload_shared_library(client_libs[i].lib);
if (client_libs[i].argv != NULL)
free_option_array(client_libs[i].argc, client_libs[i].argv);
}
free_all_callback_lists();
#endif
vmvector_delete_vector(GLOBAL_DCONTEXT, client_aux_libs);
client_aux_libs = NULL;
num_client_libs = 0;
#ifdef WINDOWS
DELETE_LOCK(client_aux_lib64_lock);
#endif
#if defined(WINDOWS) || defined(CLIENT_SIDELINE)
DELETE_LOCK(client_thread_count_lock);
#endif
DELETE_READWRITE_LOCK(callback_registration_lock);
}
bool
is_in_client_lib(app_pc addr)
{
/* NOTE: we use this routine for detecting exceptions in
* clients. If we add a callback on that event we'll have to be
* sure to deliver it only to the right client.
*/
size_t i;
for (i=0; i<num_client_libs; i++) {
if ((addr >= (app_pc)client_libs[i].start) &&
(addr < client_libs[i].end)) {
return true;
}
}
if (client_aux_libs != NULL &&
vmvector_overlap(client_aux_libs, addr, addr+1))
return true;
return false;
}
bool
get_client_bounds(client_id_t client_id,
app_pc *start/*OUT*/, app_pc *end/*OUT*/)
{
if (client_id >= num_client_libs)
return false;
if (start != NULL)
*start = (app_pc) client_libs[client_id].start;
if (end != NULL)
*end = (app_pc) client_libs[client_id].end;
return true;
}
const char *
get_client_path_from_addr(app_pc addr)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if ((addr >= (app_pc)client_libs[i].start) &&
(addr < client_libs[i].end)) {
return client_libs[i].path;
}
}
return "";
}
bool
is_valid_client_id(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return true;
}
}
return false;
}
void
dr_register_exit_event(void (*func)(void))
{
add_callback(&exit_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_exit_event(void (*func)(void))
{
return remove_callback(&exit_callbacks, (void (*)(void))func, true);
}
void
dr_register_bb_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *bb,
bool for_trace, bool translating))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for bb event when code_api is disabled");
return;
}
add_callback(&bb_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_bb_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *bb,
bool for_trace, bool translating))
{
return remove_callback(&bb_callbacks, (void (*)(void))func, true);
}
void
dr_register_trace_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *trace,
bool translating))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for trace event when code_api is disabled");
return;
}
add_callback(&trace_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_trace_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *trace,
bool translating))
{
return remove_callback(&trace_callbacks, (void (*)(void))func, true);
}
#ifdef CUSTOM_TRACES
void
dr_register_end_trace_event(dr_custom_trace_action_t (*func)
(void *drcontext, void *tag, void *next_tag))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for end-trace event when code_api is disabled");
return;
}
add_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_end_trace_event(dr_custom_trace_action_t
(*func)(void *drcontext, void *tag, void *next_tag))
{
return remove_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
#endif
void
dr_register_delete_event(void (*func)(void *drcontext, void *tag))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for delete event when code_api is disabled");
return;
}
add_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_delete_event(void (*func)(void *drcontext, void *tag))
{
return remove_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
void
dr_register_restore_state_event(void (*func)
(void *drcontext, void *tag, dr_mcontext_t *mcontext,
bool restore_memory, bool app_code_consistent))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for restore state event when code_api is disabled");
return;
}
add_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_restore_state_event(void (*func)
(void *drcontext, void *tag, dr_mcontext_t *mcontext,
bool restore_memory, bool app_code_consistent))
{
return remove_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
void
dr_register_restore_state_ex_event(bool (*func) (void *drcontext, bool restore_memory,
dr_restore_state_info_t *info))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for restore_state_ex event when code_api disabled");
return;
}
add_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_restore_state_ex_event(bool (*func) (void *drcontext, bool restore_memory,
dr_restore_state_info_t *info))
{
return remove_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
void
dr_register_thread_init_event(void (*func)(void *drcontext))
{
add_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_thread_init_event(void (*func)(void *drcontext))
{
return remove_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
void
dr_register_thread_exit_event(void (*func)(void *drcontext))
{
add_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_thread_exit_event(void (*func)(void *drcontext))
{
return remove_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
#ifdef UNIX
void
dr_register_fork_init_event(void (*func)(void *drcontext))
{
add_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_fork_init_event(void (*func)(void *drcontext))
{
return remove_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
#endif
void
dr_register_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
bool loaded))
{
add_callback(&module_load_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
bool loaded))
{
return remove_callback(&module_load_callbacks, (void (*)(void))func, true);
}
void
dr_register_module_unload_event(void (*func)(void *drcontext,
const module_data_t *info))
{
add_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_module_unload_event(void (*func)(void *drcontext,
const module_data_t *info))
{
return remove_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
#ifdef WINDOWS
void
dr_register_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
add_callback(&exception_callbacks, (bool (*)(void))func, true);
}
bool
dr_unregister_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
return remove_callback(&exception_callbacks, (bool (*)(void))func, true);
}
#else
void
dr_register_signal_event(dr_signal_action_t (*func)
(void *drcontext, dr_siginfo_t *siginfo))
{
add_callback(&signal_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_signal_event(dr_signal_action_t (*func)
(void *drcontext, dr_siginfo_t *siginfo))
{
return remove_callback(&signal_callbacks, (void (*)(void))func, true);
}
#endif /* WINDOWS */
void
dr_register_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
add_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
return remove_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
add_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
return remove_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
add_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
return remove_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
#ifdef PROGRAM_SHEPHERDING
void
dr_register_security_event(void (*func)(void *drcontext, void *source_tag,
app_pc source_pc, app_pc target_pc,
dr_security_violation_type_t violation,
dr_mcontext_t *mcontext,
dr_security_violation_action_t *action))
{
add_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_security_event(void (*func)(void *drcontext, void *source_tag,
app_pc source_pc, app_pc target_pc,
dr_security_violation_type_t violation,
dr_mcontext_t *mcontext,
dr_security_violation_action_t *action))
{
return remove_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
#endif
void
dr_register_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
add_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func,
/* the nudge callback list is stored on the heap, so
* we don't need to unprotect the .data section when
* we update the list */
false);
return;
}
}
CLIENT_ASSERT(false, "dr_register_nudge_event: invalid client ID");
}
bool
dr_unregister_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return remove_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func,
/* the nudge callback list is stored on the heap, so
* we don't need to unprotect the .data section when
* we update the list */
false);
}
}
CLIENT_ASSERT(false, "dr_unregister_nudge_event: invalid client ID");
return false;
}
dr_config_status_t
dr_nudge_client_ex(process_id_t process_id, client_id_t client_id,
uint64 argument, uint timeout_ms)
{
if (process_id == get_process_id()) {
size_t i;
#ifdef WINDOWS
pre_second_thread();
#endif
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == client_id) {
if (client_libs[i].nudge_callbacks.num == 0) {
CLIENT_ASSERT(false, "dr_nudge_client: no nudge handler registered");
return false;
}
return nudge_internal(process_id, NUDGE_GENERIC(client), argument,
client_id, timeout_ms);
}
}
return false;
} else {
return nudge_internal(process_id, NUDGE_GENERIC(client), argument,
client_id, timeout_ms);
}
}
bool
dr_nudge_client(client_id_t client_id, uint64 argument)
{
return dr_nudge_client_ex(get_process_id(), client_id, argument, 0) == DR_SUCCESS;
}
#ifdef WINDOWS
DR_API
bool
dr_is_nudge_thread(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "invalid parameter to dr_is_nudge_thread");
return dcontext->nudge_target != NULL;
}
#endif
void
instrument_client_thread_init(dcontext_t *dcontext, bool client_thread)
{
if (dcontext->client_data == NULL) {
dcontext->client_data = HEAP_TYPE_ALLOC(dcontext, client_data_t,
ACCT_OTHER, UNPROTECTED);
memset(dcontext->client_data, 0x0, sizeof(client_data_t));
#ifdef CLIENT_SIDELINE
ASSIGN_INIT_LOCK_FREE(dcontext->client_data->sideline_mutex, sideline_mutex);
#endif
CLIENT_ASSERT(dynamo_initialized || thread_init_callbacks.num == 0 ||
client_thread,
"1st call to instrument_thread_init should have no cbs");
}
#ifdef CLIENT_SIDELINE
if (client_thread) {
ATOMIC_INC(int, num_client_sideline_threads);
/* We don't call dynamo_thread_not_under_dynamo() b/c we want itimers. */
dcontext->thread_record->under_dynamo_control = false;
dcontext->client_data->is_client_thread = true;
}
#endif /* CLIENT_SIDELINE */
}
void
instrument_thread_init(dcontext_t *dcontext, bool client_thread, bool valid_mc)
{
/* Note that we're called twice for the initial thread: once prior
* to instrument_init() (PR 216936) to set up the dcontext client
* field (at which point there should be no callbacks since client
* has not had a chance to register any) (now split out, but both
* routines are called prior to instrument_init()), and once after
* instrument_init() to call the client event.
*/
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
bool swap_peb = false;
#endif
if (client_thread) {
/* no init event */
return;
}
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
/* i#996: we might be in app's state.
* It is simpler to check and swap here than earlier on thread init paths.
*/
if (dr_using_app_state(dcontext)) {
swap_peb_pointer(dcontext, true/*to priv*/);
swap_peb = true;
}
#endif
/* i#117/PR 395156: support dr_get_mcontext() from the thread init event */
if (valid_mc)
dcontext->client_data->mcontext_in_dcontext = true;
call_all(thread_init_callbacks, int (*)(void *), (void *)dcontext);
if (valid_mc)
dcontext->client_data->mcontext_in_dcontext = false;
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
if (swap_peb)
swap_peb_pointer(dcontext, false/*to app*/);
#endif
}
#ifdef UNIX
void
instrument_fork_init(dcontext_t *dcontext)
{
call_all(fork_init_callbacks, int (*)(void *), (void *)dcontext);
}
#endif
/* PR 536058: split the exit event from thread cleanup, to provide a
* dcontext in the process exit event
*/
void
instrument_thread_exit_event(dcontext_t *dcontext)
{
#ifdef CLIENT_SIDELINE
if (IS_CLIENT_THREAD(dcontext)
/* if nudge thread calls dr_exit_process() it will be marked as a client
* thread: rule it out here so we properly clean it up
*/
IF_WINDOWS(&& dcontext->nudge_target == NULL)) {
ATOMIC_DEC(int, num_client_sideline_threads);
/* no exit event */
return;
}
#endif
/* i#1394: best-effort to try to avoid crashing thread exit events
* where thread init was never called.
*/
if (!dynamo_initialized)
return;
/* support dr_get_mcontext() from the exit event */
dcontext->client_data->mcontext_in_dcontext = true;
/* Note - currently own initexit lock when this is called (see PR 227619). */
call_all(thread_exit_callbacks, int (*)(void *), (void *)dcontext);
}
void
instrument_thread_exit(dcontext_t *dcontext)
{
#ifdef DEBUG
client_todo_list_t *todo;
client_flush_req_t *flush;
#endif
#ifdef DEBUG
/* PR 470957: avoid racy crashes by not freeing in release build */
# ifdef CLIENT_SIDELINE
DELETE_LOCK(dcontext->client_data->sideline_mutex);
# endif
/* could be heap space allocated for the todo list */
todo = dcontext->client_data->to_do;
while (todo != NULL) {
client_todo_list_t *next_todo = todo->next;
if (todo->ilist != NULL) {
instrlist_clear_and_destroy(dcontext, todo->ilist);
}
HEAP_TYPE_FREE(dcontext, todo, client_todo_list_t, ACCT_CLIENT, UNPROTECTED);
todo = next_todo;
}
/* could be heap space allocated for the flush list */
flush = dcontext->client_data->flush_list;
while (flush != NULL) {
client_flush_req_t *next_flush = flush->next;
HEAP_TYPE_FREE(dcontext, flush, client_flush_req_t, ACCT_CLIENT, UNPROTECTED);
flush = next_flush;
}
HEAP_TYPE_FREE(dcontext, dcontext->client_data, client_data_t,
ACCT_OTHER, UNPROTECTED);
dcontext->client_data = NULL; /* for mutex_wait_contended_lock() */
dcontext->is_client_thread_exiting = true; /* for is_using_app_peb() */
#endif /* DEBUG */
}
bool
dr_bb_hook_exists(void)
{
return (bb_callbacks.num > 0);
}
bool
dr_trace_hook_exists(void)
{
return (trace_callbacks.num > 0);
}
bool
dr_fragment_deleted_hook_exists(void)
{
return (fragdel_callbacks.num > 0);
}
bool
dr_end_trace_hook_exists(void)
{
return (end_trace_callbacks.num > 0);
}
bool
dr_thread_exit_hook_exists(void)
{
return (thread_exit_callbacks.num > 0);
}
bool
dr_exit_hook_exists(void)
{
return (exit_callbacks.num > 0);
}
bool
dr_xl8_hook_exists(void)
{
return (restore_state_callbacks.num > 0 ||
restore_state_ex_callbacks.num > 0);
}
#endif /* CLIENT_INTERFACE */
/* needed outside of CLIENT_INTERFACE for simpler USE_BB_BUILDING_LOCK_STEADY_STATE() */
bool
dr_modload_hook_exists(void)
{
/* We do not support (as documented in the module event doxygen)
* the client changing this during bb building, as that will mess
* up USE_BB_BUILDING_LOCK_STEADY_STATE().
*/
return IF_CLIENT_INTERFACE_ELSE(module_load_callbacks.num > 0, false);
}
#ifdef CLIENT_INTERFACE
bool
hide_tag_from_client(app_pc tag)
{
#ifdef WINDOWS
/* Case 10009: Basic blocks that consist of a single jump into the
* interception buffer should be obscured from clients. Clients
* will see the displaced code, so we'll provide the address of this
* block if the client asks for the address of the displaced code.
*
* Note that we assume the jump is the first instruction in the
* BB for any blocks that jump to the interception buffer.
*/
if (is_intercepted_app_pc(tag, NULL) ||
/* Displaced app code is now in the landing pad, so skip the
* jump from the interception buffer to the landing pad
*/
is_in_interception_buffer(tag) ||
/* Landing pads that exist between hook points and the trampolines
* shouldn't be seen by the client too. PR 250294.
*/
is_on_interception_initial_route(tag) ||
/* PR 219351: if we lose control on a callback and get it back on
* one of our syscall trampolines, we'll appear at the jmp out of
* the interception buffer to the int/sysenter instruction. The
* problem is that our syscall trampolines, unlike our other
* intercepted code, are hooked earlier than the real action point
* and we have displaced app code at the start of the interception
* buffer: we hook at the wrapper entrance and return w/ a jmp to
* the sysenter/int instr. When creating bbs at the start we hack
* it to make it look like there is no hook. But on retaking control
* we end up w/ this jmp out that won't be solved w/ our normal
* mechanism for other hook jmp-outs: so we just suppress and the
* client next sees the post-syscall bb. It already saw a gap.
*/
is_syscall_trampoline(tag, NULL))
return true;
#endif
return false;
}
#ifdef DEBUG
/* PR 214962: client must set translation fields */
static void
check_ilist_translations(instrlist_t *ilist)
{
/* Ensure client set the translation field for all non-meta
* instrs, even if it didn't return DR_EMIT_STORE_TRANSLATIONS
* (since we may decide ourselves to store)
*/
instr_t *in;
for (in = instrlist_first(ilist); in != NULL; in = instr_get_next(in)) {
if (!instr_opcode_valid(in)) {
CLIENT_ASSERT(INTERNAL_OPTION(fast_client_decode), "level 0 instr found");
} else if (instr_is_app(in)) {
DOLOG(LOG_INTERP, 1, {
if (instr_get_translation(in) == NULL)
loginst(get_thread_private_dcontext(), 1, in, "translation is NULL");
});
CLIENT_ASSERT(instr_get_translation(in) != NULL,
"translation field must be set for every app instruction");
} else {
/* The meta instr could indeed not affect app state, but
* better I think to assert and make them put in an
* empty restore event callback in that case. */
DOLOG(LOG_INTERP, 1, {
if (instr_get_translation(in) != NULL &&
!instr_is_our_mangling(in) &&
!dr_xl8_hook_exists())
loginst(get_thread_private_dcontext(), 1, in, "translation != NULL");
});
CLIENT_ASSERT(instr_get_translation(in) == NULL ||
instr_is_our_mangling(in) ||
dr_xl8_hook_exists(),
/* FIXME: if multiple clients, we need to check that this
* particular client has the callback: but we have
* no way to do that other than looking at library
* bounds...punting for now */
"a meta instr should not have its translation field "
"set without also having a restore_state callback");
}
}
}
#endif
/* Returns true if the bb hook is called */
bool
instrument_basic_block(dcontext_t *dcontext, app_pc tag, instrlist_t *bb,
bool for_trace, bool translating, dr_emit_flags_t *emitflags)
{
dr_emit_flags_t ret = DR_EMIT_DEFAULT;
/* return false if no BB hooks are registered */
if (bb_callbacks.num == 0)
return false;
if (hide_tag_from_client(tag)) {
LOG(THREAD, LOG_INTERP, 3, "hiding tag "PFX" from client\n", tag);
return false;
}
/* do not expand or up-decode the instrlist, client gets to choose
* whether and how to do that
*/
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\ninstrument_basic_block ******************\n");
LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, bb, THREAD);
#endif
/* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
if (!translating && !for_trace)
dcontext->client_data->mcontext_in_dcontext = true;
/* Note - currently we are couldbelinking and hold the
* bb_building lock when this is called (see PR 227619).
*/
/* We or together the return values */
call_all_ret(ret, |=, , bb_callbacks,
int (*) (void *, void *, instrlist_t *, bool, bool),
(void *)dcontext, (void *)tag, bb, for_trace, translating);
if (emitflags != NULL)
*emitflags = ret;
DOCHECK(1, { check_ilist_translations(bb); });
dcontext->client_data->mcontext_in_dcontext = false;
if (IF_DEBUG_ELSE(for_trace, false)) {
CLIENT_ASSERT(instrlist_get_return_target(bb) == NULL &&
instrlist_get_fall_through_target(bb) == NULL,
"instrlist_set_return/fall_through_target"
" cannot be used on traces");
}
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, bb, THREAD);
#endif
return true;
}
/* Give the user the completely mangled and optimized trace just prior
* to emitting into code cache, user gets final crack at it
*/
dr_emit_flags_t
instrument_trace(dcontext_t *dcontext, app_pc tag, instrlist_t *trace,
bool translating)
{
dr_emit_flags_t ret = DR_EMIT_DEFAULT;
#ifdef UNSUPPORTED_API
instr_t *instr;
#endif
if (trace_callbacks.num == 0)
return DR_EMIT_DEFAULT;
/* do not expand or up-decode the instrlist, client gets to choose
* whether and how to do that
*/
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\ninstrument_trace ******************\n");
LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, trace, THREAD);
#endif
/* We always pass Level 3 instrs to the client, since we no longer
* expose the expansion routines.
*/
#ifdef UNSUPPORTED_API
for (instr = instrlist_first_expanded(dcontext, trace);
instr != NULL;
instr = instr_get_next_expanded(dcontext, trace, instr)) {
instr_decode(dcontext, instr);
}
/* ASSUMPTION: all ctis are already at Level 3, so we don't have
* to do a separate pass to fix up intra-list targets like
* instrlist_decode_cti() does
*/
#endif
/* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
if (!translating)
dcontext->client_data->mcontext_in_dcontext = true;
/* We or together the return values */
call_all_ret(ret, |=, , trace_callbacks,
int (*)(void *, void *, instrlist_t *, bool),
(void *)dcontext, (void *)tag, trace, translating);
DOCHECK(1, { check_ilist_translations(trace); });
CLIENT_ASSERT(instrlist_get_return_target(trace) == NULL &&
instrlist_get_fall_through_target(trace) == NULL,
"instrlist_set_return/fall_through_target"
" cannot be used on traces");
dcontext->client_data->mcontext_in_dcontext = false;
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, trace, THREAD);
#endif
return ret;
}
/* Notify user when a fragment is deleted from the cache
* FIXME PR 242544: how does user know whether this is a shadowed copy or the
* real thing? The user might free memory that shouldn't be freed!
*/
void
instrument_fragment_deleted(dcontext_t *dcontext, app_pc tag, uint flags)
{
if (fragdel_callbacks.num == 0)
return;
#ifdef WINDOWS
/* Case 10009: We don't call the basic block hook for blocks that
* are jumps to the interception buffer, so we'll hide them here
* as well.
*/
if (!TEST(FRAG_IS_TRACE, flags) && hide_tag_from_client(tag))
return;
#endif
/* PR 243008: we don't expose GLOBAL_DCONTEXT, so change to NULL.
* Our comments warn the user about this.
*/
if (dcontext == GLOBAL_DCONTEXT)
dcontext = NULL;
call_all(fragdel_callbacks, int (*)(void *, void *),
(void *)dcontext, (void *)tag);
}
bool
instrument_restore_state(dcontext_t *dcontext, bool restore_memory,
dr_restore_state_info_t *info)
{
bool res = true;
/* Support both legacy and extended handlers */
if (restore_state_callbacks.num > 0) {
call_all(restore_state_callbacks,
int (*)(void *, void *, dr_mcontext_t *, bool, bool),
(void *)dcontext, info->fragment_info.tag, info->mcontext,
restore_memory, info->fragment_info.app_code_consistent);
}
if (restore_state_ex_callbacks.num > 0) {
/* i#220/PR 480565: client has option of failing the translation.
* We fail it if any client wants to, short-circuiting in that case.
* This does violate the "priority order" of events where the
* last one is supposed to have final say b/c it won't even
* see the event (xref i#424).
*/
call_all_ret(res, = res &&, , restore_state_ex_callbacks,
int (*)(void *, bool, dr_restore_state_info_t *),
(void *)dcontext, restore_memory, info);
}
CLIENT_ASSERT(!restore_memory || res,
"translation should not fail for restore_memory=true");
return res;
}
#ifdef CUSTOM_TRACES
/* Ask whether to end trace prior to adding next_tag fragment.
* Return values:
* CUSTOM_TRACE_DR_DECIDES = use standard termination criteria
* CUSTOM_TRACE_END_NOW = end trace
* CUSTOM_TRACE_CONTINUE = do not end trace
*/
dr_custom_trace_action_t
instrument_end_trace(dcontext_t *dcontext, app_pc trace_tag, app_pc next_tag)
{
dr_custom_trace_action_t ret = CUSTOM_TRACE_DR_DECIDES;
if (end_trace_callbacks.num == 0)
return ret;
/* Highest priority callback decides how to end the trace (see
* call_all_ret implementation)
*/
call_all_ret(ret, =, , end_trace_callbacks, int (*)(void *, void *, void *),
(void *)dcontext, (void *)trace_tag, (void *)next_tag);
return ret;
}
#endif
static module_data_t *
create_and_initialize_module_data(app_pc start, app_pc end, app_pc entry_point,
uint flags, const module_names_t *names,
const char *full_path
#ifdef WINDOWS
, version_number_t file_version,
version_number_t product_version,
uint checksum, uint timestamp,
size_t mod_size
#else
, bool contiguous,
uint num_segments,
module_segment_t *os_segments,
module_segment_data_t *segments,
uint timestamp
# ifdef MACOS
, uint current_version,
uint compatibility_version,
const byte uuid[16]
# endif
#endif
)
{
#ifndef WINDOWS
uint i;
#endif
module_data_t *copy = (module_data_t *)
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, module_data_t, ACCT_CLIENT, UNPROTECTED);
memset(copy, 0, sizeof(module_data_t));
copy->start = start;
copy->end = end;
copy->entry_point = entry_point;
copy->flags = flags;
if (full_path != NULL)
copy->full_path = dr_strdup(full_path HEAPACCT(ACCT_CLIENT));
if (names->module_name != NULL)
copy->names.module_name = dr_strdup(names->module_name HEAPACCT(ACCT_CLIENT));
if (names->file_name != NULL)
copy->names.file_name = dr_strdup(names->file_name HEAPACCT(ACCT_CLIENT));
#ifdef WINDOWS
if (names->exe_name != NULL)
copy->names.exe_name = dr_strdup(names->exe_name HEAPACCT(ACCT_CLIENT));
if (names->rsrc_name != NULL)
copy->names.rsrc_name = dr_strdup(names->rsrc_name HEAPACCT(ACCT_CLIENT));
copy->file_version = file_version;
copy->product_version = product_version;
copy->checksum = checksum;
copy->timestamp = timestamp;
copy->module_internal_size = mod_size;
#else
copy->contiguous = contiguous;
copy->num_segments = num_segments;
copy->segments = (module_segment_data_t *)
HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, module_segment_data_t,
num_segments, ACCT_VMAREAS, PROTECTED);
if (os_segments != NULL) {
for (i = 0; i < num_segments; i++) {
copy->segments[i].start = os_segments[i].start;
copy->segments[i].end = os_segments[i].end;
copy->segments[i].prot = os_segments[i].prot;
}
} else
memcpy(copy->segments, segments, num_segments*sizeof(module_segment_data_t));
copy->timestamp = timestamp;
# ifdef MACOS
copy->current_version = current_version;
copy->compatibility_version = compatibility_version;
memcpy(copy->uuid, uuid, sizeof(copy->uuid));
# endif
#endif
return copy;
}
module_data_t *
copy_module_area_to_module_data(const module_area_t *area)
{
if (area == NULL)
return NULL;
return create_and_initialize_module_data(area->start, area->end, area->entry_point,
0, &area->names, area->full_path
#ifdef WINDOWS
, area->os_data.file_version,
area->os_data.product_version,
area->os_data.checksum,
area->os_data.timestamp,
area->os_data.module_internal_size
#else
, area->os_data.contiguous,
area->os_data.num_segments,
area->os_data.segments,
NULL,
area->os_data.timestamp
# ifdef MACOS
, area->os_data.current_version,
area->os_data.compatibility_version,
area->os_data.uuid
# endif
#endif
);
}
DR_API
/* Makes a copy of a module_data_t for returning to the client. We return a copy so
* we don't have to hold the module areas list lock while in the client (xref PR 225020).
* Note - dr_data is allowed to be NULL. */
module_data_t *
dr_copy_module_data(const module_data_t *data)
{
if (data == NULL)
return NULL;
return create_and_initialize_module_data(data->start, data->end, data->entry_point,
0, &data->names, data->full_path
#ifdef WINDOWS
, data->file_version,
data->product_version,
data->checksum, data->timestamp,
data->module_internal_size
#else
, data->contiguous,
data->num_segments,
NULL,
data->segments,
data->timestamp
# ifdef MACOS
, data->current_version,
data->compatibility_version,
data->uuid
# endif
#endif
);
}
DR_API
/* Used to free a module_data_t created by dr_copy_module_data() */
void
dr_free_module_data(module_data_t *data)
{
dcontext_t *dcontext = get_thread_private_dcontext();
if (data == NULL)
return;
if (dcontext != NULL && data == dcontext->client_data->no_delete_mod_data) {
CLIENT_ASSERT(false, "dr_free_module_data: don\'t free module_data passed to "
"the image load or image unload event callbacks.");
return;
}
#ifdef UNIX
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, data->segments, module_segment_data_t,
data->num_segments, ACCT_VMAREAS, PROTECTED);
#endif
if (data->full_path != NULL)
dr_strfree(data->full_path HEAPACCT(ACCT_CLIENT));
free_module_names(&data->names HEAPACCT(ACCT_CLIENT));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, data, module_data_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
bool
dr_module_contains_addr(const module_data_t *data, app_pc addr)
{
/* XXX: this duplicates module_contains_addr(), but we have two different
* data structures (module_area_t and module_data_t) so it's hard to share.
*/
#ifdef WINDOWS
return (addr >= data->start && addr < data->end);
#else
if (data->contiguous)
return (addr >= data->start && addr < data->end);
else {
uint i;
for (i = 0; i < data->num_segments; i++) {
if (addr >= data->segments[i].start && addr < data->segments[i].end)
return true;
}
}
return false;
#endif
}
/* Looks up module containing pc (assumed to be fully loaded).
* If it exists and its client module load event has not been called, calls it.
*/
void
instrument_module_load_trigger(app_pc pc)
{
if (CLIENTS_EXIST()) {
module_area_t *ma;
module_data_t *client_data = NULL;
os_get_module_info_lock();
ma = module_pc_lookup(pc);
if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) {
/* switch to write lock */
os_get_module_info_unlock();
os_get_module_info_write_lock();
ma = module_pc_lookup(pc);
if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) {
ma->flags |= MODULE_LOAD_EVENT;
client_data = copy_module_area_to_module_data(ma);
os_get_module_info_write_unlock();
instrument_module_load(client_data, true/*i#884: already loaded*/);
dr_free_module_data(client_data);
} else
os_get_module_info_write_unlock();
} else
os_get_module_info_unlock();
}
}
/* Notify user when a module is loaded */
void
instrument_module_load(module_data_t *data, bool previously_loaded)
{
/* Note - during DR initialization this routine is called before we've set up a
* dcontext for the main thread and before we've called instrument_init. It's okay
* since there's no way a callback will be registered and we'll return immediately. */
dcontext_t *dcontext;
if (module_load_callbacks.num == 0)
return;
dcontext = get_thread_private_dcontext();
/* client shouldn't delete this */
dcontext->client_data->no_delete_mod_data = data;
call_all(module_load_callbacks, int (*)(void *, module_data_t *, bool),
(void *)dcontext, data, previously_loaded);
dcontext->client_data->no_delete_mod_data = NULL;
}
/* Notify user when a module is unloaded */
void
instrument_module_unload(module_data_t *data)
{
dcontext_t *dcontext;
if (module_unload_callbacks.num == 0)
return;
dcontext = get_thread_private_dcontext();
/* client shouldn't delete this */
dcontext->client_data->no_delete_mod_data = data;
call_all(module_unload_callbacks, int (*)(void *, module_data_t *),
(void *)dcontext, data);
dcontext->client_data->no_delete_mod_data = NULL;
}
/* returns whether this sysnum should be intercepted */
bool
instrument_filter_syscall(dcontext_t *dcontext, int sysnum)
{
bool ret = false;
/* if client does not filter then we don't intercept anything */
if (filter_syscall_callbacks.num == 0)
return ret;
/* if any client wants to intercept, then we intercept */
call_all_ret(ret, =, || ret, filter_syscall_callbacks, bool (*)(void *, int),
(void *)dcontext, sysnum);
return ret;
}
/* returns whether this syscall should execute */
bool
instrument_pre_syscall(dcontext_t *dcontext, int sysnum)
{
bool exec = true;
dcontext->client_data->in_pre_syscall = true;
/* clear flag from dr_syscall_invoke_another() */
dcontext->client_data->invoke_another_syscall = false;
if (pre_syscall_callbacks.num > 0) {
/* Skip syscall if any client wants to skip it, but don't short-circuit,
* as skipping syscalls is usually done when the effect of the syscall
* will be emulated in some other way. The app is typically meant to
* think that the syscall succeeded. Thus, other tool components
* should see the syscall as well (xref i#424).
*/
call_all_ret(exec, =, && exec, pre_syscall_callbacks,
bool (*)(void *, int), (void *)dcontext, sysnum);
}
dcontext->client_data->in_pre_syscall = false;
return exec;
}
void
instrument_post_syscall(dcontext_t *dcontext, int sysnum)
{
if (post_syscall_callbacks.num == 0)
return;
dcontext->client_data->in_post_syscall = true;
call_all(post_syscall_callbacks, int (*)(void *, int),
(void *)dcontext, sysnum);
dcontext->client_data->in_post_syscall = false;
}
bool
instrument_invoke_another_syscall(dcontext_t *dcontext)
{
return dcontext->client_data->invoke_another_syscall;
}
#ifdef WINDOWS
/* Notify user of exceptions. Note: not called for RaiseException */
bool
instrument_exception(dcontext_t *dcontext, dr_exception_t *exception)
{
bool res = true;
/* We short-circuit if any client wants to "own" the fault and not pass on.
* This does violate the "priority order" of events where the last one is
* supposed to have final say b/c it won't even see the event: but only one
* registrant should own it (xref i#424).
*/
call_all_ret(res, = res &&, , exception_callbacks,
bool (*)(void *, dr_exception_t *),
(void *)dcontext, exception);
return res;
}
#else
dr_signal_action_t
instrument_signal(dcontext_t *dcontext, dr_siginfo_t *siginfo)
{
dr_signal_action_t ret = DR_SIGNAL_DELIVER;
/* We short-circuit if any client wants to do other than deliver to the app.
* This does violate the "priority order" of events where the last one is
* supposed to have final say b/c it won't even see the event: but only one
* registrant should own the signal (xref i#424).
*/
call_all_ret(ret, = ret == DR_SIGNAL_DELIVER ? , : ret, signal_callbacks,
dr_signal_action_t (*)(void *, dr_siginfo_t *),
(void *)dcontext, siginfo);
return ret;
}
bool
dr_signal_hook_exists(void)
{
return (signal_callbacks.num > 0);
}
#endif /* WINDOWS */
#ifdef PROGRAM_SHEPHERDING
/* Notify user when a security violation is detected */
void
instrument_security_violation(dcontext_t *dcontext, app_pc target_pc,
security_violation_t violation, action_type_t *action)
{
dr_security_violation_type_t dr_violation;
dr_security_violation_action_t dr_action, dr_action_original;
app_pc source_pc = NULL;
fragment_t *last;
dr_mcontext_t dr_mcontext;
dr_mcontext_init(&dr_mcontext);
if (security_violation_callbacks.num == 0)
return;
if (!priv_mcontext_to_dr_mcontext(&dr_mcontext, get_mcontext(dcontext)))
return;
/* FIXME - the source_tag, source_pc, and context can all be incorrect if the
* violation ends up occurring in the middle of a bb we're building. See case
* 7380 which we should fix in interp.c.
*/
/* Obtain the source addr to pass to the client. xref case 285 --
* we're using the more heavy-weight solution 2) here, but that
* should be okay since we already have the overhead of calling
* into the client. */
last = dcontext->last_fragment;
if (!TEST(FRAG_FAKE, last->flags)) {
cache_pc pc = EXIT_CTI_PC(last, dcontext->last_exit);
source_pc = recreate_app_pc(dcontext, pc, last);
}
/* FIXME - set pc field of dr_mcontext_t. We'll probably want it
* for thread start and possibly apc/callback events as well.
*/
switch (violation) {
case STACK_EXECUTION_VIOLATION:
dr_violation = DR_RCO_STACK_VIOLATION;
break;
case HEAP_EXECUTION_VIOLATION:
dr_violation = DR_RCO_HEAP_VIOLATION;
break;
case RETURN_TARGET_VIOLATION:
dr_violation = DR_RCT_RETURN_VIOLATION;
break;
case RETURN_DIRECT_RCT_VIOLATION:
ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
dr_violation = DR_UNKNOWN_VIOLATION;
break;
case INDIRECT_CALL_RCT_VIOLATION:
dr_violation = DR_RCT_INDIRECT_CALL_VIOLATION;
break;
case INDIRECT_JUMP_RCT_VIOLATION:
dr_violation = DR_RCT_INDIRECT_JUMP_VIOLATION;
break;
default:
ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
dr_violation = DR_UNKNOWN_VIOLATION;
break;
}
switch (*action) {
case ACTION_TERMINATE_PROCESS:
dr_action = DR_VIOLATION_ACTION_KILL_PROCESS;
break;
case ACTION_CONTINUE:
dr_action = DR_VIOLATION_ACTION_CONTINUE;
break;
case ACTION_TERMINATE_THREAD:
dr_action = DR_VIOLATION_ACTION_KILL_THREAD;
break;
case ACTION_THROW_EXCEPTION:
dr_action = DR_VIOLATION_ACTION_THROW_EXCEPTION;
break;
default:
ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
dr_action = DR_VIOLATION_ACTION_CONTINUE;
break;
}
dr_action_original = dr_action;
/* NOTE - last->tag should be valid here (even if the frag is fake since the
* coarse wrappers set the tag). FIXME - for traces we really want the bb tag not
* the trace tag, should get that. Of course the only real reason we pass source
* tag is because we can't always give a valid source_pc. */
/* Note that the last registered function gets the final crack at
* changing the action.
*/
call_all(security_violation_callbacks,
int (*)(void *, void *, app_pc, app_pc, dr_security_violation_type_t,
dr_mcontext_t *, dr_security_violation_action_t *),
(void *)dcontext, last->tag, source_pc, target_pc,
dr_violation, &dr_mcontext, &dr_action);
if (dr_action != dr_action_original) {
switch(dr_action) {
case DR_VIOLATION_ACTION_KILL_PROCESS:
*action = ACTION_TERMINATE_PROCESS;
break;
case DR_VIOLATION_ACTION_KILL_THREAD:
*action = ACTION_TERMINATE_THREAD;
break;
case DR_VIOLATION_ACTION_THROW_EXCEPTION:
*action = ACTION_THROW_EXCEPTION;
break;
case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT:
/* FIXME - not safe to implement till case 7380 is fixed. */
CLIENT_ASSERT(false, "action DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT "
"not yet supported.");
/* note - no break, fall through */
case DR_VIOLATION_ACTION_CONTINUE:
*action = ACTION_CONTINUE;
break;
default:
CLIENT_ASSERT(false, "Security violation event callback returned invalid "
"action value.");
}
}
}
#endif
/* Notify the client of a nudge. */
void
instrument_nudge(dcontext_t *dcontext, client_id_t id, uint64 arg)
{
size_t i;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL && dcontext != GLOBAL_DCONTEXT &&
dcontext == get_thread_private_dcontext());
/* synch_with_all_threads and flush API assume that client nudge threads
* hold no dr locks and are !couldbelinking while in client lib code */
ASSERT_OWN_NO_LOCKS();
ASSERT(!is_couldbelinking(dcontext));
/* find the client the nudge is intended for */
for (i=0; i<num_client_libs; i++) {
/* until we have nudge-arg support (PR 477454), nudges target the 1st client */
if (IF_VMX86_ELSE(true, client_libs[i].id == id)) {
break;
}
}
if (i == num_client_libs || client_libs[i].nudge_callbacks.num == 0)
return;
#ifdef WINDOWS
/* count the number of nudge events so we can make sure they're
* all finished before exiting
*/
mutex_lock(&client_thread_count_lock);
if (block_client_nudge_threads) {
/* FIXME - would be nice if there was a way to let the external agent know that
* the nudge event wasn't delivered (but this only happens when the process
* is detaching or exiting). */
mutex_unlock(&client_thread_count_lock);
return;
}
/* atomic to avoid locking around the dec */
ATOMIC_INC(int, num_client_nudge_threads);
mutex_unlock(&client_thread_count_lock);
/* We need to mark this as a client controlled thread for synch_with_all_threads
* and otherwise treat it as native. Xref PR 230836 on what to do if this
* thread hits native_exec_syscalls hooks.
* XXX: this requires extra checks for "not a nudge thread" after IS_CLIENT_THREAD
* in get_stack_bounds() and instrument_thread_exit_event(): maybe better
* to have synchall checks do extra checks and have IS_CLIENT_THREAD be
* false for nudge threads at exit time?
*/
dcontext->client_data->is_client_thread = true;
dcontext->thread_record->under_dynamo_control = false;
#else
/* support calling dr_get_mcontext() on this thread. the app
* context should be intact in the current mcontext except
* pc which we set from next_tag.
*/
CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext,
"internal inconsistency in where mcontext is");
dcontext->client_data->mcontext_in_dcontext = true;
/* officially get_mcontext() doesn't always set pc: we do anyway */
get_mcontext(dcontext)->pc = dcontext->next_tag;
#endif
call_all(client_libs[i].nudge_callbacks, int (*)(void *, uint64),
(void *)dcontext, arg);
#ifdef UNIX
dcontext->client_data->mcontext_in_dcontext = false;
#else
dcontext->thread_record->under_dynamo_control = true;
dcontext->client_data->is_client_thread = false;
ATOMIC_DEC(int, num_client_nudge_threads);
#endif
}
int
get_num_client_threads(void)
{
int num = IF_WINDOWS_ELSE(num_client_nudge_threads, 0);
# ifdef CLIENT_SIDELINE
num += num_client_sideline_threads;
# endif
return num;
}
#ifdef WINDOWS
/* wait for all nudges to finish */
void
wait_for_outstanding_nudges()
{
/* block any new nudge threads from starting */
mutex_lock(&client_thread_count_lock);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
block_client_nudge_threads = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
DOLOG(1, LOG_TOP, {
if (num_client_nudge_threads > 0) {
LOG(GLOBAL, LOG_TOP, 1,
"Waiting for %d nudges to finish - app is about to kill all threads "
"except the current one.\n", num_client_nudge_threads);
}
});
/* don't wait if the client requested exit: after all the client might
* have done so from a nudge, and if the client does want to exit it's
* its own problem if it misses nudges (and external nudgers should use
* a finite timeout)
*/
if (client_requested_exit) {
mutex_unlock(&client_thread_count_lock);
return;
}
while (num_client_nudge_threads > 0) {
/* yield with lock released to allow nudges to finish */
mutex_unlock(&client_thread_count_lock);
dr_thread_yield();
mutex_lock(&client_thread_count_lock);
}
mutex_unlock(&client_thread_count_lock);
}
#endif /* WINDOWS */
/****************************************************************************/
/* EXPORTED ROUTINES */
DR_API
/* Creates a DR context that can be used in a standalone program.
* WARNING: this context cannot be used as the drcontext for a thread
* running under DR control! It is only for standalone programs that
* wish to use DR as a library of disassembly, etc. routines.
*/
void *
dr_standalone_init(void)
{
dcontext_t *dcontext = standalone_init();
return (void *) dcontext;
}
DR_API
/* Aborts the process immediately */
void
dr_abort(void)
{
if (TEST(DUMPCORE_DR_ABORT, dynamo_options.dumpcore_mask))
os_dump_core("dr_abort");
os_terminate(NULL, TERMINATE_PROCESS);
}
DR_API
void
dr_exit_process(int exit_code)
{
dcontext_t *dcontext = get_thread_private_dcontext();
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
/* Prevent cleanup from waiting for nudges as this may be called
* from a nudge!
* Also suppress leak asserts, as it's hard to clean up from
* some situations (such as DrMem -crash_at_error).
*/
client_requested_exit = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
#ifdef WINDOWS
if (dcontext != NULL && dcontext->nudge_target != NULL) {
        /* we need to free the nudge thread stack, which may involve
         * switching stacks, so we have the nudge thread invoke
* os_terminate for us
*/
nudge_thread_cleanup(dcontext, true/*kill process*/, exit_code);
CLIENT_ASSERT(false, "shouldn't get here");
}
#endif
if (!is_currently_on_dstack(dcontext)
IF_UNIX(&& !is_currently_on_sigaltstack(dcontext))) {
/* if on app stack or sigaltstack, avoid incorrect leak assert at exit */
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
dr_api_exit = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); /* to keep properly nested */
}
os_terminate_with_code(dcontext, /* dcontext is required */
TERMINATE_CLEANUP|TERMINATE_PROCESS, exit_code);
CLIENT_ASSERT(false, "shouldn't get here");
}
DR_API
bool
dr_create_memory_dump(dr_memory_dump_spec_t *spec)
{
if (spec->size != sizeof(dr_memory_dump_spec_t))
return false;
#ifdef WINDOWS
if (TEST(DR_MEMORY_DUMP_LDMP, spec->flags))
return os_dump_core_live(spec->label, spec->ldmp_path, spec->ldmp_path_size);
#endif
return false;
}
DR_API
/* Returns true if all DynamoRIO caches are thread private. */
bool
dr_using_all_private_caches(void)
{
return !SHARED_FRAGMENTS_ENABLED();
}
DR_API
void
dr_request_synchronized_exit(void)
{
SYSLOG_INTERNAL_WARNING_ONCE("dr_request_synchronized_exit deprecated: "
"use dr_set_process_exit_behavior instead");
}
DR_API
void
dr_set_process_exit_behavior(dr_exit_flags_t flags)
{
if ((!DYNAMO_OPTION(multi_thread_exit) && TEST(DR_EXIT_MULTI_THREAD, flags)) ||
(DYNAMO_OPTION(multi_thread_exit) && !TEST(DR_EXIT_MULTI_THREAD, flags))) {
options_make_writable();
dynamo_options.multi_thread_exit = TEST(DR_EXIT_MULTI_THREAD, flags);
options_restore_readonly();
}
if ((!DYNAMO_OPTION(skip_thread_exit_at_exit) &&
TEST(DR_EXIT_SKIP_THREAD_EXIT, flags)) ||
(DYNAMO_OPTION(skip_thread_exit_at_exit) &&
!TEST(DR_EXIT_SKIP_THREAD_EXIT, flags))) {
options_make_writable();
dynamo_options.skip_thread_exit_at_exit = TEST(DR_EXIT_SKIP_THREAD_EXIT, flags);
options_restore_readonly();
}
}
DR_API
/* Returns the option string passed along with a client path via DR's
* -client_lib option.
*/
/* i#1736: we now token-delimit with quotes, but for backward compat we need to
* pass a version w/o quotes for dr_get_options().
*/
const char *
dr_get_options(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
/* If we already converted, pass the result */
if (client_libs[i].legacy_options[0] != '\0' ||
client_libs[i].options[0] == '\0')
return client_libs[i].legacy_options;
/* For backward compatibility, we need to remove the token-delimiting
* quotes. We tokenize, and then re-assemble the flat string.
* i#1755: however, for legacy custom frontends that are not re-quoting
* like drrun now is, we need to avoid removing any quotes from the
* original strings. We try to detect this by assuming a frontend will
* either re-quote everything or nothing. Ideally we would check all
* args, but that would require plumbing info from getword() or
* duplicating its functionality: so instead our heuristic is just checking
* the first and last chars.
*/
if (!char_is_quote(client_libs[i].options[0]) ||
                /* Empty string already detected above */
!char_is_quote(client_libs[i].options[strlen(client_libs[i].
options)-1])) {
/* At least one arg is not quoted => better use original */
snprintf(client_libs[i].legacy_options,
BUFFER_SIZE_ELEMENTS(client_libs[i].legacy_options),
"%s", client_libs[i].options);
} else {
int j;
size_t sofar = 0;
for (j = 1/*skip client lib*/; j < client_libs[i].argc; j++) {
if (!print_to_buffer(client_libs[i].legacy_options,
BUFFER_SIZE_ELEMENTS(client_libs[i].
legacy_options),
&sofar, "%s%s", (j == 1) ? "" : " ",
client_libs[i].argv[j]))
break;
}
}
NULL_TERMINATE_BUFFER(client_libs[i].legacy_options);
return client_libs[i].legacy_options;
}
}
CLIENT_ASSERT(false, "dr_get_options(): invalid client id");
return NULL;
}
DR_API
bool
dr_get_option_array(client_id_t id, int *argc OUT, const char ***argv OUT)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
*argc = client_libs[i].argc;
*argv = client_libs[i].argv;
return true;
}
}
CLIENT_ASSERT(false, "dr_get_option_array(): invalid client id");
return false;
}
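/* Illustrative sketch (not part of the build): a client would typically read its
 * arguments once at initialization. The helper name my_client_init is a placeholder;
 * argv[0] is the client library path, so option parsing starts at index 1:
 *
 *   static void
 *   my_client_init(client_id_t id)
 *   {
 *       int argc;
 *       const char **argv;
 *       if (dr_get_option_array(id, &argc, &argv)) {
 *           int i;
 *           for (i = 1; i < argc; i++) {
 *               ... argv[i] is one client option token ...
 *           }
 *       }
 *   }
 */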
DR_API
/* Returns the path to the client library. Client must pass its ID */
const char *
dr_get_client_path(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return client_libs[i].path;
}
}
CLIENT_ASSERT(false, "dr_get_client_path(): invalid client id");
return NULL;
}
DR_API
byte *
dr_get_client_base(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return client_libs[i].start;
}
}
CLIENT_ASSERT(false, "dr_get_client_base(): invalid client id");
return NULL;
}
DR_API
bool
dr_set_client_name(const char *name, const char *report_URL)
{
/* Although set_exception_strings() accepts NULL, clients should pass real vals. */
if (name == NULL || report_URL == NULL)
return false;
set_exception_strings(name, report_URL);
return true;
}
bool
dr_set_client_version_string(const char *version)
{
if (version == NULL)
return false;
set_display_version(version);
return true;
}
DR_API const char *
dr_get_application_name(void)
{
#ifdef UNIX
return get_application_short_name();
#else
return get_application_short_unqualified_name();
#endif
}
DR_API process_id_t
dr_get_process_id(void)
{
return (process_id_t) get_process_id();
}
#ifdef UNIX
DR_API
process_id_t
dr_get_parent_id(void)
{
return get_parent_id();
}
#endif
#ifdef WINDOWS
DR_API
process_id_t
dr_convert_handle_to_pid(HANDLE process_handle)
{
ASSERT(POINTER_MAX == INVALID_PROCESS_ID);
return process_id_from_handle(process_handle);
}
DR_API
HANDLE
dr_convert_pid_to_handle(process_id_t pid)
{
return process_handle_from_id(pid);
}
DR_API
/**
* Returns information about the version of the operating system.
* Returns whether successful.
*/
bool
dr_get_os_version(dr_os_version_info_t *info)
{
int ver;
uint sp_major, sp_minor;
get_os_version_ex(&ver, &sp_major, &sp_minor);
if (info->size > offsetof(dr_os_version_info_t, version)) {
switch (ver) {
case WINDOWS_VERSION_10_1607: info->version = DR_WINDOWS_VERSION_10_1607; break;
case WINDOWS_VERSION_10_1511: info->version = DR_WINDOWS_VERSION_10_1511; break;
case WINDOWS_VERSION_10: info->version = DR_WINDOWS_VERSION_10; break;
case WINDOWS_VERSION_8_1: info->version = DR_WINDOWS_VERSION_8_1; break;
case WINDOWS_VERSION_8: info->version = DR_WINDOWS_VERSION_8; break;
case WINDOWS_VERSION_7: info->version = DR_WINDOWS_VERSION_7; break;
case WINDOWS_VERSION_VISTA: info->version = DR_WINDOWS_VERSION_VISTA; break;
case WINDOWS_VERSION_2003: info->version = DR_WINDOWS_VERSION_2003; break;
case WINDOWS_VERSION_XP: info->version = DR_WINDOWS_VERSION_XP; break;
case WINDOWS_VERSION_2000: info->version = DR_WINDOWS_VERSION_2000; break;
case WINDOWS_VERSION_NT: info->version = DR_WINDOWS_VERSION_NT; break;
default: CLIENT_ASSERT(false, "unsupported windows version");
};
} else
return false; /* struct too small for any info */
if (info->size > offsetof(dr_os_version_info_t, service_pack_major)) {
info->service_pack_major = sp_major;
if (info->size > offsetof(dr_os_version_info_t, service_pack_minor)) {
info->service_pack_minor = sp_minor;
}
}
return true;
}
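/* Illustrative sketch (not part of the build): callers are expected to fill in the
 * size field before querying, since that is how version compatibility is checked:
 *
 *   dr_os_version_info_t info;
 *   info.size = sizeof(info);
 *   if (dr_get_os_version(&info)) {
 *       ... branch on info.version and info.service_pack_major ...
 *   }
 */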
DR_API
bool
dr_is_wow64(void)
{
return is_wow64_process(NT_CURRENT_PROCESS);
}
DR_API
void *
dr_get_app_PEB(void)
{
return get_own_peb();
}
#endif
DR_API
/* Retrieves the current time */
void
dr_get_time(dr_time_t *time)
{
convert_millis_to_date(query_time_millis(), time);
}
DR_API
uint64
dr_get_milliseconds(void)
{
return query_time_millis();
}
DR_API
uint64
dr_get_microseconds(void)
{
return query_time_micros();
}
DR_API
uint
dr_get_random_value(uint max)
{
return (uint) get_random_offset(max);
}
DR_API
void
dr_set_random_seed(uint seed)
{
set_random_seed(seed);
}
DR_API
uint
dr_get_random_seed(void)
{
return get_random_seed();
}
/***************************************************************************
* MEMORY ALLOCATION
*
* XXX i#774: once we split vmheap from vmcode, we need to make
* dr_thread_alloc(), dr_global_alloc(), and dr_nonheap_alloc()
* all allocate vmcode-reachable memory. Library-redirected
* allocations do not need to be reachable.
*/
DR_API
/* Allocates memory from DR's memory pool specific to the
* thread associated with drcontext.
*/
void *
dr_thread_alloc(void *drcontext, size_t size)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
return heap_alloc(dcontext, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Frees thread-specific memory allocated by dr_thread_alloc.
* size must be the same size passed to dr_thread_alloc.
*/
void
dr_thread_free(void *drcontext, void *mem, size_t size)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_thread_free: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_thread_free: drcontext is invalid");
heap_free(dcontext, mem, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Allocates memory from DR's global memory pool.
*/
void *
dr_global_alloc(size_t size)
{
return global_heap_alloc(size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Frees memory allocated by dr_global_alloc.
* size must be the same size passed to dr_global_alloc.
*/
void
dr_global_free(void *mem, size_t size)
{
global_heap_free(mem, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* PR 352427: API routine to allocate executable memory */
void *
dr_nonheap_alloc(size_t size, uint prot)
{
return heap_mmap_ex(size, size, prot, false/*no guard pages*/);
}
DR_API
void
dr_nonheap_free(void *mem, size_t size)
{
heap_munmap_ex(mem, size, false/*no guard pages*/);
}
static void *
raw_mem_alloc(size_t size, uint prot, void *addr, dr_alloc_flags_t flags)
{
byte *p;
heap_error_code_t error_code;
CLIENT_ASSERT(ALIGNED(addr, PAGE_SIZE), "addr is not page size aligned");
if (!TEST(DR_ALLOC_NON_DR, flags)) {
/* memory alloc/dealloc and updating DR list must be atomic */
dynamo_vm_areas_lock(); /* if already hold lock this is a nop */
}
addr = (void *)ALIGN_BACKWARD(addr, PAGE_SIZE);
size = ALIGN_FORWARD(size, PAGE_SIZE);
#ifdef WINDOWS
if (TEST(DR_ALLOC_LOW_2GB, flags)) {
CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"cannot combine commit-only and low-2GB");
p = os_heap_reserve_in_region(NULL, (byte *)(ptr_uint_t)0x80000000, size,
&error_code, TEST(DR_MEMPROT_EXEC, flags));
if (p != NULL && !TEST(DR_ALLOC_RESERVE_ONLY, flags)) {
if (!os_heap_commit(p, size, prot, &error_code)) {
os_heap_free(p, size, &error_code);
p = NULL;
}
}
} else
#endif
{
/* We specify that DR_ALLOC_LOW_2GB only applies to x64, so it's
* ok that the Linux kernel will ignore MAP_32BIT for 32-bit.
*/
#ifdef UNIX
uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0;
#else
uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY :
(TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0);
#endif
if (IF_WINDOWS(TEST(DR_ALLOC_COMMIT_ONLY, flags) &&)
addr != NULL &&
!app_memory_pre_alloc(get_thread_private_dcontext(), addr, size, prot,
false))
p = NULL;
else
p = os_raw_mem_alloc(addr, size, prot, os_flags, &error_code);
}
if (p != NULL) {
if (TEST(DR_ALLOC_NON_DR, flags)) {
all_memory_areas_lock();
update_all_memory_areas(p, p+size, prot, DR_MEMTYPE_DATA);
all_memory_areas_unlock();
} else {
/* this routine updates allmem for us: */
add_dynamo_vm_area((app_pc)p, ((app_pc)p)+size, prot,
true _IF_DEBUG("fls cb in private lib"));
}
}
if (!TEST(DR_ALLOC_NON_DR, flags))
dynamo_vm_areas_unlock();
return p;
}
static bool
raw_mem_free(void *addr, size_t size, dr_alloc_flags_t flags)
{
bool res;
heap_error_code_t error_code;
byte *p = addr;
#ifdef UNIX
uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0;
#else
uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY :
(TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0);
#endif
size = ALIGN_FORWARD(size, PAGE_SIZE);
if (TEST(DR_ALLOC_NON_DR, flags)) {
/* use lock to avoid racy update on parallel memory allocation,
* e.g. allocation from another thread at p happens after os_heap_free
* but before remove_from_all_memory_areas
*/
all_memory_areas_lock();
} else {
/* memory alloc/dealloc and updating DR list must be atomic */
dynamo_vm_areas_lock(); /* if already hold lock this is a nop */
}
res = os_raw_mem_free(p, size, os_flags, &error_code);
if (TEST(DR_ALLOC_NON_DR, flags)) {
remove_from_all_memory_areas(p, p + size);
all_memory_areas_unlock();
} else {
/* this routine updates allmem for us: */
remove_dynamo_vm_area((app_pc)addr, ((app_pc)addr)+size);
}
if (!TEST(DR_ALLOC_NON_DR, flags))
dynamo_vm_areas_unlock();
return res;
}
DR_API
void *
dr_raw_mem_alloc(size_t size, uint prot, void *addr)
{
return raw_mem_alloc(size, prot, addr, DR_ALLOC_NON_DR);
}
DR_API
bool
dr_raw_mem_free(void *addr, size_t size)
{
return raw_mem_free(addr, size, DR_ALLOC_NON_DR);
}
static void *
custom_memory_shared(bool alloc, void *drcontext, dr_alloc_flags_t flags, size_t size,
uint prot, void *addr, bool *free_res)
{
CLIENT_ASSERT(alloc || free_res != NULL, "must ask for free_res on free");
CLIENT_ASSERT(alloc || addr != NULL, "cannot free NULL");
CLIENT_ASSERT(!TESTALL(DR_ALLOC_NON_DR|DR_ALLOC_CACHE_REACHABLE, flags),
"dr_custom_alloc: cannot combine non-DR and cache-reachable");
CLIENT_ASSERT(!alloc || TEST(DR_ALLOC_FIXED_LOCATION, flags) || addr == NULL,
"dr_custom_alloc: address only honored for fixed location");
#ifdef WINDOWS
CLIENT_ASSERT(!TESTANY(DR_ALLOC_RESERVE_ONLY | DR_ALLOC_COMMIT_ONLY, flags) ||
TESTALL(DR_ALLOC_NON_HEAP|DR_ALLOC_NON_DR, flags),
"dr_custom_alloc: reserve/commit-only are only for non-DR non-heap");
CLIENT_ASSERT(!TEST(DR_ALLOC_RESERVE_ONLY, flags) ||
!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"dr_custom_alloc: cannot combine reserve-only + commit-only");
#endif
if (TEST(DR_ALLOC_NON_HEAP, flags)) {
CLIENT_ASSERT(drcontext == NULL,
"dr_custom_alloc: drcontext must be NULL for non-heap");
CLIENT_ASSERT(!TEST(DR_ALLOC_THREAD_PRIVATE, flags),
"dr_custom_alloc: non-heap cannot be thread-private");
CLIENT_ASSERT(!TESTALL(DR_ALLOC_CACHE_REACHABLE|DR_ALLOC_LOW_2GB, flags),
"dr_custom_alloc: cannot combine low-2GB and cache-reachable");
#ifdef WINDOWS
CLIENT_ASSERT(addr != NULL || !TEST(DR_ALLOC_COMMIT_ONLY, flags),
"dr_custom_alloc: commit-only requires non-NULL addr");
#endif
if (TEST(DR_ALLOC_LOW_2GB, flags)) {
#ifdef WINDOWS
CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"dr_custom_alloc: cannot combine commit-only and low-2GB");
#endif
CLIENT_ASSERT(!alloc || addr == NULL,
"dr_custom_alloc: cannot pass an addr with low-2GB");
/* Even if not non-DR, easier to allocate via raw */
if (alloc)
return raw_mem_alloc(size, prot, addr, flags);
else
*free_res = raw_mem_free(addr, size, flags);
} else if (TEST(DR_ALLOC_NON_DR, flags)) {
/* ok for addr to be NULL */
if (alloc)
return raw_mem_alloc(size, prot, addr, flags);
else
*free_res = raw_mem_free(addr, size, flags);
} else { /* including DR_ALLOC_CACHE_REACHABLE */
CLIENT_ASSERT(!alloc || !TEST(DR_ALLOC_CACHE_REACHABLE, flags) ||
addr == NULL,
"dr_custom_alloc: cannot ask for addr and cache-reachable");
/* This flag is here solely so we know which version of free to call */
if (TEST(DR_ALLOC_FIXED_LOCATION, flags)) {
CLIENT_ASSERT(addr != NULL,
"dr_custom_alloc: fixed location requires an address");
if (alloc)
return raw_mem_alloc(size, prot, addr, 0);
else
*free_res = raw_mem_free(addr, size, 0);
} else {
if (alloc)
return dr_nonheap_alloc(size, prot);
else {
*free_res = true;
dr_nonheap_free(addr, size);
}
}
}
} else {
if (!alloc)
*free_res = true;
CLIENT_ASSERT(!alloc || addr == NULL,
"dr_custom_alloc: cannot pass an addr for heap memory");
CLIENT_ASSERT(drcontext == NULL || TEST(DR_ALLOC_THREAD_PRIVATE, flags),
"dr_custom_alloc: drcontext must be NULL for global heap");
CLIENT_ASSERT(!TEST(DR_ALLOC_LOW_2GB, flags),
"dr_custom_alloc: cannot ask for heap in low 2GB");
CLIENT_ASSERT(!TEST(DR_ALLOC_NON_DR, flags),
"dr_custom_alloc: cannot ask for non-DR heap memory");
/* for now it's all cache-reachable so we ignore DR_ALLOC_CACHE_REACHABLE */
if (TEST(DR_ALLOC_THREAD_PRIVATE, flags)) {
if (alloc)
return dr_thread_alloc(drcontext, size);
else
dr_thread_free(drcontext, addr, size);
} else {
if (alloc)
return dr_global_alloc(size);
else
dr_global_free(addr, size);
}
}
return NULL;
}
DR_API
void *
dr_custom_alloc(void *drcontext, dr_alloc_flags_t flags, size_t size,
uint prot, void *addr)
{
return custom_memory_shared(true, drcontext, flags, size, prot, addr, NULL);
}
DR_API
bool
dr_custom_free(void *drcontext, dr_alloc_flags_t flags, void *addr, size_t size)
{
bool res;
custom_memory_shared(false, drcontext, flags, size, 0, addr, &res);
return res;
}
#ifdef UNIX
DR_API
/* With ld's -wrap option, we can supply a replacement for malloc.
* This routine allocates memory from DR's global memory pool. Unlike
* dr_global_alloc(), however, we store the size of the allocation in
* the first few bytes so __wrap_free() can retrieve it.
*/
void *
__wrap_malloc(size_t size)
{
return redirect_malloc(size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for realloc.
* This routine allocates memory from DR's global memory pool. Unlike
* dr_global_alloc(), however, we store the size of the allocation in
* the first few bytes so __wrap_free() can retrieve it.
*/
void *
__wrap_realloc(void *mem, size_t size)
{
return redirect_realloc(mem, size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for calloc.
* This routine allocates memory from DR's global memory pool. Unlike
* dr_global_alloc(), however, we store the size of the allocation in
* the first few bytes so __wrap_free() can retrieve it.
*/
void *
__wrap_calloc(size_t nmemb, size_t size)
{
return redirect_calloc(nmemb, size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for free. This
 * routine frees memory allocated by the __wrap_* allocation routines above and
 * expects the allocation size to be available in the few bytes before 'mem'.
*/
void
__wrap_free(void *mem)
{
redirect_free(mem);
}
#endif
DR_API
bool
dr_memory_protect(void *base, size_t size, uint new_prot)
{
/* We do allow the client to modify DR memory, for allocating a
* region and later making it unwritable. We should probably
* allow modifying ntdll, since our general model is to trust the
* client and let it shoot itself in the foot, but that would require
* passing in extra args to app_memory_protection_change() to ignore
* the patch_proof_list: and maybe it is safer to disallow client
* from putting hooks in ntdll.
*/
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (!dynamo_vm_area_overlap(base, ((byte *)base) + size)) {
uint mod_prot = new_prot;
uint res = app_memory_protection_change(get_thread_private_dcontext(),
base, size, new_prot, &mod_prot, NULL);
if (res != DO_APP_MEM_PROT_CHANGE) {
if (res == FAIL_APP_MEM_PROT_CHANGE ||
res == PRETEND_APP_MEM_PROT_CHANGE) {
return false;
} else {
/* SUBSET_APP_MEM_PROT_CHANGE should only happen for
* PROGRAM_SHEPHERDING. FIXME: not sure how common
* this will be: for now we just fail.
*/
return false;
}
}
CLIENT_ASSERT(mod_prot == new_prot, "internal error on dr_memory_protect()");
}
return set_protection(base, size, new_prot);
}
DR_API
size_t
dr_page_size(void)
{
return os_page_size();
}
DR_API
/* checks to see that all bytes with addresses from pc to pc+size-1
* are readable and that reading from there won't generate an exception.
*/
bool
dr_memory_is_readable(const byte *pc, size_t size)
{
return is_readable_without_exception(pc, size);
}
DR_API
/* OS neutral memory query for clients, just wrapper around our get_memory_info(). */
bool
dr_query_memory(const byte *pc, byte **base_pc, size_t *size, uint *prot)
{
uint real_prot;
bool res;
#if defined(UNIX) && defined(HAVE_MEMINFO)
/* xref PR 246897 - the cached all memory list can have problems when
     * out-of-process entities change the mappings. For now we use the
     * from-OS version instead (even though it's slower, and only if we have
     * HAVE_MEMINFO_MAPS support). FIXME
* XXX i#853: We could decide allmem vs os with the use_all_memory_areas
* option.
*/
res = get_memory_info_from_os(pc, base_pc, size, &real_prot);
#else
res = get_memory_info(pc, base_pc, size, &real_prot);
#endif
if (prot != NULL) {
if (is_pretend_or_executable_writable((app_pc)pc)) {
/* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod
* as executable-but-writable and we'll come here.
*/
real_prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE;
}
*prot = real_prot;
}
return res;
}
DR_API
bool
dr_query_memory_ex(const byte *pc, OUT dr_mem_info_t *info)
{
bool res;
#if defined(UNIX) && defined(HAVE_MEMINFO)
/* PR 246897: all_memory_areas not ready for prime time */
res = query_memory_ex_from_os(pc, info);
#else
res = query_memory_ex(pc, info);
#endif
if (is_pretend_or_executable_writable((app_pc)pc)) {
/* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod
* as executable-but-writable and we'll come here.
*/
info->prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE;
}
return res;
}
DR_API
/* Wrapper around our safe_read. Xref P4 198875, placeholder till we have try/except */
bool
dr_safe_read(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
return safe_read_ex(base, size, out_buf, bytes_read);
}
DR_API
/* Wrapper around our safe_write. Xref P4 198875, placeholder till we have try/except */
bool
dr_safe_write(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
return safe_write_ex(base, size, in_buf, bytes_written);
}
DR_API
void
dr_try_setup(void *drcontext, void **try_cxt)
{
/* Yes we're duplicating the code from the TRY() macro but this
* provides better abstraction and lets us change our impl later
* vs exposing that macro
*/
dcontext_t *dcontext = (dcontext_t *) drcontext;
try_except_context_t *try_state;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext());
ASSERT(try_cxt != NULL);
/* We allocate on the heap to avoid having to expose the try_except_context_t
* and dr_jmp_buf_t structs and be tied to their exact layouts.
* The client is likely to allocate memory inside the try anyway
* if doing a decode or something.
*/
try_state = (try_except_context_t *)
HEAP_TYPE_ALLOC(dcontext, try_except_context_t, ACCT_CLIENT, PROTECTED);
*try_cxt = try_state;
try_state->prev_context = dcontext->try_except.try_except_state;
dcontext->try_except.try_except_state = try_state;
}
/* dr_try_start() is in x86.asm since we can't have an extra frame that's
* going to be torn down between the longjmp and the restore point
*/
DR_API
void
dr_try_stop(void *drcontext, void *try_cxt)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
try_except_context_t *try_state = (try_except_context_t *) try_cxt;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext());
ASSERT(try_state != NULL);
POP_TRY_BLOCK(&dcontext->try_except, *try_state);
HEAP_TYPE_FREE(dcontext, try_state, try_except_context_t, ACCT_CLIENT, PROTECTED);
}
DR_API
bool
dr_memory_is_dr_internal(const byte *pc)
{
return is_dynamo_address((app_pc)pc);
}
DR_API
bool
dr_memory_is_in_client(const byte *pc)
{
return is_in_client_lib((app_pc)pc);
}
void
instrument_client_lib_loaded(byte *start, byte *end)
{
/* i#852: include Extensions as they are really part of the clients and
* aren't like other private libs.
* XXX: we only avoid having the client libs on here b/c they're specified via
* full path and don't go through the loaders' locate routines.
* Not a big deal if they do end up on here: if they always did we could
* remove the linear walk in is_in_client_lib().
*/
/* called prior to instrument_init() */
init_client_aux_libs();
vmvector_add(client_aux_libs, start, end, NULL/*not an auxlib*/);
}
void
instrument_client_lib_unloaded(byte *start, byte *end)
{
/* called after instrument_exit() */
if (client_aux_libs != NULL)
vmvector_remove(client_aux_libs, start, end);
}
/**************************************************
* CLIENT AUXILIARY LIBRARIES
*/
DR_API
dr_auxlib_handle_t
dr_load_aux_library(const char *name,
byte **lib_start /*OPTIONAL OUT*/,
byte **lib_end /*OPTIONAL OUT*/)
{
byte *start, *end;
dr_auxlib_handle_t lib = load_shared_library(name, true/*reachable*/);
if (shared_library_bounds(lib, NULL, name, &start, &end)) {
/* be sure to replace b/c i#852 now adds during load w/ empty data */
vmvector_add_replace(client_aux_libs, start, end, (void*)lib);
if (lib_start != NULL)
*lib_start = start;
if (lib_end != NULL)
*lib_end = end;
all_memory_areas_lock();
update_all_memory_areas(start, end,
/* XXX: see comment in instrument_init()
* on walking the sections and what prot to use
*/
MEMPROT_READ, DR_MEMTYPE_IMAGE);
all_memory_areas_unlock();
} else {
unload_shared_library(lib);
lib = NULL;
}
return lib;
}
DR_API
dr_auxlib_routine_ptr_t
dr_lookup_aux_library_routine(dr_auxlib_handle_t lib, const char *name)
{
if (lib == NULL)
return NULL;
return lookup_library_routine(lib, name);
}
DR_API
bool
dr_unload_aux_library(dr_auxlib_handle_t lib)
{
byte *start = NULL, *end = NULL;
/* unfortunately on linux w/ dlopen we cannot find the bounds w/o
* either the path or an address so we iterate.
* once we have our private loader we shouldn't need this:
* XXX i#157
*/
vmvector_iterator_t vmvi;
dr_auxlib_handle_t found = NULL;
if (lib == NULL)
return false;
vmvector_iterator_start(client_aux_libs, &vmvi);
while (vmvector_iterator_hasnext(&vmvi)) {
found = (dr_auxlib_handle_t) vmvector_iterator_next(&vmvi, &start, &end);
if (found == lib)
break;
}
vmvector_iterator_stop(&vmvi);
if (found == lib) {
CLIENT_ASSERT(start != NULL && start < end, "logic error");
vmvector_remove(client_aux_libs, start, end);
unload_shared_library(lib);
all_memory_areas_lock();
update_all_memory_areas(start, end, MEMPROT_NONE, DR_MEMTYPE_FREE);
all_memory_areas_unlock();
return true;
} else {
CLIENT_ASSERT(false, "invalid aux lib");
return false;
}
}
#if defined(WINDOWS) && !defined(X64)
/* XXX i#1633: these routines all have 64-bit handle and routine types for
* handling win8's high ntdll64 in the future. For now the implementation
* treats them as 32-bit types and we do not support win8+.
*/
DR_API
dr_auxlib64_handle_t
dr_load_aux_x64_library(const char *name)
{
HANDLE h;
/* We use the x64 system loader. We assume that x64 state is fine being
* interrupted at arbitrary points during x86 execution, and that there
* is little risk of transparency violations.
*/
/* load_library_64() is racy. We don't expect anyone else to load
* x64 libs, but another thread in this client could, so we
* serialize here.
*/
mutex_lock(&client_aux_lib64_lock);
/* XXX: if we switch to our private loader we'll need to add custom
* search support to look in 64-bit system dir
*/
/* XXX: I'd add to the client_aux_libs vector, but w/ the system loader
* loading this I don't know all the dependent libs it might load.
* Not bothering for now.
*/
h = load_library_64(name);
mutex_unlock(&client_aux_lib64_lock);
return (dr_auxlib64_handle_t) h;
}
DR_API
dr_auxlib64_routine_ptr_t
dr_lookup_aux_x64_library_routine(dr_auxlib64_handle_t lib, const char *name)
{
uint64 res = get_proc_address_64((uint64)lib, name);
return (dr_auxlib64_routine_ptr_t) res;
}
DR_API
bool
dr_unload_aux_x64_library(dr_auxlib64_handle_t lib)
{
bool res;
mutex_lock(&client_aux_lib64_lock);
res = free_library_64((HANDLE)(uint)lib); /* uint cast to avoid cl warning */
mutex_unlock(&client_aux_lib64_lock);
return res;
}
#endif
/***************************************************************************
* LOCKS
*/
DR_API
/* Initializes a mutex
*/
void *
dr_mutex_create(void)
{
void *mutex = (void *)HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, mutex_t,
ACCT_CLIENT, UNPROTECTED);
ASSIGN_INIT_LOCK_FREE(*((mutex_t *) mutex), dr_client_mutex);
return mutex;
}
DR_API
/* Deletes mutex
*/
void
dr_mutex_destroy(void *mutex)
{
/* Delete mutex so locks_not_closed()==0 test in dynamo.c passes */
DELETE_LOCK(*((mutex_t *) mutex));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (mutex_t *)mutex, mutex_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
/* Locks mutex
*/
void
dr_mutex_lock(void *mutex)
{
dcontext_t *dcontext = get_thread_private_dcontext();
/* set client_grab_mutex so that we know to set client_thread_safe_for_synch
* around the actual wait for the lock */
if (IS_CLIENT_THREAD(dcontext)) {
dcontext->client_data->client_grab_mutex = mutex;
/* We do this on the outside so that we're conservative wrt races
* in the direction of not killing the thread while it has a lock
*/
dcontext->client_data->mutex_count++;
}
mutex_lock((mutex_t *) mutex);
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_grab_mutex = NULL;
}
DR_API
/* Unlocks mutex
*/
void
dr_mutex_unlock(void *mutex)
{
dcontext_t *dcontext = get_thread_private_dcontext();
mutex_unlock((mutex_t *) mutex);
/* We do this on the outside so that we're conservative wrt races
* in the direction of not killing the thread while it has a lock
*/
if (IS_CLIENT_THREAD(dcontext)) {
CLIENT_ASSERT(dcontext->client_data->mutex_count > 0,
"internal client mutex nesting error");
dcontext->client_data->mutex_count--;
}
}
DR_API
/* Tries once to grab the lock, returns whether or not successful
*/
bool
dr_mutex_trylock(void *mutex)
{
bool success = false;
dcontext_t *dcontext = get_thread_private_dcontext();
/* set client_grab_mutex so that we know to set client_thread_safe_for_synch
* around the actual wait for the lock */
if (IS_CLIENT_THREAD(dcontext)) {
dcontext->client_data->client_grab_mutex = mutex;
/* We do this on the outside so that we're conservative wrt races
* in the direction of not killing the thread while it has a lock
*/
dcontext->client_data->mutex_count++;
}
success = mutex_trylock((mutex_t *) mutex);
if (IS_CLIENT_THREAD(dcontext)) {
if (!success)
dcontext->client_data->mutex_count--;
dcontext->client_data->client_grab_mutex = NULL;
}
return success;
}
DR_API
bool
dr_mutex_self_owns(void *mutex)
{
return IF_DEBUG_ELSE(OWN_MUTEX((mutex_t *)mutex), true);
}
DR_API
bool
dr_mutex_mark_as_app(void *mutex)
{
mutex_t *lock = (mutex_t *) mutex;
mutex_mark_as_app(lock);
return true;
}
DR_API
void *
dr_rwlock_create(void)
{
void *rwlock = (void *) HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, read_write_lock_t,
ACCT_CLIENT, UNPROTECTED);
ASSIGN_INIT_READWRITE_LOCK_FREE(*((read_write_lock_t *)rwlock), dr_client_mutex);
return rwlock;
}
DR_API
void
dr_rwlock_destroy(void *rwlock)
{
DELETE_READWRITE_LOCK(*((read_write_lock_t *) rwlock));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (read_write_lock_t *)rwlock, read_write_lock_t,
ACCT_CLIENT, UNPROTECTED);
}
DR_API
void
dr_rwlock_read_lock(void *rwlock)
{
read_lock((read_write_lock_t *)rwlock);
}
DR_API
void
dr_rwlock_read_unlock(void *rwlock)
{
read_unlock((read_write_lock_t *)rwlock);
}
DR_API
void
dr_rwlock_write_lock(void *rwlock)
{
write_lock((read_write_lock_t *)rwlock);
}
DR_API
void
dr_rwlock_write_unlock(void *rwlock)
{
write_unlock((read_write_lock_t *)rwlock);
}
DR_API
bool
dr_rwlock_write_trylock(void *rwlock)
{
return write_trylock((read_write_lock_t *)rwlock);
}
DR_API
bool
dr_rwlock_self_owns_write_lock(void *rwlock)
{
return self_owns_write_lock((read_write_lock_t *)rwlock);
}
DR_API
bool
dr_rwlock_mark_as_app(void *rwlock)
{
read_write_lock_t *lock = (read_write_lock_t *) rwlock;
mutex_mark_as_app(&lock->lock);
return true;
}
DR_API
void *
dr_recurlock_create(void)
{
void *reclock = (void *) HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, recursive_lock_t,
ACCT_CLIENT, UNPROTECTED);
ASSIGN_INIT_RECURSIVE_LOCK_FREE(*((recursive_lock_t *)reclock), dr_client_mutex);
return reclock;
}
DR_API
void
dr_recurlock_destroy(void *reclock)
{
DELETE_RECURSIVE_LOCK(*((recursive_lock_t *) reclock));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (recursive_lock_t *)reclock, recursive_lock_t,
ACCT_CLIENT, UNPROTECTED);
}
DR_API
void
dr_recurlock_lock(void *reclock)
{
acquire_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
void
dr_recurlock_unlock(void *reclock)
{
release_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
bool
dr_recurlock_trylock(void *reclock)
{
return try_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
bool
dr_recurlock_self_owns(void *reclock)
{
return self_owns_recursive_lock((recursive_lock_t *)reclock);
}
DR_API
bool
dr_recurlock_mark_as_app(void *reclock)
{
recursive_lock_t *lock = (recursive_lock_t *) reclock;
mutex_mark_as_app(&lock->lock);
return true;
}
DR_API
void *
dr_event_create(void)
{
return (void *)create_event();
}
DR_API
bool
dr_event_destroy(void *event)
{
destroy_event((event_t)event);
return true;
}
DR_API
bool
dr_event_wait(void *event)
{
dcontext_t *dcontext = get_thread_private_dcontext();
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = true;
wait_for_event((event_t)event);
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = false;
return true;
}
DR_API
bool
dr_event_signal(void *event)
{
signal_event((event_t)event);
return true;
}
DR_API
bool
dr_event_reset(void *event)
{
reset_event((event_t)event);
return true;
}
DR_API
bool
dr_mark_safe_to_suspend(void *drcontext, bool enter)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
ASSERT_OWN_NO_LOCKS();
/* We need to return so we can't call check_wait_at_safe_spot().
     * We don't set mcontext b/c no one should examine it.
*/
if (enter)
set_synch_state(dcontext, THREAD_SYNCH_NO_LOCKS_NO_XFER);
else
set_synch_state(dcontext, THREAD_SYNCH_NONE);
return true;
}
DR_API
int
dr_atomic_add32_return_sum(volatile int *x, int val)
{
return atomic_add_exchange_int(x, val);
}
/***************************************************************************
* MODULES
*/
DR_API
/* Looks up the module data containing pc. Returns NULL if not found.
* Returned module_data_t must be freed with dr_free_module_data(). */
module_data_t *
dr_lookup_module(byte *pc)
{
module_area_t *area;
module_data_t *client_data;
os_get_module_info_lock();
area = module_pc_lookup(pc);
client_data = copy_module_area_to_module_data(area);
os_get_module_info_unlock();
return client_data;
}
DR_API
module_data_t *
dr_get_main_module(void)
{
return dr_lookup_module(get_image_entry());
}
DR_API
/* Looks up the module with name matching name (ignoring case). Returns NULL if not
* found. Returned module_data_t must be freed with dr_free_module_data(). */
module_data_t *
dr_lookup_module_by_name(const char *name)
{
/* We have no quick way of doing this since our module list is indexed by pc. We
* could use get_module_handle() but that's dangerous to call at arbitrary times,
* so we just walk our full list here. */
module_iterator_t *mi = module_iterator_start();
    CLIENT_ASSERT((name != NULL), "dr_lookup_module_by_name: null name");
while (module_iterator_hasnext(mi)) {
module_area_t *area = module_iterator_next(mi);
module_data_t *client_data;
const char *modname = GET_MODULE_NAME(&area->names);
if (modname != NULL && strcasecmp(modname, name) == 0) {
client_data = copy_module_area_to_module_data(area);
module_iterator_stop(mi);
return client_data;
}
}
module_iterator_stop(mi);
return NULL;
}
typedef struct _client_mod_iterator_list_t {
module_data_t *info;
struct _client_mod_iterator_list_t *next;
} client_mod_iterator_list_t;
typedef struct {
client_mod_iterator_list_t *current;
client_mod_iterator_list_t *full_list;
} client_mod_iterator_t;
DR_API
/* Initialize a new client module iterator. */
dr_module_iterator_t *
dr_module_iterator_start(void)
{
client_mod_iterator_t *client_iterator = (client_mod_iterator_t *)
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_mod_iterator_t, ACCT_CLIENT, UNPROTECTED);
module_iterator_t *dr_iterator = module_iterator_start();
memset(client_iterator, 0, sizeof(*client_iterator));
while (module_iterator_hasnext(dr_iterator)) {
module_area_t *area = module_iterator_next(dr_iterator);
client_mod_iterator_list_t *list = (client_mod_iterator_list_t *)
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_mod_iterator_list_t, ACCT_CLIENT,
UNPROTECTED);
ASSERT(area != NULL);
list->info = copy_module_area_to_module_data(area);
list->next = NULL;
if (client_iterator->current == NULL) {
client_iterator->current = list;
client_iterator->full_list = client_iterator->current;
} else {
client_iterator->current->next = list;
client_iterator->current = client_iterator->current->next;
}
}
module_iterator_stop(dr_iterator);
client_iterator->current = client_iterator->full_list;
return (dr_module_iterator_t)client_iterator;
}
DR_API
/* Returns true if there is another loaded module in the iterator. */
bool
dr_module_iterator_hasnext(dr_module_iterator_t *mi)
{
CLIENT_ASSERT((mi != NULL), "dr_module_iterator_hasnext: null iterator");
return ((client_mod_iterator_t *)mi)->current != NULL;
}
DR_API
/* Retrieves the module_data_t for the next loaded module in the iterator. */
module_data_t *
dr_module_iterator_next(dr_module_iterator_t *mi)
{
module_data_t *data;
client_mod_iterator_t *ci = (client_mod_iterator_t *)mi;
CLIENT_ASSERT((mi != NULL), "dr_module_iterator_next: null iterator");
CLIENT_ASSERT((ci->current != NULL), "dr_module_iterator_next: has no next, use "
"dr_module_iterator_hasnext() first");
if (ci->current == NULL)
return NULL;
data = ci->current->info;
ci->current = ci->current->next;
return data;
}
DR_API
/* Free the module iterator. */
void
dr_module_iterator_stop(dr_module_iterator_t *mi)
{
client_mod_iterator_t *ci = (client_mod_iterator_t *)mi;
CLIENT_ASSERT((mi != NULL), "dr_module_iterator_stop: null iterator");
/* free module_data_t's we didn't give to the client */
while (ci->current != NULL) {
dr_free_module_data(ci->current->info);
ci->current = ci->current->next;
}
ci->current = ci->full_list;
while (ci->current != NULL) {
client_mod_iterator_list_t *next = ci->current->next;
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ci->current, client_mod_iterator_list_t,
ACCT_CLIENT, UNPROTECTED);
ci->current = next;
}
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ci, client_mod_iterator_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
/* Get the name dr uses for this module. */
const char *
dr_module_preferred_name(const module_data_t *data)
{
if (data == NULL)
return NULL;
return GET_MODULE_NAME(&data->names);
}
#ifdef WINDOWS
DR_API
/* If pc is within a section of module lib, returns true and (optionally) a copy of
 * the IMAGE_SECTION_HEADER in section_out. If pc is not within a section of the
 * module, returns false. */
bool
dr_lookup_module_section(module_handle_t lib, byte *pc, IMAGE_SECTION_HEADER *section_out)
{
CLIENT_ASSERT((lib != NULL), "dr_lookup_module_section: null module_handle_t");
return module_pc_section_lookup((app_pc)lib, pc, section_out);
}
#endif
/* i#805: Instead of exposing multiple instruction levels, we expose a way for
* clients to turn off instrumentation. Then DR can avoid a full decode and we
* can save some time on modules that are not interesting.
* XXX: This breaks other clients and extensions, in particular drwrap, which
* can miss call and return sites in the uninstrumented module.
*/
DR_API
bool
dr_module_set_should_instrument(module_handle_t handle, bool should_instrument)
{
module_area_t *ma;
DEBUG_DECLARE(dcontext_t *dcontext = get_thread_private_dcontext());
IF_DEBUG(executable_areas_lock());
os_get_module_info_write_lock();
ma = module_pc_lookup((byte*)handle);
if (ma != NULL) {
/* This kind of obviates the need for handle, but it makes the API more
* explicit.
*/
CLIENT_ASSERT(dcontext->client_data->no_delete_mod_data->handle == handle,
"Do not call dr_module_set_should_instrument() outside "
"of the module's own load event");
ASSERT(!executable_vm_area_executed_from(ma->start, ma->end));
if (should_instrument) {
ma->flags &= ~MODULE_NULL_INSTRUMENT;
} else {
ma->flags |= MODULE_NULL_INSTRUMENT;
}
}
os_get_module_info_write_unlock();
IF_DEBUG(executable_areas_unlock());
return (ma != NULL);
}
DR_API
bool
dr_module_should_instrument(module_handle_t handle)
{
bool should_instrument = true;
module_area_t *ma;
os_get_module_info_lock();
ma = module_pc_lookup((byte*)handle);
CLIENT_ASSERT(ma != NULL, "invalid module handle");
if (ma != NULL) {
should_instrument = !TEST(MODULE_NULL_INSTRUMENT, ma->flags);
}
os_get_module_info_unlock();
return should_instrument;
}
DR_API
/* Returns the entry point of the function with the given name in the module
* with the given handle.
* We're not taking in module_data_t to make it simpler for the client
* to iterate or lookup the module_data_t, store the single-field
* handle, and then free the data right away: besides, module_data_t
* is not an opaque type.
*/
generic_func_t
dr_get_proc_address(module_handle_t lib, const char *name)
{
#ifdef WINDOWS
return get_proc_address_resolve_forward(lib, name);
#else
return get_proc_address(lib, name);
#endif
}
DR_API
bool
dr_get_proc_address_ex(module_handle_t lib, const char *name,
dr_export_info_t *info OUT, size_t info_len)
{
/* If we add new fields we'll check various values of info_len */
if (info == NULL || info_len < sizeof(*info))
return false;
#ifdef WINDOWS
info->address = get_proc_address_resolve_forward(lib, name);
info->is_indirect_code = false;
#else
info->address = get_proc_address_ex(lib, name, &info->is_indirect_code);
#endif
return (info->address != NULL);
}
byte *
dr_map_executable_file(const char *filename, dr_map_executable_flags_t flags,
size_t *size OUT)
{
#ifdef MACOS
/* XXX i#1285: implement private loader on Mac */
return NULL;
#else
modload_flags_t mflags = MODLOAD_NOT_PRIVLIB;
if (TEST(DR_MAPEXE_SKIP_WRITABLE, flags))
mflags |= MODLOAD_SKIP_WRITABLE;
if (filename == NULL)
return NULL;
return privload_map_and_relocate(filename, size, mflags);
#endif
}
bool
dr_unmap_executable_file(byte *base, size_t size)
{
return unmap_file(base, size);
}
DR_API
/* Creates a new directory. Fails if the directory already exists
* or if it can't be created.
*/
bool
dr_create_dir(const char *fname)
{
return os_create_dir(fname, CREATE_DIR_REQUIRE_NEW);
}
DR_API
bool
dr_delete_dir(const char *fname)
{
return os_delete_dir(fname);
}
DR_API
bool
dr_get_current_directory(char *buf, size_t bufsz)
{
return os_get_current_dir(buf, bufsz);
}
DR_API
/* Checks existence of a directory. */
bool
dr_directory_exists(const char *fname)
{
return os_file_exists(fname, true);
}
DR_API
/* Checks for the existence of a file. */
bool
dr_file_exists(const char *fname)
{
return os_file_exists(fname, false);
}
DR_API
/* Opens a file in the mode specified by mode_flags.
* Returns INVALID_FILE if unsuccessful
*/
file_t
dr_open_file(const char *fname, uint mode_flags)
{
uint flags = 0;
if (TEST(DR_FILE_WRITE_REQUIRE_NEW, mode_flags)) {
flags |= OS_OPEN_WRITE | OS_OPEN_REQUIRE_NEW;
}
if (TEST(DR_FILE_WRITE_APPEND, mode_flags)) {
CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected");
flags |= OS_OPEN_WRITE | OS_OPEN_APPEND;
}
if (TEST(DR_FILE_WRITE_OVERWRITE, mode_flags)) {
CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected");
flags |= OS_OPEN_WRITE;
}
if (TEST(DR_FILE_WRITE_ONLY, mode_flags)) {
CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected");
flags |= OS_OPEN_WRITE_ONLY;
}
if (TEST(DR_FILE_READ, mode_flags))
flags |= OS_OPEN_READ;
CLIENT_ASSERT((flags != 0), "dr_open_file: no mode selected");
if (TEST(DR_FILE_ALLOW_LARGE, mode_flags))
flags |= OS_OPEN_ALLOW_LARGE;
if (TEST(DR_FILE_CLOSE_ON_FORK, mode_flags))
flags |= OS_OPEN_CLOSE_ON_FORK;
/* all client-opened files are protected */
return os_open_protected(fname, flags);
}
DR_API
/* Closes file f
*/
void
dr_close_file(file_t f)
{
/* all client-opened files are protected */
os_close_protected(f);
}
DR_API
/* Renames the file src to dst. */
bool
dr_rename_file(const char *src, const char *dst, bool replace)
{
return os_rename_file(src, dst, replace);
}
DR_API
/* Deletes a file. */
bool
dr_delete_file(const char *filename)
{
/* os_delete_mapped_file should be a superset of os_delete_file, so we use
* it.
*/
return os_delete_mapped_file(filename);
}
DR_API
/* Flushes any buffers for file f
*/
void
dr_flush_file(file_t f)
{
os_flush(f);
}
DR_API
/* Writes count bytes from buf to f.
* Returns the actual number written.
*/
ssize_t
dr_write_file(file_t f, const void *buf, size_t count)
{
#ifdef WINDOWS
if ((f == STDOUT || f == STDERR) && print_to_console)
return dr_write_to_console_varg(f == STDOUT, "%.*s", count, buf);
else
#endif
return os_write(f, buf, count);
}
DR_API
/* Reads up to count bytes from f into buf.
* Returns the actual number read.
*/
ssize_t
dr_read_file(file_t f, void *buf, size_t count)
{
return os_read(f, buf, count);
}
DR_API
/* sets the current file position for file f to offset bytes from the specified
 * origin; returns true if successful */
bool
dr_file_seek(file_t f, int64 offset, int origin)
{
CLIENT_ASSERT(origin == DR_SEEK_SET || origin == DR_SEEK_CUR || origin == DR_SEEK_END,
"dr_file_seek: invalid origin value");
return os_seek(f, offset, origin);
}
DR_API
/* gets the current file position for file f in bytes from start of file */
int64
dr_file_tell(file_t f)
{
return os_tell(f);
}
DR_API
file_t
dr_dup_file_handle(file_t f)
{
#ifdef UNIX
/* returns -1 on failure == INVALID_FILE */
return dup_syscall(f);
#else
HANDLE ht = INVALID_HANDLE_VALUE;
NTSTATUS res = duplicate_handle(NT_CURRENT_PROCESS, f, NT_CURRENT_PROCESS,
&ht, SYNCHRONIZE, 0,
DUPLICATE_SAME_ACCESS|DUPLICATE_SAME_ATTRIBUTES);
if (!NT_SUCCESS(res))
return INVALID_FILE;
else
return ht;
#endif
}
DR_API
bool
dr_file_size(file_t fd, OUT uint64 *size)
{
return os_get_file_size_by_handle(fd, size);
}
DR_API
void *
dr_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
uint flags)
{
return (void *)
map_file(f, size, offs, addr, prot,
(TEST(DR_MAP_PRIVATE, flags) ? MAP_FILE_COPY_ON_WRITE : 0) |
IF_WINDOWS((TEST(DR_MAP_IMAGE, flags) ? MAP_FILE_IMAGE : 0) |)
IF_UNIX((TEST(DR_MAP_FIXED, flags) ? MAP_FILE_FIXED : 0) |)
(TEST(DR_MAP_CACHE_REACHABLE, flags) ? MAP_FILE_REACHABLE : 0));
}
DR_API
bool
dr_unmap_file(void *map, size_t size)
{
dr_mem_info_t info;
CLIENT_ASSERT(ALIGNED(map, PAGE_SIZE),
"dr_unmap_file: map is not page aligned");
if (!dr_query_memory_ex(map, &info) /* fail to query */ ||
info.type == DR_MEMTYPE_FREE /* not mapped file */) {
CLIENT_ASSERT(false, "dr_unmap_file: incorrect file map");
return false;
}
#ifdef WINDOWS
/* On Windows, the whole file will be unmapped instead, so we adjust
* the bound to make sure vm_areas are updated correctly.
*/
map = info.base_pc;
if (info.type == DR_MEMTYPE_IMAGE) {
size = get_allocation_size(map, NULL);
} else
size = info.size;
#endif
return unmap_file((byte *) map, size);
}
DR_API
void
dr_log(void *drcontext, uint mask, uint level, const char *fmt, ...)
{
#ifdef DEBUG
dcontext_t *dcontext = (dcontext_t *) drcontext;
va_list ap;
if (stats != NULL &&
((stats->logmask & mask) == 0 ||
stats->loglevel < level))
return;
va_start(ap, fmt);
if (dcontext != NULL)
do_file_write(dcontext->logfile, fmt, ap);
else
do_file_write(main_logfile, fmt, ap);
va_end(ap);
#else
return; /* no logging if not debug */
#endif
}
DR_API
/* Returns the log file for the drcontext thread.
* If drcontext is NULL, returns the main log file.
*/
file_t
dr_get_logfile(void *drcontext)
{
#ifdef DEBUG
dcontext_t *dcontext = (dcontext_t *) drcontext;
if (dcontext != NULL)
return dcontext->logfile;
else
return main_logfile;
#else
return INVALID_FILE;
#endif
}
DR_API
/* Returns true iff the -stderr_mask runtime option is non-zero, indicating
* that the user wants notification messages printed to stderr.
*/
bool
dr_is_notify_on(void)
{
return (dynamo_options.stderr_mask != 0);
}
#ifdef WINDOWS
DR_API file_t
dr_get_stdout_file(void)
{
return get_stdout_handle();
}
DR_API file_t
dr_get_stderr_file(void)
{
return get_stderr_handle();
}
DR_API file_t
dr_get_stdin_file(void)
{
return get_stdin_handle();
}
#endif
#ifdef PROGRAM_SHEPHERDING
DR_API void
dr_write_forensics_report(void *dcontext, file_t file,
dr_security_violation_type_t violation,
dr_security_violation_action_t action,
const char *violation_name)
{
security_violation_t sec_violation;
action_type_t sec_action;
switch (violation) {
case DR_RCO_STACK_VIOLATION:
sec_violation = STACK_EXECUTION_VIOLATION;
break;
case DR_RCO_HEAP_VIOLATION:
sec_violation = HEAP_EXECUTION_VIOLATION;
break;
case DR_RCT_RETURN_VIOLATION:
sec_violation = RETURN_TARGET_VIOLATION;
break;
case DR_RCT_INDIRECT_CALL_VIOLATION:
sec_violation = INDIRECT_CALL_RCT_VIOLATION;
break;
case DR_RCT_INDIRECT_JUMP_VIOLATION:
sec_violation = INDIRECT_JUMP_RCT_VIOLATION;
break;
default:
CLIENT_ASSERT(false, "dr_write_forensics_report does not support "
"DR_UNKNOWN_VIOLATION or invalid violation types");
return;
}
switch (action) {
case DR_VIOLATION_ACTION_KILL_PROCESS:
sec_action = ACTION_TERMINATE_PROCESS;
break;
case DR_VIOLATION_ACTION_CONTINUE:
case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT:
sec_action = ACTION_CONTINUE;
break;
case DR_VIOLATION_ACTION_KILL_THREAD:
sec_action = ACTION_TERMINATE_THREAD;
break;
case DR_VIOLATION_ACTION_THROW_EXCEPTION:
sec_action = ACTION_THROW_EXCEPTION;
break;
default:
CLIENT_ASSERT(false, "dr_write_forensics_report invalid action selection");
return;
}
/* FIXME - could use a better message. */
append_diagnostics(file, action_message[sec_action], violation_name, sec_violation);
}
#endif /* PROGRAM_SHEPHERDING */
#ifdef WINDOWS
DR_API void
dr_messagebox(const char *fmt, ...)
{
dcontext_t *dcontext = get_thread_private_dcontext();
char msg[MAX_LOG_LENGTH];
wchar_t wmsg[MAX_LOG_LENGTH];
va_list ap;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
va_start(ap, fmt);
vsnprintf(msg, BUFFER_SIZE_ELEMENTS(msg), fmt, ap);
NULL_TERMINATE_BUFFER(msg);
snwprintf(wmsg, BUFFER_SIZE_ELEMENTS(wmsg), L"%S", msg);
NULL_TERMINATE_BUFFER(wmsg);
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = true;
nt_messagebox(wmsg, debugbox_get_title());
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = false;
va_end(ap);
}
static ssize_t
dr_write_to_console(bool to_stdout, const char *fmt, va_list ap)
{
bool res = true;
char msg[MAX_LOG_LENGTH];
uint written = 0;
int len;
HANDLE std;
CLIENT_ASSERT(dr_using_console(), "internal logic error");
ASSERT(priv_kernel32 != NULL &&
kernel32_WriteFile != NULL);
/* kernel32!GetStdHandle(STD_OUTPUT_HANDLE) == our PEB-based get_stdout_handle */
std = (to_stdout ? get_stdout_handle() : get_stderr_handle());
if (std == INVALID_HANDLE_VALUE)
return false;
len = vsnprintf(msg, BUFFER_SIZE_ELEMENTS(msg), fmt, ap);
/* Let user know if message was truncated */
if (len < 0 || len == BUFFER_SIZE_ELEMENTS(msg))
res = false;
NULL_TERMINATE_BUFFER(msg);
/* Make this routine work in all kinds of windows by going through
* kernel32!WriteFile, which will call WriteConsole for us.
*/
res = res &&
kernel32_WriteFile(std, msg, (DWORD) strlen(msg), (LPDWORD) &written, NULL);
return (res ? written : 0);
}
static ssize_t
dr_write_to_console_varg(bool to_stdout, const char *fmt, ...)
{
va_list ap;
ssize_t res;
va_start(ap, fmt);
res = dr_write_to_console(to_stdout, fmt, ap);
va_end(ap);
return res;
}
DR_API
bool
dr_using_console(void)
{
bool res;
if (get_os_version() >= WINDOWS_VERSION_8) {
FILE_FS_DEVICE_INFORMATION device_info;
HANDLE herr = get_stderr_handle();
/* The handle is invalid iff it's a gui app and the parent is a console */
if (herr == INVALID_HANDLE_VALUE) {
module_data_t *app_kernel32 = dr_lookup_module_by_name("kernel32.dll");
if (privload_attach_parent_console(app_kernel32->start) == false) {
dr_free_module_data(app_kernel32);
return false;
}
dr_free_module_data(app_kernel32);
herr = get_stderr_handle();
}
if (nt_query_volume_info(herr, &device_info, sizeof(device_info),
FileFsDeviceInformation) == STATUS_SUCCESS) {
if (device_info.DeviceType == FILE_DEVICE_CONSOLE)
return true;
}
return false;
}
/* We detect cmd window using what kernel32!WriteFile uses: a handle
* having certain bits set.
*/
res = (((ptr_int_t)get_stderr_handle() & 0x10000003) == 0x3);
CLIENT_ASSERT(!res || get_os_version() < WINDOWS_VERSION_8,
"Please report this: Windows 8 does have old-style consoles!");
return res;
}
DR_API
bool
dr_enable_console_printing(void)
{
bool success = false;
/* b/c private loader sets cxt sw code up front based on whether have windows
* priv libs or not, this can only be called during client init()
*/
if (dynamo_initialized) {
CLIENT_ASSERT(false, "dr_enable_console_printing() must be called during init");
return false;
}
/* Direct writes to std handles work on win8+ (xref i#911) but we don't need
* a separate check as the handle is detected as a non-console handle.
*/
if (!dr_using_console())
return true;
if (!INTERNAL_OPTION(private_loader))
return false;
if (!print_to_console) {
if (priv_kernel32 == NULL) {
/* Not using load_shared_library() b/c it won't search paths
* for us. XXX: should add os-shared interface for
* locate-and-load.
*/
priv_kernel32 = (shlib_handle_t)
locate_and_load_private_library("kernel32.dll", false/*!reachable*/);
}
if (priv_kernel32 != NULL && kernel32_WriteFile == NULL) {
module_data_t *app_kernel32 = dr_lookup_module_by_name("kernel32.dll");
kernel32_WriteFile = (kernel32_WriteFile_t)
lookup_library_routine(priv_kernel32, "WriteFile");
            /* There is some problem loading the 32-bit kernel32.dll when the
             * 64-bit kernel32.dll is already loaded. If kernel32 is not loaded,
             * we can't call privload_console_share because it assumes kernel32
             * is loaded.
*/
if (app_kernel32 == NULL) {
success = false;
} else {
success = privload_console_share(priv_kernel32, app_kernel32->start);
dr_free_module_data(app_kernel32);
}
}
        /* We go ahead and cache the result of dr_using_console(). If the app
         * really changes its console, the client could call this routine again
         * as a workaround. That seems unlikely, so we opt for the better perf.
*/
print_to_console = (priv_kernel32 != NULL &&
kernel32_WriteFile != NULL && success);
}
return print_to_console;
}
#endif /* WINDOWS */
DR_API void
dr_printf(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
#ifdef WINDOWS
if (print_to_console)
dr_write_to_console(true/*stdout*/, fmt, ap);
else
#endif
do_file_write(STDOUT, fmt, ap);
va_end(ap);
}
DR_API ssize_t
dr_fprintf(file_t f, const char *fmt, ...)
{
ssize_t written;
va_list ap;
va_start(ap, fmt);
#ifdef WINDOWS
if ((f == STDOUT || f == STDERR) && print_to_console) {
written = dr_write_to_console(f == STDOUT, fmt, ap);
if (written <= 0)
written = -1;
} else
#endif
written = do_file_write(f, fmt, ap);
va_end(ap);
return written;
}
DR_API int
dr_snprintf(char *buf, size_t max, const char *fmt, ...)
{
int res;
va_list ap;
va_start(ap, fmt);
/* PR 219380: we use our_vsnprintf instead of ntdll._vsnprintf b/c the
* latter does not support floating point.
* Plus, our_vsnprintf returns -1 for > max chars (matching Windows
* behavior, but which Linux libc version does not do).
*/
res = our_vsnprintf(buf, max, fmt, ap);
va_end(ap);
return res;
}
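/* Illustrative sketch, not DR code: checking the -1-on-truncation behavior noted
 * above; the buffer size and format are arbitrary.
 *
 *   char buf[16];
 *   int len = dr_snprintf(buf, BUFFER_SIZE_ELEMENTS(buf), "value=%d", 42);
 *   if (len < 0) {
 *       // output did not fit: terminate defensively before using buf
 *       buf[BUFFER_SIZE_ELEMENTS(buf) - 1] = '\0';
 *   }
 */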
DR_API int
dr_vsnprintf(char *buf, size_t max, const char *fmt, va_list ap)
{
return our_vsnprintf(buf, max, fmt, ap);
}
DR_API int
dr_snwprintf(wchar_t *buf, size_t max, const wchar_t *fmt, ...)
{
int res;
va_list ap;
va_start(ap, fmt);
res = our_vsnprintf_wide(buf, max, fmt, ap);
va_end(ap);
return res;
}
DR_API int
dr_vsnwprintf(wchar_t *buf, size_t max, const wchar_t *fmt, va_list ap)
{
return our_vsnprintf_wide(buf, max, fmt, ap);
}
DR_API int
dr_sscanf(const char *str, const char *fmt, ...)
{
int res;
va_list ap;
va_start(ap, fmt);
res = our_vsscanf(str, fmt, ap);
va_end(ap);
return res;
}
DR_API const char *
dr_get_token(const char *str, char *buf, size_t buflen)
{
/* We don't indicate whether any truncation happened. The
* reasoning is that this is meant to be used on a string of known
* size ahead of time, so the max size for any one token is known.
*/
const char *pos = str;
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(buflen), "buflen too large");
if (parse_word(str, &pos, buf, (uint)buflen) == NULL)
return NULL;
else
return pos;
}
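/* Illustrative sketch, not DR code: walking a whitespace-separated string with
 * dr_get_token(); the option string is made up.
 *
 *   const char *opts = "-foo 4096 -bar";
 *   const char *pos = opts;
 *   char tok[64];
 *   while ((pos = dr_get_token(pos, tok, BUFFER_SIZE_ELEMENTS(tok))) != NULL) {
 *       // tok holds "-foo", then "4096", then "-bar"
 *   }
 */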
DR_API void
dr_print_instr(void *drcontext, file_t f, instr_t *instr, const char *msg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_print_instr: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT || standalone_library,
"dr_print_instr: drcontext is invalid");
dr_fprintf(f, "%s "PFX" ", msg, instr_get_translation(instr));
instr_disassemble(dcontext, instr, f);
dr_fprintf(f, "\n");
}
DR_API void
dr_print_opnd(void *drcontext, file_t f, opnd_t opnd, const char *msg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_print_opnd: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT || standalone_library,
"dr_print_opnd: drcontext is invalid");
dr_fprintf(f, "%s ", msg);
opnd_disassemble(dcontext, opnd, f);
dr_fprintf(f, "\n");
}
/***************************************************************************
* Thread support
*/
DR_API
/* Returns the DR context of the current thread */
void *
dr_get_current_drcontext(void)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
return (void *) dcontext;
}
DR_API thread_id_t
dr_get_thread_id(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_get_thread_id: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_get_thread_id: drcontext is invalid");
return dcontext->owning_thread;
}
#ifdef WINDOWS
/* Added for DrMem i#1254 */
DR_API HANDLE
dr_get_dr_thread_handle(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_get_dr_thread_handle: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_get_dr_thread_handle: drcontext is invalid");
return dcontext->thread_record->handle;
}
#endif
DR_API void *
dr_get_tls_field(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_get_tls_field: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_get_tls_field: drcontext is invalid");
return dcontext->client_data->user_field;
}
DR_API void
dr_set_tls_field(void *drcontext, void *value)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_set_tls_field: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_set_tls_field: drcontext is invalid");
dcontext->client_data->user_field = value;
}
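/* Illustrative sketch, not DR code: using the TLS field for per-thread client data.
 * dr_thread_alloc()/dr_thread_free() and the thread event registration are assumed
 * to be the usual dr_api.h routines; the callback names are hypothetical.
 *
 *   static void
 *   event_thread_init(void *drcontext)
 *   {
 *       uint *counter = (uint *) dr_thread_alloc(drcontext, sizeof(*counter));
 *       *counter = 0;
 *       dr_set_tls_field(drcontext, counter);
 *   }
 *
 *   static void
 *   event_thread_exit(void *drcontext)
 *   {
 *       uint *counter = (uint *) dr_get_tls_field(drcontext);
 *       dr_thread_free(drcontext, counter, sizeof(*counter));
 *   }
 */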
DR_API void *
dr_get_dr_segment_base(IN reg_id_t seg)
{
#ifdef AARCHXX
if (seg == dr_reg_stolen)
return os_get_dr_tls_base(get_thread_private_dcontext());
else
return NULL;
#else
return get_segment_base(seg);
#endif
}
DR_API
bool
dr_raw_tls_calloc(OUT reg_id_t *tls_register,
OUT uint *offset,
IN uint num_slots,
IN uint alignment)
{
CLIENT_ASSERT(tls_register != NULL,
"dr_raw_tls_calloc: tls_register cannot be NULL");
CLIENT_ASSERT(offset != NULL,
"dr_raw_tls_calloc: offset cannot be NULL");
*tls_register = IF_X86_ELSE(SEG_TLS, dr_reg_stolen);
if (num_slots == 0)
return true;
return os_tls_calloc(offset, num_slots, alignment);
}
DR_API
bool
dr_raw_tls_cfree(uint offset, uint num_slots)
{
if (num_slots == 0)
return true;
return os_tls_cfree(offset, num_slots);
}
DR_API
opnd_t
dr_raw_tls_opnd(void *drcontext, reg_id_t tls_register, uint tls_offs)
{
CLIENT_ASSERT(drcontext != NULL, "dr_raw_tls_opnd: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_raw_tls_opnd: drcontext is invalid");
IF_X86_ELSE({
return opnd_create_far_base_disp_ex(tls_register, DR_REG_NULL, DR_REG_NULL,
0, tls_offs, OPSZ_PTR,
/* modern processors don't want addr16
* prefixes
*/
false, true, false);
}, {
return OPND_CREATE_MEMPTR(tls_register, tls_offs);
});
}
DR_API
void
dr_insert_read_raw_tls(void *drcontext, instrlist_t *ilist, instr_t *where,
reg_id_t tls_register, uint tls_offs, reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_read_raw_tls: drcontext cannot be NULL");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"must use a pointer-sized general-purpose register");
IF_X86_ELSE({
MINSERT(ilist, where, INSTR_CREATE_mov_ld
(dcontext, opnd_create_reg(reg),
dr_raw_tls_opnd(drcontext, tls_register, tls_offs)));
}, {
MINSERT(ilist, where, XINST_CREATE_load
(dcontext, opnd_create_reg(reg),
dr_raw_tls_opnd(drcontext, tls_register, tls_offs)));
});
}
DR_API
void
dr_insert_write_raw_tls(void *drcontext, instrlist_t *ilist, instr_t *where,
reg_id_t tls_register, uint tls_offs, reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_write_raw_tls: drcontext cannot be NULL");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"must use a pointer-sized general-purpose register");
IF_X86_ELSE({
MINSERT(ilist, where, INSTR_CREATE_mov_st
(dcontext,
dr_raw_tls_opnd(drcontext, tls_register, tls_offs),
opnd_create_reg(reg)));
}, {
MINSERT(ilist, where, XINST_CREATE_store
(dcontext, dr_raw_tls_opnd(drcontext, tls_register, tls_offs),
opnd_create_reg(reg)));
});
}
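/* Illustrative sketch, not DR code: reserving one raw TLS slot at client init and
 * accessing it during instrumentation. Alignment 0 (no requirement) is assumed to
 * be accepted, and reg is a dead or already-spilled pointer-sized register; the
 * function names are hypothetical.
 *
 *   static reg_id_t tls_seg;
 *   static uint tls_offs;
 *
 *   static void
 *   example_init(void)
 *   {
 *       dr_raw_tls_calloc(&tls_seg, &tls_offs, 1, 0);
 *   }
 *
 *   static void
 *   example_instrument(void *drcontext, instrlist_t *ilist, instr_t *where,
 *                      reg_id_t reg)
 *   {
 *       dr_insert_read_raw_tls(drcontext, ilist, where, tls_seg, tls_offs, reg);
 *       // ... meta-instrs that use or update reg ...
 *       dr_insert_write_raw_tls(drcontext, ilist, where, tls_seg, tls_offs, reg);
 *   }
 */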
DR_API
/* Current thread gives up its time quantum. */
void
dr_thread_yield(void)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = true;
else
dcontext->client_data->at_safe_to_terminate_syscall = true;
os_thread_yield();
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = false;
else
dcontext->client_data->at_safe_to_terminate_syscall = false;
}
DR_API
/* Current thread sleeps for time_ms milliseconds. */
void
dr_sleep(int time_ms)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = true;
else
dcontext->client_data->at_safe_to_terminate_syscall = true;
os_thread_sleep(time_ms);
if (IS_CLIENT_THREAD(dcontext))
dcontext->client_data->client_thread_safe_for_synch = false;
else
dcontext->client_data->at_safe_to_terminate_syscall = false;
}
#ifdef CLIENT_SIDELINE
DR_API
bool
dr_client_thread_set_suspendable(bool suspendable)
{
/* see notes in synch_with_all_threads() */
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (!IS_CLIENT_THREAD(dcontext))
return false;
dcontext->client_data->suspendable = suspendable;
return true;
}
#endif
DR_API
bool
dr_suspend_all_other_threads_ex(OUT void ***drcontexts,
OUT uint *num_suspended,
OUT uint *num_unsuspended,
dr_suspend_flags_t flags)
{
uint out_suspended = 0, out_unsuspended = 0;
thread_record_t **threads;
int num_threads;
dcontext_t *my_dcontext = get_thread_private_dcontext();
int i;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(OWN_NO_LOCKS(my_dcontext),
"dr_suspend_all_other_threads cannot be called while holding a lock");
CLIENT_ASSERT(drcontexts != NULL && num_suspended != NULL,
"dr_suspend_all_other_threads invalid params");
LOG(GLOBAL, LOG_FRAGMENT, 2,
"\ndr_suspend_all_other_threads: thread "TIDFMT" suspending all threads\n",
get_thread_id());
/* suspend all DR-controlled threads at safe locations */
if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER,
&threads, &num_threads, THREAD_SYNCH_NO_LOCKS_NO_XFER,
/* if we fail to suspend a thread (e.g., for
* privilege reasons), ignore and continue
*/
THREAD_SYNCH_SUSPEND_FAILURE_IGNORE)) {
LOG(GLOBAL, LOG_FRAGMENT, 2,
"\ndr_suspend_all_other_threads: failed to suspend every thread\n");
/* some threads may have been successfully suspended so we must return
* their info so they'll be resumed. I believe there is thus no
* scenario under which we return false.
*/
}
/* now we own the thread_initexit_lock */
CLIENT_ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock),
"internal locking error");
/* To avoid two passes we allocate the array now. It may be larger than
     * necessary if we had suspend failures, but that's ok.
     * We hide the thread count and the thread array in extra slots.
*/
*drcontexts = (void **)
global_heap_alloc((num_threads+2)*sizeof(dcontext_t*) HEAPACCT(ACCT_THREAD_MGT));
for (i = 0; i < num_threads; i++) {
dcontext_t *dcontext = threads[i]->dcontext;
if (dcontext != NULL) { /* include my_dcontext here */
if (dcontext != my_dcontext) {
/* must translate BEFORE freeing any memory! */
if (!thread_synch_successful(threads[i])) {
out_unsuspended++;
} else if (is_thread_currently_native(threads[i]) &&
!TEST(DR_SUSPEND_NATIVE, flags)) {
out_unsuspended++;
} else if (thread_synch_state_no_xfer(dcontext)) {
/* FIXME: for all other synchall callers, the app
* context should be sitting in their mcontext, even
* though we can't safely get their native context and
* translate it.
*/
(*drcontexts)[out_suspended] = (void *) dcontext;
out_suspended++;
CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext,
"internal inconsistency in where mcontext is");
/* officially get_mcontext() doesn't always set pc: we do anyway */
get_mcontext(dcontext)->pc = dcontext->next_tag;
dcontext->client_data->mcontext_in_dcontext = true;
} else {
(*drcontexts)[out_suspended] = (void *) dcontext;
out_suspended++;
/* It's not safe to clobber the thread's mcontext with
* its own translation b/c for shared_syscall we store
* the continuation pc in the esi slot.
* We could translate here into heap-allocated memory,
* but some clients may just want to stop
* the world but not examine the threads, so we lazily
* translate in dr_get_mcontext().
*/
CLIENT_ASSERT(!dcontext->client_data->suspended,
"inconsistent usage of dr_suspend_all_other_threads");
CLIENT_ASSERT(dcontext->client_data->cur_mc == NULL,
"inconsistent usage of dr_suspend_all_other_threads");
dcontext->client_data->suspended = true;
}
}
}
}
/* Hide the two extra vars we need the client to pass back to us */
(*drcontexts)[out_suspended] = (void *) threads;
(*drcontexts)[out_suspended+1] = (void *)(ptr_uint_t) num_threads;
*num_suspended = out_suspended;
if (num_unsuspended != NULL)
*num_unsuspended = out_unsuspended;
return true;
}
DR_API
bool
dr_suspend_all_other_threads(OUT void ***drcontexts,
OUT uint *num_suspended,
OUT uint *num_unsuspended)
{
return dr_suspend_all_other_threads_ex(drcontexts, num_suspended,
num_unsuspended, 0);
}
bool
dr_resume_all_other_threads(IN void **drcontexts,
IN uint num_suspended)
{
thread_record_t **threads;
int num_threads;
uint i;
CLIENT_ASSERT(drcontexts != NULL,
"dr_suspend_all_other_threads invalid params");
LOG(GLOBAL, LOG_FRAGMENT, 2,
"dr_resume_all_other_threads\n");
threads = (thread_record_t **) drcontexts[num_suspended];
num_threads = (int)(ptr_int_t) drcontexts[num_suspended+1];
for (i = 0; i < num_suspended; i++) {
dcontext_t *dcontext = (dcontext_t *) drcontexts[i];
if (dcontext->client_data->cur_mc != NULL) {
/* clear any cached mc from dr_get_mcontext_priv() */
heap_free(dcontext, dcontext->client_data->cur_mc,
sizeof(*dcontext->client_data->cur_mc) HEAPACCT(ACCT_CLIENT));
dcontext->client_data->cur_mc = NULL;
}
dcontext->client_data->suspended = false;
}
global_heap_free(drcontexts, (num_threads+2)*sizeof(dcontext_t*)
HEAPACCT(ACCT_THREAD_MGT));
end_synch_with_all_threads(threads, num_threads, true/*resume*/);
return true;
}
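/* Illustrative sketch, not DR code: the suspend/examine/resume pattern the two
 * routines above are designed for; the per-thread work is elided.
 *
 *   void **drcontexts = NULL;
 *   uint num_suspended = 0, num_unsuspended = 0;
 *   if (dr_suspend_all_other_threads(&drcontexts, &num_suspended, &num_unsuspended)) {
 *       uint i;
 *       for (i = 0; i < num_suspended; i++) {
 *           // examine drcontexts[i], e.g., via dr_get_mcontext()
 *       }
 *       dr_resume_all_other_threads(drcontexts, num_suspended);
 *   }
 */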
DR_API
bool
dr_is_thread_native(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "invalid param");
return is_thread_currently_native(dcontext->thread_record);
}
DR_API
bool
dr_retakeover_suspended_native_thread(void *drcontext)
{
bool res;
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "invalid param");
/* XXX: I don't quite see why I need to pop these 2 when I'm doing
* what a regular retakeover would do
*/
KSTOP_NOT_MATCHING_DC(dcontext, fcache_default);
KSTOP_NOT_MATCHING_DC(dcontext, dispatch_num_exits);
res = os_thread_take_over_suspended_native(dcontext);
return res;
}
# ifdef UNIX
DR_API
bool
dr_set_itimer(int which, uint millisec,
void (*func)(void *drcontext, dr_mcontext_t *mcontext))
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (func == NULL)
return false;
return set_itimer_callback(dcontext, which, millisec, NULL,
(void (*)(dcontext_t *, dr_mcontext_t *))func);
}
uint
dr_get_itimer(int which)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
return get_itimer_frequency(dcontext, which);
}
# endif /* UNIX */
#endif /* CLIENT_INTERFACE */
DR_API
/* Inserts inst as a non-application instruction into ilist prior to "where" */
void
instrlist_meta_preinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
instr_set_meta(inst);
instrlist_preinsert(ilist, where, inst);
}
DR_API
/* Inserts inst as a non-application instruction into ilist after "where" */
void
instrlist_meta_postinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
instr_set_meta(inst);
instrlist_postinsert(ilist, where, inst);
}
DR_API
/* Inserts inst as a non-application instruction onto the end of ilist */
void
instrlist_meta_append(instrlist_t *ilist, instr_t *inst)
{
instr_set_meta(inst);
instrlist_append(ilist, inst);
}
DR_API
void
instrlist_meta_fault_preinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
instr_set_meta_may_fault(inst, true);
instrlist_preinsert(ilist, where, inst);
}
DR_API
void
instrlist_meta_fault_postinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
instr_set_meta_may_fault(inst, true);
instrlist_postinsert(ilist, where, inst);
}
DR_API
void
instrlist_meta_fault_append(instrlist_t *ilist, instr_t *inst)
{
instr_set_meta_may_fault(inst, true);
instrlist_append(ilist, inst);
}
static void
convert_va_list_to_opnd(dcontext_t *dcontext, opnd_t **args, uint num_args, va_list ap)
{
uint i;
ASSERT(num_args > 0);
/* allocate at least one argument opnd */
/* we don't check for GLOBAL_DCONTEXT since DR internally calls this */
*args = HEAP_ARRAY_ALLOC(dcontext, opnd_t, num_args,
ACCT_CLEANCALL, UNPROTECTED);
for (i = 0; i < num_args; i++) {
(*args)[i] = va_arg(ap, opnd_t);
CLIENT_ASSERT(opnd_is_valid((*args)[i]),
"Call argument: bad operand. Did you create a valid opnd_t?");
}
}
static void
free_va_opnd_list(dcontext_t *dcontext, uint num_args, opnd_t *args)
{
if (num_args != 0) {
HEAP_ARRAY_FREE(dcontext, args, opnd_t, num_args,
ACCT_CLEANCALL, UNPROTECTED);
}
}
/* dr_insert_* are used by general DR */
/* Inserts a complete call to callee with the passed-in arguments */
void
dr_insert_call(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, uint num_args, ...)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
opnd_t *args = NULL;
va_list ap;
CLIENT_ASSERT(drcontext != NULL, "dr_insert_call: drcontext cannot be NULL");
if (num_args != 0) {
va_start(ap, num_args);
convert_va_list_to_opnd(dcontext, &args, num_args, ap);
va_end(ap);
}
insert_meta_call_vargs(dcontext, ilist, where, META_CALL_RETURNS,
vmcode_get_start(), callee, num_args, args);
if (num_args != 0)
free_va_opnd_list(dcontext, num_args, args);
}
bool
dr_insert_call_ex(void *drcontext, instrlist_t *ilist, instr_t *where,
byte *encode_pc, void *callee, uint num_args, ...)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
opnd_t *args = NULL;
bool direct;
va_list ap;
    CLIENT_ASSERT(drcontext != NULL, "dr_insert_call_ex: drcontext cannot be NULL");
if (num_args != 0) {
va_start(ap, num_args);
convert_va_list_to_opnd(drcontext, &args, num_args, ap);
va_end(ap);
}
direct = insert_meta_call_vargs(dcontext, ilist, where, META_CALL_RETURNS, encode_pc,
callee, num_args, args);
if (num_args != 0)
free_va_opnd_list(dcontext, num_args, args);
return direct;
}
/* Not exported. Currently used for ARM to avoid storing to %lr. */
void
dr_insert_call_noreturn(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, uint num_args, ...)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
opnd_t *args = NULL;
va_list ap;
CLIENT_ASSERT(drcontext != NULL, "dr_insert_call_noreturn: drcontext cannot be NULL");
if (num_args != 0) {
va_start(ap, num_args);
convert_va_list_to_opnd(dcontext, &args, num_args, ap);
va_end(ap);
}
insert_meta_call_vargs(dcontext, ilist, where, 0, vmcode_get_start(), callee,
num_args, args);
if (num_args != 0)
free_va_opnd_list(dcontext, num_args, args);
}
/* Internal utility routine for inserting context save for a clean call.
* Returns the size of the data stored on the DR stack
* (in case the caller needs to align the stack pointer).
* XSP and XAX are modified by this call.
*/
static uint
prepare_for_call_ex(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *where)
{
instr_t *in;
uint dstack_offs;
in = (where == NULL) ? instrlist_last(ilist) : instr_get_prev(where);
dstack_offs = prepare_for_clean_call(dcontext, cci, ilist, where);
/* now go through and mark inserted instrs as meta */
if (in == NULL)
in = instrlist_first(ilist);
else
in = instr_get_next(in);
while (in != where) {
instr_set_meta(in);
in = instr_get_next(in);
}
return dstack_offs;
}
/* Internal utility routine for inserting context restore for a clean call. */
static void
cleanup_after_call_ex(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *where, uint sizeof_param_area)
{
instr_t *in;
in = (where == NULL) ? instrlist_last(ilist) : instr_get_prev(where);
if (sizeof_param_area > 0) {
/* clean up the parameter area */
CLIENT_ASSERT(sizeof_param_area <= 127,
"cleanup_after_call_ex: sizeof_param_area must be <= 127");
/* mark it meta down below */
instrlist_preinsert(ilist, where,
XINST_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT8(sizeof_param_area)));
}
cleanup_after_clean_call(dcontext, cci, ilist, where);
/* now go through and mark inserted instrs as meta */
if (in == NULL)
in = instrlist_first(ilist);
else
in = instr_get_next(in);
while (in != where) {
instr_set_meta(in);
in = instr_get_next(in);
}
}
/* Inserts a complete call to callee with the passed-in arguments, wrapped
* by an app save and restore.
*
* If "save_flags" includes DR_CLEANCALL_SAVE_FLOAT, saves the fp/mmx/sse state.
*
* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* dr_prepare_for_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched.
*
* NOTE : dr_insert_cbr_instrumentation has assumption about the clean call
* instrumentation layout, changes to the clean call instrumentation may break
* dr_insert_cbr_instrumentation.
*/
void
dr_insert_clean_call_ex_varg(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, dr_cleancall_save_t save_flags,
uint num_args, opnd_t *args)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
uint dstack_offs, pad = 0;
size_t buf_sz = 0;
clean_call_info_t cci; /* information for clean call insertion. */
bool save_fpstate = TEST(DR_CLEANCALL_SAVE_FLOAT, save_flags);
meta_call_flags_t call_flags = META_CALL_CLEAN | META_CALL_RETURNS;
byte *encode_pc;
CLIENT_ASSERT(drcontext != NULL, "dr_insert_clean_call: drcontext cannot be NULL");
STATS_INC(cleancall_inserted);
LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: insert clean call to "PFX"\n", callee);
/* analyze the clean call, return true if clean call can be inlined. */
if (analyze_clean_call(dcontext, &cci, where, callee,
save_fpstate, TEST(DR_CLEANCALL_ALWAYS_OUT_OF_LINE, save_flags),
num_args, args) &&
!TEST(DR_CLEANCALL_ALWAYS_OUT_OF_LINE, save_flags)) {
#ifdef CLIENT_INTERFACE
/* we can perform the inline optimization and return. */
STATS_INC(cleancall_inlined);
LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: inlined callee "PFX"\n", callee);
insert_inline_clean_call(dcontext, &cci, ilist, where, args);
return;
#else /* CLIENT_INTERFACE */
ASSERT_NOT_REACHED();
#endif /* CLIENT_INTERFACE */
}
/* honor requests from caller */
if (TEST(DR_CLEANCALL_NOSAVE_FLAGS, save_flags)) {
/* even if we remove flag saves we want to keep mcontext shape */
cci.preserve_mcontext = true;
cci.skip_save_flags = true;
/* we assume this implies DF should be 0 already */
cci.skip_clear_flags = true;
/* XXX: should also provide DR_CLEANCALL_NOSAVE_NONAFLAGS to
* preserve just arith flags on return from a call
*/
}
if (TESTANY(DR_CLEANCALL_NOSAVE_XMM |
DR_CLEANCALL_NOSAVE_XMM_NONPARAM |
DR_CLEANCALL_NOSAVE_XMM_NONRET, save_flags)) {
uint i;
/* even if we remove xmm saves we want to keep mcontext shape */
cci.preserve_mcontext = true;
/* start w/ all */
#if defined(X64) && defined(WINDOWS)
cci.num_simd_skip = 6;
#else
/* all 8 (or 16) are scratch */
cci.num_simd_skip = NUM_SIMD_REGS;
#endif
for (i=0; i<cci.num_simd_skip; i++)
cci.simd_skip[i] = true;
/* now remove those used for param/retval */
#ifdef X64
if (TEST(DR_CLEANCALL_NOSAVE_XMM_NONPARAM, save_flags)) {
/* xmm0-3 (-7 for linux) are used for params */
# ifdef UNIX
for (i=0; i<7; i++)
# else
for (i=0; i<3; i++)
# endif
cci.simd_skip[i] = false;
cci.num_simd_skip -= i;
}
if (TEST(DR_CLEANCALL_NOSAVE_XMM_NONRET, save_flags)) {
/* xmm0 (and xmm1 for linux) are used for retvals */
cci.simd_skip[0] = false;
cci.num_simd_skip--;
# ifdef UNIX
cci.simd_skip[1] = false;
cci.num_simd_skip--;
# endif
}
#endif
}
dstack_offs = prepare_for_call_ex(dcontext, &cci, ilist, where);
#ifdef X64
/* PR 218790: we assume that dr_prepare_for_call() leaves stack 16-byte
* aligned, which is what insert_meta_call_vargs requires. */
if (cci.should_align) {
CLIENT_ASSERT(ALIGNED(dstack_offs, 16),
"internal error: bad stack alignment");
}
#endif
if (save_fpstate) {
/* save on the stack: xref PR 202669 on clients using more stack */
buf_sz = proc_fpstate_save_size();
/* we need 16-byte-alignment */
pad = ALIGN_FORWARD_UINT(dstack_offs, 16) - dstack_offs;
IF_X64(CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_int(buf_sz + pad),
"dr_insert_clean_call: internal truncation error"));
MINSERT(ilist, where, XINST_CREATE_sub(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT32((int)(buf_sz + pad))));
dr_insert_save_fpstate(drcontext, ilist, where,
opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0,
OPSZ_512));
}
/* PR 302951: restore state if clean call args reference app memory.
* We use a hack here: this is the only instance where we mark as our-mangling
* but do not have a translation target set, which indicates to the restore
* routines that this is a clean call. If the client adds instrs in the middle
* translation will fail; if the client modifies any instr, the our-mangling
* flag will disappear and translation will fail.
*/
instrlist_set_our_mangling(ilist, true);
if (TEST(DR_CLEANCALL_INDIRECT, save_flags))
encode_pc = vmcode_unreachable_pc();
else
encode_pc = vmcode_get_start();
if (TEST(DR_CLEANCALL_RETURNS_TO_NATIVE, save_flags))
call_flags |= META_CALL_RETURNS_TO_NATIVE;
insert_meta_call_vargs(dcontext, ilist, where, call_flags,
encode_pc, callee, num_args, args);
instrlist_set_our_mangling(ilist, false);
if (save_fpstate) {
dr_insert_restore_fpstate(drcontext, ilist, where,
opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0,
OPSZ_512));
MINSERT(ilist, where, XINST_CREATE_add(dcontext, opnd_create_reg(REG_XSP),
OPND_CREATE_INT32(buf_sz + pad)));
}
cleanup_after_call_ex(dcontext, &cci, ilist, where, 0);
}
void
dr_insert_clean_call_ex(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, dr_cleancall_save_t save_flags, uint num_args, ...)
{
opnd_t *args = NULL;
if (num_args != 0) {
va_list ap;
va_start(ap, num_args);
convert_va_list_to_opnd(drcontext, &args, num_args, ap);
va_end(ap);
}
dr_insert_clean_call_ex_varg(drcontext, ilist, where, callee, save_flags,
num_args, args);
if (num_args != 0)
free_va_opnd_list(drcontext, num_args, args);
}
DR_API
void
dr_insert_clean_call(void *drcontext, instrlist_t *ilist, instr_t *where,
void *callee, bool save_fpstate, uint num_args, ...)
{
dr_cleancall_save_t flags = (save_fpstate ? DR_CLEANCALL_SAVE_FLOAT : 0);
opnd_t *args = NULL;
if (num_args != 0) {
va_list ap;
va_start(ap, num_args);
convert_va_list_to_opnd(drcontext, &args, num_args, ap);
va_end(ap);
}
dr_insert_clean_call_ex_varg(drcontext, ilist, where, callee, flags, num_args, args);
if (num_args != 0)
free_va_opnd_list(drcontext, num_args, args);
}
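/* Illustrative sketch, not DR code: inserting a clean call to a two-argument callee
 * before an instruction in the bb event; the callee and wrapper names are
 * hypothetical.
 *
 *   static void
 *   at_instr(app_pc pc, uint opcode)
 *   {
 *       // runs in a clean context with the app state saved
 *   }
 *
 *   static void
 *   example_instrument(void *drcontext, instrlist_t *bb, instr_t *where)
 *   {
 *       dr_insert_clean_call(drcontext, bb, where, (void *) at_instr,
 *                            false, // no fpstate save
 *                            2,
 *                            OPND_CREATE_INTPTR(instr_get_app_pc(where)),
 *                            OPND_CREATE_INT32(instr_get_opcode(where)));
 *   }
 */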
/* Utility routine for inserting a clean call to an instrumentation routine
* Returns the size of the data stored on the DR stack (in case the caller
* needs to align the stack pointer). XSP and XAX are modified by this call.
*
* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* prepare_for_clean_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched.
*/
DR_API uint
dr_prepare_for_call(void *drcontext, instrlist_t *ilist, instr_t *where)
{
CLIENT_ASSERT(drcontext != NULL, "dr_prepare_for_call: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_prepare_for_call: drcontext is invalid");
return prepare_for_call_ex((dcontext_t *)drcontext, NULL, ilist, where);
}
DR_API void
dr_cleanup_after_call(void *drcontext, instrlist_t *ilist, instr_t *where,
uint sizeof_param_area)
{
CLIENT_ASSERT(drcontext != NULL, "dr_cleanup_after_call: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_cleanup_after_call: drcontext is invalid");
cleanup_after_call_ex((dcontext_t *)drcontext, NULL, ilist, where,
sizeof_param_area);
}
#ifdef CLIENT_INTERFACE
DR_API void
dr_swap_to_clean_stack(void *drcontext, instrlist_t *ilist, instr_t *where)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_swap_to_clean_stack: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_swap_to_clean_stack: drcontext is invalid");
/* PR 219620: For thread-shared, we need to get the dcontext
* dynamically rather than use the constant passed in here.
*/
if (SCRATCH_ALWAYS_TLS()) {
MINSERT(ilist, where, instr_create_save_to_tls
(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
insert_get_mcontext_base(dcontext, ilist, where, SCRATCH_REG0);
/* save app xsp, and then bring in dstack to xsp */
MINSERT(ilist, where, instr_create_save_to_dc_via_reg
(dcontext, SCRATCH_REG0, REG_XSP, XSP_OFFSET));
/* DSTACK_OFFSET isn't within the upcontext so if it's separate this won't
* work right. FIXME - the dcontext accessing routines are a mess of shared
* vs. no shared support, separate context vs. no separate context support etc. */
ASSERT_NOT_IMPLEMENTED(!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask));
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, SCRATCH_REG0, REG_XSP, DSTACK_OFFSET));
MINSERT(ilist, where, instr_create_restore_from_tls
(dcontext, SCRATCH_REG0, TLS_REG0_SLOT));
}
else {
MINSERT(ilist, where, instr_create_save_to_dcontext
(dcontext, REG_XSP, XSP_OFFSET));
MINSERT(ilist, where, instr_create_restore_dynamo_stack(dcontext));
}
}
DR_API void
dr_restore_app_stack(void *drcontext, instrlist_t *ilist, instr_t *where)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_restore_app_stack: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_restore_app_stack: drcontext is invalid");
/* restore stack */
if (SCRATCH_ALWAYS_TLS()) {
/* use the register we're about to clobber as scratch space */
insert_get_mcontext_base(dcontext, ilist, where, REG_XSP);
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, REG_XSP, REG_XSP, XSP_OFFSET));
} else {
MINSERT(ilist, where, instr_create_restore_from_dcontext
(dcontext, REG_XSP, XSP_OFFSET));
}
}
#define SPILL_SLOT_TLS_MAX 2
#define NUM_TLS_SPILL_SLOTS (SPILL_SLOT_TLS_MAX + 1)
#define NUM_SPILL_SLOTS (SPILL_SLOT_MAX + 1)
/* The three tls slots we make available to clients. We reserve TLS_REG0_SLOT for our
* own use in dr convenience routines. Note the +1 is because the max is an array index
* (so zero based) while array size is number of slots. We don't need to +1 in
* SPILL_SLOT_MC_REG because subtracting SPILL_SLOT_TLS_MAX already accounts for it. */
static const ushort SPILL_SLOT_TLS_OFFS[NUM_TLS_SPILL_SLOTS] =
{ TLS_REG3_SLOT, TLS_REG2_SLOT, TLS_REG1_SLOT };
static const reg_id_t SPILL_SLOT_MC_REG[NUM_SPILL_SLOTS - NUM_TLS_SPILL_SLOTS] = {
#ifdef X86
/* The dcontext reg slots we make available to clients. We reserve XAX and XSP for
* our own use in dr convenience routines. */
# ifdef X64
REG_R15, REG_R14, REG_R13, REG_R12, REG_R11, REG_R10, REG_R9, REG_R8,
# endif
REG_XDI, REG_XSI, REG_XBP, REG_XDX, REG_XCX, REG_XBX
#elif defined(AARCHXX)
/* DR_REG_R0 is not used here. See prepare_for_clean_call. */
DR_REG_R6, DR_REG_R5, DR_REG_R4, DR_REG_R3, DR_REG_R2, DR_REG_R1
#endif /* X86/ARM */
};
DR_API void
dr_save_reg(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg,
dr_spill_slot_t slot)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_save_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_save_reg: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_save_reg: invalid spill slot selection");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_save_reg requires pointer-sized gpr");
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]);
MINSERT(ilist, where,
XINST_CREATE_store(dcontext, opnd_create_tls_slot(offs),
opnd_create_reg(reg)));
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
int offs = opnd_get_reg_dcontext_offs(reg_slot);
if (SCRATCH_ALWAYS_TLS()) {
/* PR 219620: For thread-shared, we need to get the dcontext
* dynamically rather than use the constant passed in here.
*/
reg_id_t tmp = (reg == SCRATCH_REG0) ? SCRATCH_REG1 : SCRATCH_REG0;
MINSERT(ilist, where, instr_create_save_to_tls
(dcontext, tmp, TLS_REG0_SLOT));
insert_get_mcontext_base(dcontext, ilist, where, tmp);
MINSERT(ilist, where, instr_create_save_to_dc_via_reg
(dcontext, tmp, reg, offs));
MINSERT(ilist, where, instr_create_restore_from_tls
(dcontext, tmp, TLS_REG0_SLOT));
} else {
MINSERT(ilist, where, instr_create_save_to_dcontext(dcontext, reg, offs));
}
}
}
/* if want to save 8 or 16-bit reg, must pass in containing ptr-sized reg! */
DR_API void
dr_restore_reg(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg,
dr_spill_slot_t slot)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_restore_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_restore_reg: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_restore_reg: invalid spill slot selection");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_restore_reg requires a pointer-sized gpr");
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]);
MINSERT(ilist, where,
XINST_CREATE_load(dcontext, opnd_create_reg(reg),
opnd_create_tls_slot(offs)));
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
int offs = opnd_get_reg_dcontext_offs(reg_slot);
if (SCRATCH_ALWAYS_TLS()) {
/* PR 219620: For thread-shared, we need to get the dcontext
* dynamically rather than use the constant passed in here.
*/
/* use the register we're about to clobber as scratch space */
insert_get_mcontext_base(dcontext, ilist, where, reg);
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, reg, reg, offs));
} else {
MINSERT(ilist, where,
instr_create_restore_from_dcontext(dcontext, reg, offs));
}
}
}
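/* Illustrative sketch, not DR code: bracketing a scratch use of a register with the
 * two routines above. SPILL_SLOT_1 is assumed to be the first dr_spill_slot_t value
 * and the example is x86-specific in its register choice.
 *
 *   dr_save_reg(drcontext, ilist, where, DR_REG_XCX, SPILL_SLOT_1);
 *   // ... meta-instrs that clobber xcx ...
 *   dr_restore_reg(drcontext, ilist, where, DR_REG_XCX, SPILL_SLOT_1);
 */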
DR_API dr_spill_slot_t
dr_max_opnd_accessible_spill_slot()
{
if (SCRATCH_ALWAYS_TLS())
return SPILL_SLOT_TLS_MAX;
else
return SPILL_SLOT_MAX;
}
/* creates an opnd to access spill slot slot, slot must be <=
* dr_max_opnd_accessible_spill_slot() */
opnd_t
reg_spill_slot_opnd(dcontext_t *dcontext, dr_spill_slot_t slot)
{
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]);
return opnd_create_tls_slot(offs);
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
int offs = opnd_get_reg_dcontext_offs(reg_slot);
ASSERT(!SCRATCH_ALWAYS_TLS()); /* client assert above should catch */
return opnd_create_dcontext_field(dcontext, offs);
}
}
DR_API
opnd_t
dr_reg_spill_slot_opnd(void *drcontext, dr_spill_slot_t slot)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_reg_spill_slot_opnd: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_reg_spill_slot_opnd: drcontext is invalid");
CLIENT_ASSERT(slot <= dr_max_opnd_accessible_spill_slot(),
"dr_reg_spill_slot_opnd: slot must be less than "
"dr_max_opnd_accessible_spill_slot()");
return reg_spill_slot_opnd(dcontext, slot);
}
DR_API
/* used to read a saved register spill slot from a clean call or a restore_state_event */
reg_t
dr_read_saved_reg(void *drcontext, dr_spill_slot_t slot)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(drcontext != NULL, "dr_read_saved_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_read_saved_reg: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_read_saved_reg: invalid spill slot selection");
    /* FIXME - should we allow clients to read other threads' saved registers? It's
     * not as dangerous as a write, but I can't think of a usage scenario where you'd
     * want to; it seems more likely to be a bug. */
CLIENT_ASSERT(dcontext == get_thread_private_dcontext(),
"dr_read_saved_reg(): drcontext does not belong to current thread");
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = SPILL_SLOT_TLS_OFFS[slot];
return *(reg_t *)(((byte *)&dcontext->local_state->spill_space) + offs);
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
return reg_get_value_priv(reg_slot, get_mcontext(dcontext));
}
}
DR_API
/* used to write a saved register spill slot from a clean call */
void
dr_write_saved_reg(void *drcontext, dr_spill_slot_t slot, reg_t value)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(drcontext != NULL, "dr_write_saved_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_write_saved_reg: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_write_saved_reg: invalid spill slot selection");
    /* FIXME - should we allow clients to write to other threads' saved registers?
     * I can't think of a usage scenario where that would be correct; it seems much
     * more likely to be a difficult-to-diagnose bug that crashes the app or DR. */
CLIENT_ASSERT(dcontext == get_thread_private_dcontext(),
"dr_write_saved_reg(): drcontext does not belong to current thread");
if (slot <= SPILL_SLOT_TLS_MAX) {
ushort offs = SPILL_SLOT_TLS_OFFS[slot];
*(reg_t *)(((byte *)&dcontext->local_state->spill_space) + offs) = value;
} else {
reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS];
reg_set_value_priv(reg_slot, get_mcontext(dcontext), value);
}
}
DR_API
/**
* Inserts into ilist prior to "where" instruction(s) to read into the
* general-purpose full-size register reg from the user-controlled drcontext
* field for this thread.
*/
void
dr_insert_read_tls_field(void *drcontext, instrlist_t *ilist, instr_t *where,
reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_read_tls_field: drcontext cannot be NULL");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"must use a pointer-sized general-purpose register");
if (SCRATCH_ALWAYS_TLS()) {
/* For thread-shared, since reg must be general-purpose we can
* use it as a base pointer (repeatedly). Plus it's already dead.
*/
MINSERT(ilist, where, instr_create_restore_from_tls
(dcontext, reg, TLS_DCONTEXT_SLOT));
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, reg, reg, CLIENT_DATA_OFFSET));
MINSERT(ilist, where, XINST_CREATE_load
(dcontext, opnd_create_reg(reg),
OPND_CREATE_MEMPTR(reg, offsetof(client_data_t, user_field))));
} else {
MINSERT(ilist, where, XINST_CREATE_load
(dcontext, opnd_create_reg(reg),
OPND_CREATE_ABSMEM(&dcontext->client_data->user_field, OPSZ_PTR)));
}
}
DR_API
/**
* Inserts into ilist prior to "where" instruction(s) to write the
* general-purpose full-size register reg to the user-controlled drcontext field
* for this thread.
*/
void
dr_insert_write_tls_field(void *drcontext, instrlist_t *ilist, instr_t *where,
reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_write_tls_field: drcontext cannot be NULL");
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"must use a pointer-sized general-purpose register");
if (SCRATCH_ALWAYS_TLS()) {
reg_id_t spill = SCRATCH_REG0;
if (reg == spill) /* don't need sub-reg test b/c we know it's pointer-sized */
spill = SCRATCH_REG1;
MINSERT(ilist, where, instr_create_save_to_tls(dcontext, spill, TLS_REG0_SLOT));
MINSERT(ilist, where, instr_create_restore_from_tls
(dcontext, spill, TLS_DCONTEXT_SLOT));
MINSERT(ilist, where, instr_create_restore_from_dc_via_reg
(dcontext, spill, spill, CLIENT_DATA_OFFSET));
MINSERT(ilist, where, XINST_CREATE_store
(dcontext, OPND_CREATE_MEMPTR(spill,
offsetof(client_data_t, user_field)),
opnd_create_reg(reg)));
MINSERT(ilist, where,
instr_create_restore_from_tls(dcontext, spill, TLS_REG0_SLOT));
} else {
MINSERT(ilist, where, XINST_CREATE_store
(dcontext, OPND_CREATE_ABSMEM
(&dcontext->client_data->user_field, OPSZ_PTR),
opnd_create_reg(reg)));
}
}
DR_API void
dr_save_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where,
dr_spill_slot_t slot)
{
reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
CLIENT_ASSERT(drcontext != NULL,
"dr_save_arith_flags: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_save_arith_flags: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_save_arith_flags: invalid spill slot selection");
dr_save_reg(drcontext, ilist, where, reg, slot);
dr_save_arith_flags_to_reg(drcontext, ilist, where, reg);
}
DR_API void
dr_restore_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where,
dr_spill_slot_t slot)
{
reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
CLIENT_ASSERT(drcontext != NULL,
"dr_restore_arith_flags: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_restore_arith_flags: drcontext is invalid");
CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
"dr_restore_arith_flags: invalid spill slot selection");
dr_restore_arith_flags_from_reg(drcontext, ilist, where, reg);
dr_restore_reg(drcontext, ilist, where, reg, slot);
}
DR_API void
dr_save_arith_flags_to_xax(void *drcontext, instrlist_t *ilist, instr_t *where)
{
reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
dr_save_arith_flags_to_reg(drcontext, ilist, where, reg);
}
DR_API void
dr_restore_arith_flags_from_xax(void *drcontext, instrlist_t *ilist,
instr_t *where)
{
reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
dr_restore_arith_flags_from_reg(drcontext, ilist, where, reg);
}
DR_API void
dr_save_arith_flags_to_reg(void *drcontext, instrlist_t *ilist,
instr_t *where, reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_save_arith_flags_to_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_save_arith_flags_to_reg: drcontext is invalid");
#ifdef X86
CLIENT_ASSERT(reg == DR_REG_XAX,
"only xax should be used for save arith flags in X86");
/* flag saving code:
* lahf
* seto al
*/
MINSERT(ilist, where, INSTR_CREATE_lahf(dcontext));
MINSERT(ilist, where,
INSTR_CREATE_setcc(dcontext, OP_seto, opnd_create_reg(REG_AL)));
#elif defined(ARM)
/* flag saving code: mrs reg, cpsr */
MINSERT(ilist, where,
INSTR_CREATE_mrs(dcontext,
opnd_create_reg(reg),
opnd_create_reg(DR_REG_CPSR)));
#elif defined(AARCH64)
/* flag saving code: mrs reg, nzcv */
MINSERT(ilist, where,
INSTR_CREATE_mrs(dcontext,
opnd_create_reg(reg),
opnd_create_reg(DR_REG_NZCV)));
#endif /* X86/ARM/AARCH64 */
}
DR_API void
dr_restore_arith_flags_from_reg(void *drcontext, instrlist_t *ilist,
instr_t *where, reg_id_t reg)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_restore_arith_flags_from_reg: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_restore_arith_flags_from_reg: drcontext is invalid");
#ifdef X86
CLIENT_ASSERT(reg == DR_REG_XAX,
"only xax should be used for save arith flags in X86");
/* flag restoring code:
* add 0x7f,%al
* sahf
*/
/* do an add such that OF will be set only if seto set
* the MSB of saveto to 1
*/
MINSERT(ilist, where,
INSTR_CREATE_add(dcontext, opnd_create_reg(REG_AL), OPND_CREATE_INT8(0x7f)));
MINSERT(ilist, where, INSTR_CREATE_sahf(dcontext));
#elif defined(ARM)
    /* flag restoring code: msr apsr_nzcvqg, reg */
MINSERT(ilist, where,
INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_CPSR),
OPND_CREATE_INT_MSR_NZCVQG(),
opnd_create_reg(reg)));
#elif defined(AARCH64)
    /* flag restoring code: msr nzcv, reg */
MINSERT(ilist, where,
INSTR_CREATE_msr(dcontext,
opnd_create_reg(DR_REG_NZCV),
opnd_create_reg(reg)));
#endif /* X86/ARM/AARCH64 */
}
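/* Illustrative sketch, not DR code: preserving the arithmetic flags around inserted
 * instrumentation that clobbers them, using the register-based variants above.
 * SPILL_SLOT_1 is assumed to be the first dr_spill_slot_t value; on x86 the register
 * must be xax per the asserts above.
 *
 *   dr_save_reg(drcontext, ilist, where, DR_REG_XAX, SPILL_SLOT_1);
 *   dr_save_arith_flags_to_reg(drcontext, ilist, where, DR_REG_XAX);
 *   // ... flag-clobbering meta-instrs ...
 *   dr_restore_arith_flags_from_reg(drcontext, ilist, where, DR_REG_XAX);
 *   dr_restore_reg(drcontext, ilist, where, DR_REG_XAX, SPILL_SLOT_1);
 */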
/* providing functionality of old -instr_calls and -instr_branches flags
*
* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* dr_insert_clean_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched.
*/
DR_API void
dr_insert_call_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee)
{
ptr_uint_t target, address;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_call_instrumentation: drcontext cannot be NULL");
address = (ptr_uint_t) instr_get_translation(instr);
/* dr_insert_ubr_instrumentation() uses this function */
CLIENT_ASSERT(instr_is_call(instr) || instr_is_ubr(instr),
"dr_insert_{ubr,call}_instrumentation must be applied to a ubr");
CLIENT_ASSERT(address != 0,
"dr_insert_{ubr,call}_instrumentation: can't determine app address");
if (opnd_is_pc(instr_get_target(instr))) {
if (opnd_is_far_pc(instr_get_target(instr))) {
/* FIXME: handle far pc */
CLIENT_ASSERT(false,
"dr_insert_{ubr,call}_instrumentation: far pc not supported");
}
/* In release build for far pc keep going assuming 0 base */
target = (ptr_uint_t) opnd_get_pc(instr_get_target(instr));
}
else if (opnd_is_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
target = (ptr_uint_t) instr_get_translation(tgt);
CLIENT_ASSERT(target != 0,
"dr_insert_{ubr,call}_instrumentation: unknown target");
if (opnd_is_far_instr(instr_get_target(instr))) {
/* FIXME: handle far instr */
CLIENT_ASSERT(false,
"dr_insert_{ubr,call}_instrumentation: far instr not supported");
}
} else {
CLIENT_ASSERT(false, "dr_insert_{ubr,call}_instrumentation: unknown target");
target = 0;
}
dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 2,
/* address of call is 1st parameter */
OPND_CREATE_INTPTR(address),
/* call target is 2nd parameter */
OPND_CREATE_INTPTR(target));
}
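/* Illustrative sketch, not DR code: a callee for the routine above receives the call
 * site and its target as the two pointer-sized arguments passed by the clean call;
 * the callee and wrapper names are hypothetical.
 *
 *   static void
 *   at_call(app_pc instr_addr, app_pc target_addr)
 *   {
 *       dr_fprintf(STDERR, "call "PFX" -> "PFX"\n", instr_addr, target_addr);
 *   }
 *
 *   static void
 *   example_instrument(void *drcontext, instrlist_t *bb, instr_t *instr)
 *   {
 *       if (instr_is_call(instr))
 *           dr_insert_call_instrumentation(drcontext, bb, instr, (void *) at_call);
 *   }
 */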
/* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* dr_insert_clean_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched. Since we need another
* tls spill slot in this routine we require the caller to give us one. */
DR_API void
dr_insert_mbr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee, dr_spill_slot_t scratch_slot)
{
#ifdef X86
dcontext_t *dcontext = (dcontext_t *) drcontext;
ptr_uint_t address = (ptr_uint_t) instr_get_translation(instr);
opnd_t tls_opnd;
instr_t *newinst;
reg_id_t reg_target;
/* PR 214051: dr_insert_mbr_instrumentation() broken with -indcall2direct */
CLIENT_ASSERT(!DYNAMO_OPTION(indcall2direct),
"dr_insert_mbr_instrumentation not supported with -opt_speed");
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_mbr_instrumentation: drcontext cannot be NULL");
CLIENT_ASSERT(address != 0,
"dr_insert_mbr_instrumentation: can't determine app address");
CLIENT_ASSERT(instr_is_mbr(instr),
"dr_insert_mbr_instrumentation must be applied to an mbr");
/* We need a TLS spill slot to use. We can use any tls slot that is opnd
* accessible. */
CLIENT_ASSERT(scratch_slot <= dr_max_opnd_accessible_spill_slot(),
"dr_insert_mbr_instrumentation: scratch_slot must be less than "
"dr_max_opnd_accessible_spill_slot()");
    /* It is possible for the mbr instruction to use the XCX register, so we have
     * to use an unused register.
*/
for (reg_target = REG_XAX; reg_target <= REG_XBX; reg_target++) {
if (!instr_uses_reg(instr, reg_target))
break;
}
/* PR 240265: we disallow clients to add post-mbr instrumentation, so we
* avoid doing that here even though it's a little less efficient since
* our mbr mangling will re-grab the target.
* We could keep it post-mbr and mark it w/ a special flag so we allow
* our own but not clients' instrumentation post-mbr: but then we
* hit post-syscall issues for wow64 where post-mbr equals post-syscall
* (PR 240258: though we might solve that some other way).
*/
/* Note that since we're using a client exposed slot we know it will be
* preserved across the clean call. */
tls_opnd = dr_reg_spill_slot_opnd(drcontext, scratch_slot);
newinst = XINST_CREATE_store(dcontext, tls_opnd, opnd_create_reg(reg_target));
/* PR 214962: ensure we'll properly translate the de-ref of app
* memory by marking the spill and de-ref as INSTR_OUR_MANGLING.
*/
instr_set_our_mangling(newinst, true);
MINSERT(ilist, instr, newinst);
if (instr_is_return(instr)) {
/* the retaddr operand is always the final source for all OP_ret* instrs */
opnd_t retaddr = instr_get_src(instr, instr_num_srcs(instr) - 1);
opnd_size_t sz = opnd_get_size(retaddr);
/* even for far ret and iret, retaddr is at TOS */
newinst = instr_create_1dst_1src(dcontext, sz == OPSZ_2 ? OP_movzx : OP_mov_ld,
opnd_create_reg(reg_target), retaddr);
} else {
/* call* or jmp* */
opnd_t src = instr_get_src(instr, 0);
opnd_size_t sz = opnd_get_size(src);
/* if a far cti, we can't fit it into a register: asserted above.
* in release build we'll get just the address here.
*/
if (instr_is_far_cti(instr)) {
if (sz == OPSZ_10) {
sz = OPSZ_8;
} else if (sz == OPSZ_6) {
sz = OPSZ_4;
# ifdef X64
reg_target = reg_64_to_32(reg_target);
# endif
} else /* target has OPSZ_4 */ {
sz = OPSZ_2;
}
opnd_set_size(&src, sz);
}
# ifdef UNIX
    /* xref i#1834: the problem with the fs and gs segments is a general problem
     * on Linux; this fix is specific to mbr instrumentation, but a general
     * solution is needed.
*/
if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(src)) {
src = mangle_seg_ref_opnd(dcontext, ilist, instr, src, reg_target);
}
# endif
newinst = instr_create_1dst_1src(dcontext,
sz == OPSZ_2 ? OP_movzx : OP_mov_ld,
opnd_create_reg(reg_target), src);
}
instr_set_our_mangling(newinst, true);
MINSERT(ilist, instr, newinst);
/* Now we want the true app state saved, for dr_get_mcontext().
* We specially recognize our OP_xchg as a restore in
* instr_is_reg_spill_or_restore().
*/
MINSERT(ilist, instr,
INSTR_CREATE_xchg(dcontext, tls_opnd, opnd_create_reg(reg_target)));
dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 2,
/* address of mbr is 1st param */
OPND_CREATE_INTPTR(address),
/* indirect target (in tls, xchg-d from ecx) is 2nd param */
tls_opnd);
#elif defined (ARM)
/* i#1551: NYI on ARM.
* Also, we may want to split these out into arch/{x86,arm}/ files
*/
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
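/* A minimal, hypothetical client-side sketch of using dr_insert_mbr_instrumentation()
 * from a bb event, shown only to illustrate the callee shape implied by the two
 * clean-call args set up above (mbr address, indirect target).  The names at_mbr and
 * event_bb are assumptions, not part of DR.
 *
 *     static void
 *     at_mbr(app_pc instr_addr, app_pc target_addr)
 *     {
 *         dr_fprintf(STDERR, "mbr @"PFX" -> "PFX"\n", instr_addr, target_addr);
 *     }
 *
 *     static dr_emit_flags_t
 *     event_bb(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
 *              bool translating)
 *     {
 *         instr_t *instr;
 *         for (instr = instrlist_first(bb); instr != NULL; instr = instr_get_next(instr)) {
 *             if (instr_is_mbr(instr)) {
 *                 dr_insert_mbr_instrumentation(drcontext, bb, instr, (void *)at_mbr,
 *                                               SPILL_SLOT_2);
 *             }
 *         }
 *         return DR_EMIT_DEFAULT;
 *     }
 */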
/* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
* dr_insert_clean_call(). We guarantee to clients that all other slots
* (except the XAX mcontext slot) will remain untouched.
*
* NOTE : this routine has assumption about the layout of the clean call,
* so any change to clean call instrumentation layout may break this routine.
*/
static void
dr_insert_cbr_instrumentation_help(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee, bool has_fallthrough, opnd_t user_data)
{
#ifdef X86
dcontext_t *dcontext = (dcontext_t *) drcontext;
ptr_uint_t address, target;
int opc;
instr_t *app_flags_ok;
    bool out_of_line_switch = false;
CLIENT_ASSERT(drcontext != NULL,
"dr_insert_cbr_instrumentation: drcontext cannot be NULL");
address = (ptr_uint_t) instr_get_translation(instr);
CLIENT_ASSERT(address != 0,
"dr_insert_cbr_instrumentation: can't determine app address");
CLIENT_ASSERT(instr_is_cbr(instr),
"dr_insert_cbr_instrumentation must be applied to a cbr");
CLIENT_ASSERT(opnd_is_near_pc(instr_get_target(instr)) ||
opnd_is_near_instr(instr_get_target(instr)),
"dr_insert_cbr_instrumentation: target opnd must be a near pc or "
"near instr");
if (opnd_is_near_pc(instr_get_target(instr)))
target = (ptr_uint_t) opnd_get_pc(instr_get_target(instr));
else if (opnd_is_near_instr(instr_get_target(instr))) {
instr_t *tgt = opnd_get_instr(instr_get_target(instr));
target = (ptr_uint_t) instr_get_translation(tgt);
CLIENT_ASSERT(target != 0, "dr_insert_cbr_instrumentation: unknown target");
} else {
CLIENT_ASSERT(false, "dr_insert_cbr_instrumentation: unknown target");
target = 0;
}
app_flags_ok = instr_get_prev(instr);
if (has_fallthrough) {
ptr_uint_t fallthrough = address + instr_length(drcontext, instr);
CLIENT_ASSERT(!opnd_uses_reg(user_data, DR_REG_XBX),
"register ebx should not be used");
CLIENT_ASSERT(fallthrough > address, "wrong fallthrough address");
dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 5,
                             /* push address of cbr onto stack as 1st parameter */
OPND_CREATE_INTPTR(address),
/* target is 2nd parameter */
OPND_CREATE_INTPTR(target),
                             /* fall-through is 3rd parameter */
OPND_CREATE_INTPTR(fallthrough),
/* branch direction (put in ebx below) is 4th parameter */
opnd_create_reg(REG_XBX),
/* user defined data is 5th parameter */
opnd_is_null(user_data) ? OPND_CREATE_INT32(0) : user_data);
} else {
dr_insert_clean_call(drcontext, ilist, instr, callee, false/*no fpstate*/, 3,
                             /* push address of cbr onto stack as 1st parameter */
OPND_CREATE_INTPTR(address),
/* target is 2nd parameter */
OPND_CREATE_INTPTR(target),
/* branch direction (put in ebx below) is 3rd parameter */
opnd_create_reg(REG_XBX));
}
/* calculate whether branch taken or not
* since the clean call mechanism clobbers eflags, we
* must insert our checks prior to that clobbering.
* since we do it AFTER the pusha, we don't have to save; but, we
* can't use a param that's part of any calling convention b/c w/
* PR 250976 our clean call will get it from the pusha.
* ebx is a good choice.
*/
/* We expect:
mov 0x400e5e34 -> %esp
pusha %esp %eax %ebx %ecx %edx %ebp %esi %edi -> %esp (%esp)
pushf %esp -> %esp (%esp)
push $0x00000000 %esp -> %esp (%esp)
popf %esp (%esp) -> %esp
mov 0x400e5e40 -> %eax
push %eax %esp -> %esp (%esp)
* We also assume all clean call instrs are expanded.
*/
/* Because the clean call might be optimized, we cannot assume the sequence.
     * We assume that the clean call will not be inlined since it has more than
     * one argument, so we scan to find either a call instr or a popf.
* if a popf, do as before.
* if a call, move back to right before push xbx or mov rbx => r3.
*/
if (app_flags_ok == NULL)
app_flags_ok = instrlist_first(ilist);
/* r2065 added out-of-line clean call context switch, so we need to check
* how the context switch code is inserted.
*/
while (!instr_opcode_valid(app_flags_ok) ||
instr_get_opcode(app_flags_ok) != OP_call) {
app_flags_ok = instr_get_next(app_flags_ok);
CLIENT_ASSERT(app_flags_ok != NULL,
"dr_insert_cbr_instrumentation: cannot find call instr");
if (instr_get_opcode(app_flags_ok) == OP_popf)
break;
}
if (instr_get_opcode(app_flags_ok) == OP_call) {
if (opnd_get_pc(instr_get_target(app_flags_ok)) == (app_pc)callee) {
/* call to clean callee
* move a few instrs back till right before push xbx, or mov rbx => r3
*/
while (app_flags_ok != NULL) {
if (instr_reg_in_src(app_flags_ok, DR_REG_XBX))
break;
app_flags_ok = instr_get_prev(app_flags_ok);
}
} else {
/* call to clean call context save */
ASSERT(opnd_get_pc(instr_get_target(app_flags_ok)) ==
get_clean_call_save(dcontext _IF_X64(GENCODE_X64)));
out_of_line_switch = true;
}
ASSERT(app_flags_ok != NULL);
}
/* i#1155: for out-of-line context switch
* we insert two parts of code to setup "taken" arg for clean call:
* - compute "taken" and put it onto the stack right before call to context
* save, where DR already swapped stack and adjusted xsp to point beyond
* mcontext plus temp stack size.
* It is 2 slots away b/c 1st is retaddr.
     * - move the "taken" from the stack to ebx to be compatible with existing code
* right after context save returns and before arg setup, where xsp
* points beyond mcontext (xref emit_clean_call_save).
* It is 2 slots + temp stack size away.
* XXX: we could optimize the code by computing "taken" after clean call
* save if the eflags are not cleared.
*/
/* put our code before the popf or use of xbx */
opc = instr_get_opcode(instr);
if (opc == OP_jecxz || opc == OP_loop || opc == OP_loope || opc == OP_loopne) {
        /* for 8-bit cbrs w/ multiple conditions and state, it's simpler to
         * just execute them -- they're rare so shouldn't be a perf hit.
* after all, ecx is saved, can clobber it.
* we do:
* loop/jecxz taken
* not_taken: mov 0, ebx
* jmp done
* taken: mov 1, ebx
* done:
*/
opnd_t opnd_taken = out_of_line_switch ?
/* 2 slots away from xsp, xref comment above for i#1155 */
OPND_CREATE_MEM32(REG_XSP, -2*(int)XSP_SZ /* ret+taken */) :
opnd_create_reg(REG_EBX);
instr_t *branch = instr_clone(dcontext, instr);
instr_t *not_taken =
INSTR_CREATE_mov_imm(dcontext, opnd_taken,
OPND_CREATE_INT32(0));
instr_t *taken =
INSTR_CREATE_mov_imm(dcontext, opnd_taken,
OPND_CREATE_INT32(1));
instr_t *done = INSTR_CREATE_label(dcontext);
instr_set_target(branch, opnd_create_instr(taken));
/* client-added meta instrs should not have translation set */
instr_set_translation(branch, NULL);
MINSERT(ilist, app_flags_ok, branch);
MINSERT(ilist, app_flags_ok, not_taken);
MINSERT(ilist, app_flags_ok,
INSTR_CREATE_jmp_short(dcontext, opnd_create_instr(done)));
MINSERT(ilist, app_flags_ok, taken);
MINSERT(ilist, app_flags_ok, done);
if (out_of_line_switch) {
if (opc == OP_loop || opc == OP_loope || opc == OP_loopne) {
/* We executed OP_loop* before we saved xcx, so we must restore
* it. We should be able to use OP_lea b/c OP_loop* uses
* addr prefix to shrink pointer-sized xcx, not data prefix.
*/
reg_id_t xcx = opnd_get_reg(instr_get_dst(instr, 0));
MINSERT(ilist, app_flags_ok, INSTR_CREATE_lea
(dcontext, opnd_create_reg(xcx),
opnd_create_base_disp(xcx, DR_REG_NULL, 0, 1, OPSZ_lea)));
}
ASSERT(instr_get_opcode(app_flags_ok) == OP_call);
/* 2 slots + temp_stack_size away from xsp,
* xref comment above for i#1155
*/
opnd_taken = OPND_CREATE_MEM32
(REG_XSP, -2*(int)XSP_SZ-get_clean_call_temp_stack_size());
MINSERT(ilist, instr_get_next(app_flags_ok),
XINST_CREATE_load(dcontext,
opnd_create_reg(REG_EBX),
opnd_taken));
}
} else {
/* build a setcc equivalent of instr's jcc operation
* WARNING: this relies on order of OP_ enum!
*/
opnd_t opnd_taken = out_of_line_switch ?
/* 2 slots away from xsp, xref comment above for i#1155 */
OPND_CREATE_MEM8(REG_XSP, -2*(int)XSP_SZ /* ret+taken */) :
opnd_create_reg(REG_BL);
opc = instr_get_opcode(instr);
if (opc <= OP_jnle_short)
opc += (OP_jo - OP_jo_short);
CLIENT_ASSERT(opc >= OP_jo && opc <= OP_jnle,
"dr_insert_cbr_instrumentation: unknown opcode");
opc = opc - OP_jo + OP_seto;
MINSERT(ilist, app_flags_ok,
INSTR_CREATE_setcc(dcontext, opc, opnd_taken));
if (out_of_line_switch) {
app_flags_ok = instr_get_next(app_flags_ok);
/* 2 slots + temp_stack_size away from xsp,
* xref comment above for i#1155
*/
opnd_taken = OPND_CREATE_MEM8
(REG_XSP, -2*(int)XSP_SZ-get_clean_call_temp_stack_size());
}
/* movzx ebx <- bl */
MINSERT(ilist, app_flags_ok,
INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX),
opnd_taken));
}
/* now branch dir is in ebx and will be passed to clean call */
#elif defined (ARM)
/* i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
}
DR_API void
dr_insert_cbr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee)
{
dr_insert_cbr_instrumentation_help(drcontext, ilist, instr, callee,
false /* no fallthrough */, opnd_create_null());
}
DR_API void
dr_insert_cbr_instrumentation_ex(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee, opnd_t user_data)
{
dr_insert_cbr_instrumentation_help(drcontext, ilist, instr, callee,
true /* has fallthrough */, user_data);
}
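/* A hypothetical client-side sketch (not part of this file's logic) of the callee
 * shapes implied by the clean-call args set up in the helper above; the exact
 * parameter types are an assumption derived from those args (address, target,
 * [fall-through,] taken flag[, user_data]).  at_cbr and at_cbr_ex are made-up names.
 *
 *     static void
 *     at_cbr(app_pc cbr_addr, app_pc target, int taken)
 *     {
 *         dr_fprintf(STDERR, "cbr @"PFX" -> "PFX" %s\n", cbr_addr, target,
 *                    taken ? "taken" : "not taken");
 *     }
 *
 *     static void
 *     at_cbr_ex(app_pc cbr_addr, app_pc target, app_pc fall_through, int taken,
 *               int user_data)   // matches an OPND_CREATE_INT32() user_data operand
 *     {
 *     }
 *
 *     // in a bb event, for an app cbr "instr":
 *     //     dr_insert_cbr_instrumentation(drcontext, bb, instr, (void *)at_cbr);
 *     // or:
 *     //     dr_insert_cbr_instrumentation_ex(drcontext, bb, instr, (void *)at_cbr_ex,
 *     //                                      OPND_CREATE_INT32(0));
 */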
DR_API void
dr_insert_ubr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
void *callee)
{
/* same as call */
dr_insert_call_instrumentation(drcontext, ilist, instr, callee);
}
/* This may seem like a pretty targeted API function, but there's no
* clean way for a client to do this on its own due to DR's
* restrictions on bb instrumentation (i#782).
*/
DR_API
bool
dr_clobber_retaddr_after_read(void *drcontext, instrlist_t *ilist, instr_t *instr,
ptr_uint_t value)
{
/* the client could be using note fields so we use a label and xfer to
* a note field during the mangling pass
*/
if (instr_is_return(instr)) {
instr_t *label = INSTR_CREATE_label(drcontext);
dr_instr_label_data_t *data = instr_get_label_data_area(label);
/* we could coordinate w/ drmgr and use some reserved note label value
* but only if we run out of instr flags. so we set to 0 to not
* overlap w/ any client uses (DRMGR_NOTE_NONE == 0).
*/
label->note = 0;
/* these values are read back in mangle() */
data->data[0] = (ptr_uint_t) instr;
data->data[1] = value;
label->flags |= INSTR_CLOBBER_RETADDR;
instr->flags |= INSTR_CLOBBER_RETADDR;
instrlist_meta_preinsert(ilist, instr, label);
return true;
}
return false;
}
DR_API bool
dr_mcontext_xmm_fields_valid(void)
{
return preserve_xmm_caller_saved();
}
#endif /* CLIENT_INTERFACE */
/* dr_get_mcontext() needed for translating clean call arg errors */
/* Fills in whichever of dmc or mc is non-NULL */
bool
dr_get_mcontext_priv(dcontext_t *dcontext, dr_mcontext_t *dmc, priv_mcontext_t *mc)
{
priv_mcontext_t *state;
CLIENT_ASSERT(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)),
"DR context protection NYI");
if (mc == NULL) {
CLIENT_ASSERT(dmc != NULL, "invalid context");
/* catch uses that forget to set size: perhaps in a few releases,
* when most old clients have been converted, remove this (we'll
* still return false)
*/
CLIENT_ASSERT(dmc->size == sizeof(dr_mcontext_t),
"dr_mcontext_t.size field not set properly");
CLIENT_ASSERT(dmc->flags != 0 && (dmc->flags & ~(DR_MC_ALL)) == 0,
"dr_mcontext_t.flags field not set properly");
} else
CLIENT_ASSERT(dmc == NULL, "invalid internal params");
#ifdef CLIENT_INTERFACE
/* i#117/PR 395156: support getting mcontext from events where mcontext is
* stable. It would be nice to support it from init and 1st thread init,
* but the mcontext is not available at those points.
*
* Since DR calls this routine when recreating state and wants the
* clean call version, can't distinguish by whereami=WHERE_FCACHE,
* so we set a flag in the supported events. If client routine
* crashes and we recreate then we want clean call version anyway
* so should be ok. Note that we want in_pre_syscall for other
* reasons (dr_syscall_set_param() for Windows) so we keep it a
* separate flag.
*/
/* no support for init or initial thread init */
if (!dynamo_initialized)
return false;
if (dcontext->client_data->cur_mc != NULL) {
if (mc != NULL)
*mc = *dcontext->client_data->cur_mc;
else if (!priv_mcontext_to_dr_mcontext(dmc, dcontext->client_data->cur_mc))
return false;
return true;
}
if (dcontext->client_data->suspended) {
/* A thread suspended by dr_suspend_all_other_threads() has its
* context translated lazily here.
* We cache the result in cur_mc to avoid a translation cost next time.
*/
bool res;
priv_mcontext_t *mc_xl8;
if (mc != NULL)
mc_xl8 = mc;
else {
dcontext->client_data->cur_mc = (priv_mcontext_t *)
heap_alloc(dcontext, sizeof(*dcontext->client_data->cur_mc)
HEAPACCT(ACCT_CLIENT));
/* We'll clear this cache in dr_resume_all_other_threads() */
mc_xl8 = dcontext->client_data->cur_mc;
}
res = thread_get_mcontext(dcontext->thread_record, mc_xl8);
CLIENT_ASSERT(res, "failed to get mcontext of suspended thread");
res = translate_mcontext(dcontext->thread_record, mc_xl8,
false/*do not restore memory*/, NULL);
CLIENT_ASSERT(res, "failed to xl8 mcontext of suspended thread");
if (mc == NULL && !priv_mcontext_to_dr_mcontext(dmc, mc_xl8))
return false;
return true;
}
/* PR 207947: support mcontext access from syscall events */
if (dcontext->client_data->mcontext_in_dcontext ||
dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall) {
if (mc != NULL)
*mc = *get_mcontext(dcontext);
else if (!priv_mcontext_to_dr_mcontext(dmc, get_mcontext(dcontext)))
return false;
return true;
}
#endif
/* dr_prepare_for_call() puts the machine context on the dstack
* with pusha and pushf, but only fills in xmm values for
* preserve_xmm_caller_saved(): however, we tell the client that the xmm
* fields are not valid otherwise. so, we just have to copy the
* state from the dstack.
*/
state = get_priv_mcontext_from_dstack(dcontext);
if (mc != NULL)
*mc = *state;
else if (!priv_mcontext_to_dr_mcontext(dmc, state))
return false;
/* esp is a dstack value -- get the app stack's esp from the dcontext */
if (mc != NULL)
mc->xsp = get_mcontext(dcontext)->xsp;
else if (TEST(DR_MC_CONTROL, dmc->flags))
dmc->xsp = get_mcontext(dcontext)->xsp;
#ifdef ARM
if (TEST(DR_MC_INTEGER, dmc->flags)) {
/* get the stolen register's app value */
if (mc != NULL)
set_stolen_reg_val(mc, (reg_t) get_tls(os_tls_offset(TLS_REG_STOLEN_SLOT)));
else {
set_stolen_reg_val(dr_mcontext_as_priv_mcontext(dmc),
(reg_t) get_tls(os_tls_offset(TLS_REG_STOLEN_SLOT)));
}
}
#endif
/* XXX: should we set the pc field?
* If we do we'll have to adopt a different solution for i#1685 in our Windows
* hooks where today we use the pc slot for temp storage.
*/
return true;
}
DR_API bool
dr_get_mcontext(void *drcontext, dr_mcontext_t *dmc)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
return dr_get_mcontext_priv(dcontext, dmc, NULL);
}
#ifdef CLIENT_INTERFACE
DR_API bool
dr_set_mcontext(void *drcontext, dr_mcontext_t *context)
{
priv_mcontext_t *state;
dcontext_t *dcontext = (dcontext_t *)drcontext;
IF_ARM(reg_t reg_val = 0 /* silence the compiler warning */;)
CLIENT_ASSERT(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)),
"DR context protection NYI");
CLIENT_ASSERT(context != NULL, "invalid context");
CLIENT_ASSERT(context->size == sizeof(dr_mcontext_t),
"dr_mcontext_t.size field not set properly");
CLIENT_ASSERT(context->flags != 0 && (context->flags & ~(DR_MC_ALL)) == 0,
"dr_mcontext_t.flags field not set properly");
/* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
/* PR 207947: support mcontext access from syscall events */
if (dcontext->client_data->mcontext_in_dcontext ||
dcontext->client_data->in_pre_syscall ||
dcontext->client_data->in_post_syscall) {
if (!dr_mcontext_to_priv_mcontext(get_mcontext(dcontext), context))
return false;
return true;
}
/* copy the machine context to the dstack area created with
* dr_prepare_for_call(). note that xmm0-5 copied there
* will override any save_fpstate xmm values, as desired.
*/
state = get_priv_mcontext_from_dstack(dcontext);
#ifdef ARM
if (TEST(DR_MC_INTEGER, context->flags)) {
/* Set the stolen register's app value in TLS, not on stack (we rely
* on our stolen reg retaining its value on the stack)
*/
priv_mcontext_t *mc = dr_mcontext_as_priv_mcontext(context);
set_tls(os_tls_offset(TLS_REG_STOLEN_SLOT), (void *) get_stolen_reg_val(mc));
        /* save the reg val on the stack to be clobbered by the copy below */
reg_val = get_stolen_reg_val(state);
}
#endif
if (!dr_mcontext_to_priv_mcontext(state, context))
return false;
#ifdef ARM
if (TEST(DR_MC_INTEGER, context->flags)) {
/* restore the reg val on the stack clobbered by the copy above */
set_stolen_reg_val(state, reg_val);
}
#endif
if (TEST(DR_MC_CONTROL, context->flags)) {
/* esp will be restored from a field in the dcontext */
get_mcontext(dcontext)->xsp = context->xsp;
}
/* XXX: should we support setting the pc field? */
return true;
}
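/* A minimal sketch of the read-modify-write pattern these two routines support from
 * a clean call or syscall event (illustrative only, not part of this file's logic).
 * The size and flags fields must be set before calling dr_get_mcontext(), per the
 * asserts above; xax is the x86 field name and differs on other ISAs.
 *
 *     dr_mcontext_t mc;
 *     mc.size = sizeof(mc);
 *     mc.flags = DR_MC_ALL;        // or a subset such as DR_MC_INTEGER|DR_MC_CONTROL
 *     if (dr_get_mcontext(drcontext, &mc)) {
 *         mc.xax = 0;              // e.g., scrub the app's return value
 *         dr_set_mcontext(drcontext, &mc);
 *     }
 */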
DR_API
bool
dr_redirect_execution(dr_mcontext_t *mcontext)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL);
CLIENT_ASSERT(mcontext->size == sizeof(dr_mcontext_t),
"dr_mcontext_t.size field not set properly");
CLIENT_ASSERT(mcontext->flags == DR_MC_ALL,
"dr_mcontext_t.flags must be DR_MC_ALL");
/* PR 352429: squash current trace.
* FIXME: will clients use this so much that this will be a perf issue?
* samples/cbr doesn't hit this even at -trace_threshold 1
*/
if (is_building_trace(dcontext)) {
LOG(THREAD, LOG_INTERP, 1, "squashing trace-in-progress\n");
trace_abort(dcontext);
}
dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
dcontext->whereami = WHERE_FCACHE;
set_last_exit(dcontext, (linkstub_t *)get_client_linkstub());
transfer_to_dispatch(dcontext, dr_mcontext_as_priv_mcontext(mcontext),
true/*full_DR_state*/);
/* on success we won't get here */
return false;
}
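/* A hypothetical sketch of redirecting app execution from an event where the
 * mcontext is valid (e.g., a clean call); "new_target" is an assumed app address
 * supplied by the client, and this is not part of this file's logic.
 *
 *     dr_mcontext_t mc;
 *     mc.size = sizeof(mc);
 *     mc.flags = DR_MC_ALL;        // dr_redirect_execution() requires DR_MC_ALL
 *     dr_get_mcontext(drcontext, &mc);
 *     mc.pc = new_target;
 *     dr_redirect_execution(&mc);  // does not return on success
 */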
DR_API
byte *
dr_redirect_native_target(void *drcontext)
{
#ifdef PROGRAM_SHEPHERDING
/* This feature is unavail for prog shep b/c of the cross-ib-type pollution,
* as well as the lack of source tag info when exiting the ibl (i#1150).
*/
return NULL;
#else
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL,
"dr_redirect_native_target(): drcontext cannot be NULL");
/* The client has no way to know the mode of our gencode so we set LSB here */
return PC_AS_JMP_TGT(DEFAULT_ISA_MODE, get_client_ibl_xfer_entry(dcontext));
#endif
}
/***************************************************************************
* ADAPTIVE OPTIMIZATION SUPPORT
 * *Note: for non-owning thread support (i.e. sideline) all methods assume
 * the dcontext is valid; the client will have to ensure this with a lock
 * on thread_exit!!
*
* *need way for side thread to get a dcontext to use for logging and mem
* alloc, before do that should think more about mem alloc in/for adaptive
* routines
*
* *made local mem alloc by side thread safe (see heap.c)
*
 * *logging not safe if not owning thread?
*/
DR_API
/* Schedules the fragment to be deleted. Once this call is completed,
* an existing executing fragment is allowed to complete, but control
* will not enter the fragment again before it is deleted.
*
* NOTE: this comment used to say, "after deletion, control may still
* reach the fragment by indirect branch.". We believe this is now only
* true for shared fragments, which are not currently supported.
*/
bool
dr_delete_fragment(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f;
bool deletable = false, waslinking;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(!SHARED_FRAGMENTS_ENABLED(),
"dr_delete_fragment() only valid with -thread_private");
CLIENT_ASSERT(drcontext != NULL, "dr_delete_fragment(): drcontext cannot be NULL");
/* i#1989: there's no easy way to get a translation without a proper dcontext */
CLIENT_ASSERT(!fragment_thread_exited(dcontext),
"dr_delete_fragment not supported from the thread exit event");
if (fragment_thread_exited(dcontext))
return false;
waslinking = is_couldbelinking(dcontext);
if (!waslinking)
enter_couldbelinking(dcontext, NULL, false);
#ifdef CLIENT_SIDELINE
mutex_lock(&(dcontext->client_data->sideline_mutex));
fragment_get_fragment_delete_mutex(dcontext);
#else
CLIENT_ASSERT(drcontext == get_thread_private_dcontext(),
"dr_delete_fragment(): drcontext does not belong to current thread");
#endif
f = fragment_lookup(dcontext, tag);
if (f != NULL && (f->flags & FRAG_CANNOT_DELETE) == 0) {
client_todo_list_t * todo = HEAP_TYPE_ALLOC(dcontext, client_todo_list_t,
ACCT_CLIENT, UNPROTECTED);
client_todo_list_t * iter = dcontext->client_data->to_do;
todo->next = NULL;
todo->ilist = NULL;
todo->tag = tag;
if (iter == NULL)
dcontext->client_data->to_do = todo;
else {
while (iter->next != NULL)
iter = iter->next;
iter->next = todo;
}
deletable = true;
        /* unlink fragment so it will return to dynamo and delete.
* Do not remove the fragment from the hashtable --
* we need to be able to look up the fragment when
* inspecting the to_do list in dispatch.
*/
if ((f->flags & FRAG_LINKED_INCOMING) != 0)
unlink_fragment_incoming(dcontext, f);
fragment_remove_from_ibt_tables(dcontext, f, false);
}
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
mutex_unlock(&(dcontext->client_data->sideline_mutex));
#endif
if (!waslinking)
enter_nolinking(dcontext, NULL, false);
return deletable;
}
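/* A hypothetical usage sketch (not part of this file's logic): a -thread_private
 * client might schedule deletion of a fragment from a clean call, using a tag it
 * captured earlier in its bb event and passed as a clean-call arg.
 *
 *     static void
 *     clean_call_delete(void *tag)
 *     {
 *         void *drcontext = dr_get_current_drcontext();
 *         if (!dr_delete_fragment(drcontext, tag))
 *             dr_fprintf(STDERR, "fragment "PFX" could not be deleted\n", tag);
 *     }
 */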
DR_API
/* Schedules the fragment at 'tag' for replacement. Once this call is
* completed, an existing executing fragment is allowed to complete,
* but control will not enter the fragment again before it is replaced.
*
* NOTE: this comment used to say, "after replacement, control may still
* reach the fragment by indirect branch.". We believe this is now only
* true for shared fragments, which are not currently supported.
*
* Takes control of the ilist and all responsibility for deleting it and the
* instrs inside of it. The client should not keep, use, reference, etc. the
* instrlist or any of the instrs it contains after they are passed in.
*/
bool
dr_replace_fragment(void *drcontext, void *tag, instrlist_t *ilist)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
bool frag_found, waslinking;
fragment_t * f;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
CLIENT_ASSERT(!SHARED_FRAGMENTS_ENABLED(),
"dr_replace_fragment() only valid with -thread_private");
CLIENT_ASSERT(drcontext != NULL, "dr_replace_fragment(): drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_replace_fragment: drcontext is invalid");
/* i#1989: there's no easy way to get a translation without a proper dcontext */
CLIENT_ASSERT(!fragment_thread_exited(dcontext),
"dr_replace_fragment not supported from the thread exit event");
if (fragment_thread_exited(dcontext))
return false;
waslinking = is_couldbelinking(dcontext);
if (!waslinking)
enter_couldbelinking(dcontext, NULL, false);
#ifdef CLIENT_SIDELINE
mutex_lock(&(dcontext->client_data->sideline_mutex));
fragment_get_fragment_delete_mutex(dcontext);
#else
CLIENT_ASSERT(drcontext == get_thread_private_dcontext(),
"dr_replace_fragment(): drcontext does not belong to current thread");
#endif
f = fragment_lookup(dcontext, tag);
frag_found = (f != NULL);
if (frag_found) {
client_todo_list_t * iter = dcontext->client_data->to_do;
client_todo_list_t * todo = HEAP_TYPE_ALLOC(dcontext, client_todo_list_t,
ACCT_CLIENT, UNPROTECTED);
todo->next = NULL;
todo->ilist = ilist;
todo->tag = tag;
if (iter == NULL)
dcontext->client_data->to_do = todo;
else {
while (iter->next != NULL)
iter = iter->next;
iter->next = todo;
}
        /* unlink fragment so it will return to dynamo and replace for the next
         * time it's executed
*/
if ((f->flags & FRAG_LINKED_INCOMING) != 0)
unlink_fragment_incoming(dcontext, f);
fragment_remove_from_ibt_tables(dcontext, f, false);
}
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
mutex_unlock(&(dcontext->client_data->sideline_mutex));
#endif
if (!waslinking)
enter_nolinking(dcontext, NULL, false);
return frag_found;
}
#ifdef UNSUPPORTED_API
/* FIXME - doesn't work with shared fragments. Consider removing since dr_flush_region
* and dr_delay_flush_region give us most of this functionality. */
DR_API
/* Flushes all fragments containing 'flush_tag', or the entire code
* cache if flush_tag is NULL. 'curr_tag' must specify the tag of the
* currently-executing fragment. If curr_tag is NULL, flushing can be
* delayed indefinitely. Note that flushing is performed across all
* threads, but other threads may continue to execute fragments
* containing 'curr_tag' until those fragments finish.
*/
void dr_flush_fragments(void *drcontext, void *curr_tag, void *flush_tag)
{
client_flush_req_t *iter, *flush;
dcontext_t *dcontext = (dcontext_t *)drcontext;
/* We want to unlink the currently executing fragment so we'll
* force a context switch to DR. That way, we'll perform the
* flush as soon as possible. Unfortunately, the client may not
* know the tag of the current trace. Therefore, we unlink all
* fragments in the region.
*
* Note that we aren't unlinking or ibl-invalidating (i.e., making
* unreachable) any fragments in other threads containing curr_tag
* until the delayed flush happens in enter_nolinking().
*/
if (curr_tag != NULL)
vm_area_unlink_incoming(dcontext, (app_pc)curr_tag);
flush = HEAP_TYPE_ALLOC(dcontext, client_flush_req_t, ACCT_CLIENT, UNPROTECTED);
flush->flush_callback = NULL;
if (flush_tag == NULL) {
flush->start = UNIVERSAL_REGION_BASE;
flush->size = UNIVERSAL_REGION_SIZE;
} else {
flush->start = (app_pc)flush_tag;
flush->size = 1;
}
flush->next = NULL;
iter = dcontext->client_data->flush_list;
if (iter == NULL) {
dcontext->client_data->flush_list = flush;
}
else {
while (iter->next != NULL)
iter = iter->next;
iter->next = flush;
}
}
#endif /* UNSUPPORTED_API */
DR_API
/* Flush all fragments that contain code from the region [start, start+size).
* Uses a synchall flush to guarantee that no execution occurs out of the fragments
* flushed once this returns. Requires caller to be holding no locks (dr or client) and
* to be !couldbelinking (xref PR 199115, 227619). Caller must use
* dr_redirect_execution() to return to the cache. */
bool
dr_flush_region(app_pc start, size_t size)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL);
LOG(THREAD, LOG_FRAGMENT, 2, "%s: "PFX"-"PFX"\n", __FUNCTION__, start, start+size);
/* Flush requires !couldbelinking. FIXME - not all event callbacks to the client are
* !couldbelinking (see PR 227619) restricting where this routine can be used. */
CLIENT_ASSERT(!is_couldbelinking(dcontext), "dr_flush_region: called from an event "
"callback that doesn't support calling this routine; see header file "
"for restrictions.");
/* Flush requires caller to hold no locks that might block a couldbelinking thread
* (which includes almost all dr locks). FIXME - some event callbacks are holding
* dr locks (see PR 227619) so can't call this routine. Since we are going to use
* a synchall flush, holding client locks is disallowed too (could block a thread
* at an unsafe spot for synch). */
CLIENT_ASSERT(OWN_NO_LOCKS(dcontext), "dr_flush_region: caller owns a client "
"lock or was called from an event callback that doesn't support "
"calling this routine; see header file for restrictions.");
CLIENT_ASSERT(size != 0, "dr_flush_region: 0 is invalid size for flush");
/* release build check of requirements, as many as possible at least */
if (size == 0 || is_couldbelinking(dcontext))
return false;
if (!executable_vm_area_executed_from(start, start + size))
return true;
flush_fragments_from_region(dcontext, start, size, true/*force synchall*/);
return true;
}
DR_API
/* Flush all fragments that contain code from the region [start, start+size).
* Uses an unlink flush which guarantees that no thread will enter a fragment that was
* flushed once this returns (threads already in a flushed fragment will continue).
* Requires caller to be holding no locks (dr or client) and to be !couldbelinking
* (xref PR 199115, 227619). */
bool
dr_unlink_flush_region(app_pc start, size_t size)
{
dcontext_t *dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL);
LOG(THREAD, LOG_FRAGMENT, 2, "%s: "PFX"-"PFX"\n", __FUNCTION__, start, start+size);
/* This routine won't work with coarse_units */
CLIENT_ASSERT(!DYNAMO_OPTION(coarse_units),
                  /* as of now, coarse_units are always disabled with -thread_private. */
                  "dr_unlink_flush_region is not supported with -opt_memory unless "
                  "-thread_private or -enable_full_api is also specified");
/* Flush requires !couldbelinking. FIXME - not all event callbacks to the client are
* !couldbelinking (see PR 227619) restricting where this routine can be used. */
CLIENT_ASSERT(!is_couldbelinking(dcontext), "dr_flush_region: called from an event "
"callback that doesn't support calling this routine, see header file "
"for restrictions.");
/* Flush requires caller to hold no locks that might block a couldbelinking thread
* (which includes almost all dr locks). FIXME - some event callbacks are holding
* dr locks (see PR 227619) so can't call this routine. FIXME - some event callbacks
* are couldbelinking (see PR 227619) so can't allow the caller to hold any client
* locks that could block threads in one of those events (otherwise we don't need
* to care about client locks) */
CLIENT_ASSERT(OWN_NO_LOCKS(dcontext), "dr_flush_region: caller owns a client "
"lock or was called from an event callback that doesn't support "
"calling this routine, see header file for restrictions.");
CLIENT_ASSERT(size != 0, "dr_unlink_flush_region: 0 is invalid size for flush");
/* release build check of requirements, as many as possible at least */
if (size == 0 || is_couldbelinking(dcontext))
return false;
if (!executable_vm_area_executed_from(start, start + size))
return true;
flush_fragments_from_region(dcontext, start, size, false/*don't force synchall*/);
return true;
}
DR_API
/* Flush all fragments that contain code from the region [start, start+size) at the next
* convenient time. Unlike dr_flush_region() this routine has no restrictions on lock
* or couldbelinking status; the downside is that the delay till the flush actually
* occurs is unbounded (FIXME - we could do something safely here to try to speed it
* up like unlinking shared_syscall etc.), but should occur before any new code is
* executed or any nudges are processed. */
bool
dr_delay_flush_region(app_pc start, size_t size, uint flush_id,
void (*flush_completion_callback) (int flush_id))
{
client_flush_req_t *flush;
LOG(THREAD_GET, LOG_FRAGMENT, 2, "%s: "PFX"-"PFX"\n",
__FUNCTION__, start, start+size);
if (size == 0) {
CLIENT_ASSERT(false, "dr_delay_flush_region: 0 is invalid size for flush");
return false;
}
/* With the new module load event at 1st execution (i#884), we get a lot of
* flush requests during creation of a bb from things like drwrap_replace().
* To avoid them flushing from a new module we check overlap up front here.
*/
if (!executable_vm_area_executed_from(start, start+size)) {
return true;
}
/* FIXME - would be nice if we could check the requirements and call
* dr_unlink_flush_region() here if it's safe. Is difficult to detect non-dr locks
* that could block a couldbelinking thread though. */
flush = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_flush_req_t, ACCT_CLIENT,
UNPROTECTED);
memset(flush, 0x0, sizeof(client_flush_req_t));
flush->start = (app_pc)start;
flush->size = size;
flush->flush_id = flush_id;
flush->flush_callback = flush_completion_callback;
mutex_lock(&client_flush_request_lock);
flush->next = client_flush_requests;
client_flush_requests = flush;
mutex_unlock(&client_flush_request_lock);
return true;
}
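/* A hypothetical sketch (not part of this file's logic): unlike dr_flush_region()
 * and dr_unlink_flush_region(), this routine can be requested from most contexts,
 * at the cost of an unbounded delay before the flush happens.  "mod_base" and
 * "mod_size" are assumed values the client obtained elsewhere (e.g., a module event).
 *
 *     static void
 *     flushed_cb(int flush_id)
 *     {
 *         dr_printf("delayed flush %d completed\n", flush_id);
 *     }
 *     ...
 *     if (!dr_delay_flush_region(mod_base, mod_size, 42, flushed_cb))
 *         dr_fprintf(STDERR, "flush request failed\n");
 */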
DR_API
/* returns whether or not there is a fragment in the drcontext fcache at tag
*/
bool
dr_fragment_exists_at(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f;
#ifdef CLIENT_SIDELINE
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup(dcontext, tag);
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
return f != NULL;
}
DR_API
bool
dr_bb_exists_at(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f = fragment_lookup(dcontext, tag);
if (f != NULL && !TEST(FRAG_IS_TRACE, f->flags)) {
return true;
}
return false;
}
DR_API
/* Looks up the fragment associated with the application pc tag.
* If not found, returns 0.
* If found, returns the total size occupied in the cache by the fragment.
*/
uint
dr_fragment_size(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
fragment_t *f;
int size = 0;
CLIENT_ASSERT(drcontext != NULL, "dr_fragment_size: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_fragment_size: drcontext is invalid");
#ifdef CLIENT_SIDELINE
/* used to check to see if owning thread, if so don't need lock */
    /* but the check for owning thread is more expensive than just getting the lock */
/* to check if owner get_thread_id() == dcontext->owning_thread */
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup(dcontext, tag);
if (f == NULL)
size = 0;
else
size = f->size;
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
return size;
}
DR_API
/* Retrieves the application PC of a fragment */
app_pc
dr_fragment_app_pc(void *tag)
{
#ifdef WINDOWS
tag = get_app_pc_from_intercept_pc_if_necessary((app_pc)tag);
CLIENT_ASSERT(tag != NULL, "dr_fragment_app_pc shouldn't be NULL");
DODEBUG({
/* Without -hide our DllMain routine ends up in the cache (xref PR 223120).
* On Linux fini() ends up in the cache.
*/
if (DYNAMO_OPTION(hide) && is_dynamo_address(tag) &&
/* support client interpreting code out of its library */
!is_in_client_lib(tag)) {
/* downgraded from assert for client interpreting its own generated code */
SYSLOG_INTERNAL_WARNING_ONCE("dr_fragment_app_pc is a DR/client pc");
}
});
#endif
return tag;
}
DR_API
/* i#268: opposite of dr_fragment_app_pc() */
app_pc
dr_app_pc_for_decoding(app_pc pc)
{
#ifdef WINDOWS
app_pc displaced;
if (is_intercepted_app_pc(pc, &displaced))
return displaced;
#endif
return pc;
}
DR_API
app_pc
dr_app_pc_from_cache_pc(byte *cache_pc)
{
app_pc res = NULL;
dcontext_t *dcontext = get_thread_private_dcontext();
bool waslinking;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL);
/* i#1989: there's no easy way to get a translation without a proper dcontext */
CLIENT_ASSERT(!fragment_thread_exited(dcontext),
"dr_app_pc_from_cache_pc not supported from the thread exit event");
if (fragment_thread_exited(dcontext))
return NULL;
waslinking = is_couldbelinking(dcontext);
if (!waslinking)
enter_couldbelinking(dcontext, NULL, false);
/* suppress asserts about faults in meta instrs */
DODEBUG({ dcontext->client_data->is_translating = true; });
res = recreate_app_pc(dcontext, cache_pc, NULL);
DODEBUG({ dcontext->client_data->is_translating = false; });
if (!waslinking)
enter_nolinking(dcontext, NULL, false);
return res;
}
DR_API
bool
dr_using_app_state(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
return os_using_app_state(dcontext);
}
DR_API
void
dr_switch_to_app_state(void *drcontext)
{
dr_switch_to_app_state_ex(drcontext, DR_STATE_ALL);
}
DR_API
void
dr_switch_to_app_state_ex(void *drcontext, dr_state_flags_t flags)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
os_swap_context(dcontext, true/*to app*/, flags);
}
DR_API
void
dr_switch_to_dr_state(void *drcontext)
{
dr_switch_to_dr_state_ex(drcontext, DR_STATE_ALL);
}
DR_API
void
dr_switch_to_dr_state_ex(void *drcontext, dr_state_flags_t flags)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
os_swap_context(dcontext, false/*to dr*/, flags);
}
/***************************************************************************
* CUSTOM TRACES SUPPORT
* *could use a method to unmark a trace head, would be nice if DR
* notified the client when it marked a trace head and gave the client a
* chance to override its decision
*/
DR_API
/* Marks the fragment associated with the application pc tag as
* a trace head. The fragment need not exist yet -- once it is
* created it will be marked as a trace head.
*
* DR associates a counter with a trace head and once it
* passes the -hot_threshold parameter, DR begins building
* a trace. Before each fragment is added to the trace, DR
* calls the client routine dr_end_trace to determine whether
* to end the trace. (dr_end_trace will be called both for
* standard DR traces and for client-defined traces.)
*
* Note, some fragments are unsuitable for trace heads. DR will
* ignore attempts to mark such fragments as trace heads and will return
* false. If the client marks a fragment that doesn't exist yet as a trace
* head and DR later determines that the fragment is unsuitable for
* a trace head it will unmark the fragment as a trace head without
* notifying the client.
*
* Returns true if the target fragment is marked as a trace head.
*
* If coarse, headness depends on path: currently this will only have
* links from tag's coarse unit unlinked.
*/
bool /* FIXME: dynamorio_app_init returns an int! */
dr_mark_trace_head(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
fragment_t *f;
fragment_t coarse_f;
bool success = true;
CLIENT_ASSERT(drcontext != NULL, "dr_mark_trace_head: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_mark_trace_head: drcontext is invalid");
/* Required to make the future-fragment lookup and add atomic and for
* mark_trace_head. We have to grab before fragment_delete_mutex so
* we pay the cost of acquiring up front even when f->flags doesn't
* require it.
*/
SHARED_FLAGS_RECURSIVE_LOCK(FRAG_SHARED, acquire, change_linking_lock);
#ifdef CLIENT_SIDELINE
/* used to check to see if owning thread, if so don't need lock */
    /* but the check for owning thread is more expensive than just getting the lock */
/* to check if owner get_thread_id() == dcontext->owning_thread */
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup_fine_and_coarse(dcontext, tag, &coarse_f, NULL);
if (f == NULL) {
future_fragment_t *fut;
fut = fragment_lookup_future(dcontext, tag);
if (fut == NULL) {
/* need to create a future fragment */
fut = fragment_create_and_add_future(dcontext, tag, FRAG_IS_TRACE_HEAD);
} else {
/* don't call mark_trace_head, it will try to do some linking */
fut->flags |= FRAG_IS_TRACE_HEAD;
}
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 2,
"Client mark trace head : will mark fragment as trace head when built "
": address "PFX"\n", tag);
#endif
} else {
/* check precluding conditions */
if (TEST(FRAG_IS_TRACE, f->flags)) {
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 2,
"Client mark trace head : not marking as trace head, is already "
"a trace : address "PFX"\n", tag);
#endif
success = false;
} else if (TEST(FRAG_CANNOT_BE_TRACE, f->flags)) {
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 2,
"Client mark trace head : not marking as trace head, particular "
"fragment cannot be trace head : address "PFX"\n", tag);
#endif
success = false;
} else if (TEST(FRAG_IS_TRACE_HEAD, f->flags)) {
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 2,
"Client mark trace head : fragment already marked as trace head : "
"address "PFX"\n", tag);
#endif
success = true;
} else {
mark_trace_head(dcontext, f, NULL, NULL);
#ifndef CLIENT_SIDELINE
LOG(THREAD, LOG_MONITOR, 3,
"Client mark trace head : just marked as trace head : address "PFX"\n",
tag);
#endif
}
}
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
SHARED_FLAGS_RECURSIVE_LOCK(FRAG_SHARED, release, change_linking_lock);
return success;
}
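/* A hypothetical custom-trace sketch (not part of this file's logic): a client can
 * mark selected bbs as trace heads from its bb event and let DR build traces from
 * them once they become hot.  The predicate is_interesting() is an assumption.
 *
 *     static dr_emit_flags_t
 *     event_bb(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
 *              bool translating)
 *     {
 *         if (!for_trace && is_interesting(dr_fragment_app_pc(tag)))
 *             dr_mark_trace_head(drcontext, tag);
 *         return DR_EMIT_DEFAULT;
 *     }
 */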
DR_API
/* Checks to see if the fragment (or future fragment) in the drcontext
* fcache at tag is marked as a trace head
*/
bool
dr_trace_head_at(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f;
bool trace_head;
#ifdef CLIENT_SIDELINE
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup(dcontext, tag);
if (f != NULL)
trace_head = (f->flags & FRAG_IS_TRACE_HEAD) != 0;
else {
future_fragment_t *fut = fragment_lookup_future(dcontext, tag);
if (fut != NULL)
trace_head = (fut->flags & FRAG_IS_TRACE_HEAD) != 0;
else
trace_head = false;
}
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
return trace_head;
}
DR_API
/* checks to see if there is a trace in the drcontext fcache at tag
*/
bool
dr_trace_exists_at(void *drcontext, void *tag)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
fragment_t *f;
bool trace;
#ifdef CLIENT_SIDELINE
fragment_get_fragment_delete_mutex(dcontext);
#endif
f = fragment_lookup(dcontext, tag);
if (f != NULL)
trace = (f->flags & FRAG_IS_TRACE) != 0;
else
trace = false;
#ifdef CLIENT_SIDELINE
fragment_release_fragment_delete_mutex(dcontext);
#endif
return trace;
}
#ifdef UNSUPPORTED_API
DR_API
/* All basic blocks created after this routine is called will have a prefix
* that restores the ecx register. Exit ctis can be made to target this prefix
* instead of the normal entry point by using the instr_branch_set_prefix_target()
* routine.
* WARNING: this routine should almost always be called during client
* initialization, since having a mixture of prefixed and non-prefixed basic
* blocks can lead to trouble.
*/
void
dr_add_prefixes_to_basic_blocks(void)
{
if (DYNAMO_OPTION(coarse_units)) {
/* coarse_units doesn't support prefixes in general.
* the variation by addr prefix according to processor type
* is also not stored in pcaches.
*/
CLIENT_ASSERT(false,
"dr_add_prefixes_to_basic_blocks() not supported with -opt_memory");
}
options_make_writable();
dynamo_options.bb_prefixes = true;
options_restore_readonly();
}
#endif /* UNSUPPORTED_API */
DR_API
/* Insert code to get the segment base address pointed at by seg into
* register reg. In Linux, it is only supported with -mangle_app_seg option.
* In Windows, it only supports getting base address of the TLS segment.
*/
bool
dr_insert_get_seg_base(void *drcontext, instrlist_t *ilist, instr_t *instr,
reg_id_t seg, reg_id_t reg)
{
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_insert_get_seg_base: reg has wrong size\n");
#ifdef X86
CLIENT_ASSERT(reg_is_segment(seg),
"dr_insert_get_seg_base: seg is not a segment register");
# ifdef UNIX
    CLIENT_ASSERT(INTERNAL_OPTION(mangle_app_seg),
                  "dr_insert_get_seg_base is supported "
                  "with -mangle_app_seg only");
/* FIXME: we should remove the constraint below by always mangling SEG_TLS,
* 1. Getting TLS base could be a common request by clients.
* 2. The TLS descriptor setup and selector setup can be separated,
     *    so we must intercept all descriptor setup. It will not add large
     *    runtime overhead to keep track of the app's TLS segment base.
*/
    CLIENT_ASSERT(INTERNAL_OPTION(private_loader) || seg != SEG_TLS,
                  "dr_insert_get_seg_base supports TLS seg "
                  "only with -private_loader");
if (!INTERNAL_OPTION(mangle_app_seg) ||
!(INTERNAL_OPTION(private_loader) || seg != SEG_TLS))
return false;
if (seg == SEG_FS || seg == SEG_GS) {
instrlist_meta_preinsert
(ilist, instr,
instr_create_restore_from_tls(drcontext, reg,
os_get_app_tls_base_offset(seg)));
} else {
instrlist_meta_preinsert
(ilist, instr,
INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(reg),
OPND_CREATE_INTPTR(0)));
}
# else /* Windows */
if (seg == SEG_TLS) {
instrlist_meta_preinsert
(ilist, instr,
XINST_CREATE_load(drcontext,
opnd_create_reg(reg),
opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL,
0, SELF_TIB_OFFSET, OPSZ_PTR)));
} else if (seg == SEG_CS || seg == SEG_DS || seg == SEG_ES) {
/* XXX: we assume flat address space */
instrlist_meta_preinsert
(ilist, instr,
INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(reg),
OPND_CREATE_INTPTR(0)));
} else
return false;
# endif /* UNIX/Windows */
#elif defined (ARM)
/* i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
#endif /* X86/ARM */
return true;
}
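/* A hypothetical client-side sketch (not part of this file's logic): load the app's
 * TLS segment base into a scratch register before "where" on Linux x86 (requires
 * -mangle_app_seg per the asserts above).  The segment choice and the save/restore
 * of XCX are assumptions; clients typically manage scratch registers via drreg.
 *
 *     dr_save_reg(drcontext, bb, where, DR_REG_XCX, SPILL_SLOT_1);
 *     dr_insert_get_seg_base(drcontext, bb, where,
 *                            IF_X64_ELSE(SEG_FS, SEG_GS), DR_REG_XCX);
 *     // ... instrumentation that reads the TLS base from XCX ...
 *     dr_restore_reg(drcontext, bb, where, DR_REG_XCX, SPILL_SLOT_1);
 */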
DR_API
reg_id_t
dr_get_stolen_reg()
{
return IF_X86_ELSE(REG_NULL, dr_reg_stolen);
}
DR_API
bool
dr_insert_get_stolen_reg_value(void *drcontext, instrlist_t *ilist,
instr_t *instr, reg_id_t reg)
{
IF_X86(CLIENT_ASSERT(false, "dr_insert_get_stolen_reg: should not be reached\n"));
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_insert_get_stolen_reg: reg has wrong size\n");
CLIENT_ASSERT(!reg_is_stolen(reg),
"dr_insert_get_stolen_reg: reg is used by DynamoRIO\n");
#ifdef AARCHXX
instrlist_meta_preinsert
(ilist, instr,
instr_create_restore_from_tls(drcontext, reg, TLS_REG_STOLEN_SLOT));
#endif
return true;
}
DR_API
bool
dr_insert_set_stolen_reg_value(void *drcontext, instrlist_t *ilist,
instr_t *instr, reg_id_t reg)
{
IF_X86(CLIENT_ASSERT(false, "dr_insert_set_stolen_reg: should not be reached\n"));
CLIENT_ASSERT(reg_is_pointer_sized(reg),
"dr_insert_set_stolen_reg: reg has wrong size\n");
CLIENT_ASSERT(!reg_is_stolen(reg),
"dr_insert_set_stolen_reg: reg is used by DynamoRIO\n");
#ifdef AARCHXX
instrlist_meta_preinsert
(ilist, instr,
instr_create_save_to_tls(drcontext, reg, TLS_REG_STOLEN_SLOT));
#endif
return true;
}
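/* A hypothetical AArch32/AArch64 client sketch (not part of this file's logic):
 * instrumentation that wants the app value of a register DR has stolen for TLS must
 * go through these helpers rather than reading the register directly.  "scratch" is
 * an assumed scratch register the client has already reserved.
 *
 *     reg_id_t stolen = dr_get_stolen_reg();
 *     if (reg_of_interest == stolen) {
 *         // materialize the app's value of the stolen register into scratch
 *         dr_insert_get_stolen_reg_value(drcontext, bb, where, scratch);
 *     }
 */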
DR_API
int
dr_remove_it_instrs(void *drcontext, instrlist_t *ilist)
{
#if !defined(ARM)
return 0;
#else
int res = 0;
instr_t *inst, *next;
for (inst = instrlist_first(ilist); inst != NULL; inst = next) {
next = instr_get_next(inst);
if (instr_get_opcode(inst) == OP_it) {
res++;
instrlist_remove(ilist, inst);
instr_destroy(drcontext, inst);
}
}
return res;
#endif
}
DR_API
int
dr_insert_it_instrs(void *drcontext, instrlist_t *ilist)
{
#if !defined(ARM)
return 0;
#else
instr_t *first = instrlist_first(ilist);
if (first == NULL || instr_get_isa_mode(first) != DR_ISA_ARM_THUMB)
return 0;
return reinstate_it_blocks((dcontext_t*)drcontext, ilist,
instrlist_first(ilist), NULL);
#endif
}
/***************************************************************************
* PERSISTENCE
*/
/* Up to caller to synchronize. */
uint
instrument_persist_ro_size(dcontext_t *dcontext, void *perscxt, size_t file_offs)
{
size_t sz = 0;
size_t i;
/* Store the set of clients in use as we require the same set in order
* to validate the pcache on use. Note that we can't just have -client_lib
* be OP_PCACHE_GLOBAL b/c it contains client options too.
* We have no unique guids for clients so we store the full path.
* We ignore ids. We do care about priority order: clients must
* be in the same order in addition to having the same path.
*
* XXX: we could go further and store client library checksum, etc. hashes,
* but that precludes clients from doing their own proper versioning.
*
* XXX: we could also put the set of clients into the pcache namespace to allow
* simultaneous use of pcaches with different sets of clients (empty set
* vs under tool, in particular): but doesn't really seem useful enough
* for the trouble
*/
for (i=0; i<num_client_libs; i++) {
sz += strlen(client_libs[i].path) + 1/*NULL*/;
}
sz++; /* double NULL ends it */
/* Now for clients' own data.
* For user_data, we assume each sequence of <size, patch, persist> is
* atomic: caller holds a mutex across the sequence. Thus, we can use
* global storage.
*/
if (persist_ro_size_callbacks.num > 0) {
call_all_ret(sz, +=, , persist_ro_size_callbacks,
size_t (*)(void *, void *, size_t, void **),
(void *)dcontext, perscxt, file_offs + sz,
&persist_user_data[idx]);
}
/* using size_t for API w/ clients in case we want to widen in future */
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large");
return (uint) sz;
}
/* Up to caller to synchronize.
* Returns true iff all writes succeeded.
*/
bool
instrument_persist_ro(dcontext_t *dcontext, void *perscxt, file_t fd)
{
bool res = true;
size_t i;
char nul = '\0';
ASSERT(fd != INVALID_FILE);
for (i=0; i<num_client_libs; i++) {
size_t sz = strlen(client_libs[i].path) + 1/*NULL*/;
if (os_write(fd, client_libs[i].path, sz) != (ssize_t)sz)
return false;
}
/* double NULL ends it */
if (os_write(fd, &nul, sizeof(nul)) != (ssize_t)sizeof(nul))
return false;
/* Now for clients' own data */
if (persist_ro_size_callbacks.num > 0) {
call_all_ret(res, = res &&, , persist_ro_callbacks,
bool (*)(void *, void *, file_t, void *),
(void *)dcontext, perscxt, fd, persist_user_data[idx]);
}
return res;
}
/* Returns true if successfully validated and de-serialized */
bool
instrument_resurrect_ro(dcontext_t *dcontext, void *perscxt, byte *map)
{
bool res = true;
size_t i;
const char *c;
ASSERT(map != NULL);
/* Ensure we have the same set of tools (see comments above) */
i = 0;
c = (const char *) map;
while (*c != '\0') {
if (i >= num_client_libs)
return false; /* too many clients */
if (strcmp(client_libs[i].path, c) != 0)
return false; /* client path mismatch */
c += strlen(c) + 1;
i++;
}
if (i < num_client_libs)
return false; /* too few clients */
c++;
/* Now for clients' own data */
if (resurrect_ro_callbacks.num > 0) {
call_all_ret(res, = res &&, , resurrect_ro_callbacks,
bool (*)(void *, void *, byte **),
(void *)dcontext, perscxt, (byte **) &c);
}
return res;
}
/* Up to caller to synchronize. */
uint
instrument_persist_rx_size(dcontext_t *dcontext, void *perscxt, size_t file_offs)
{
size_t sz = 0;
if (persist_rx_size_callbacks.num == 0)
return 0;
call_all_ret(sz, +=, , persist_rx_size_callbacks,
size_t (*)(void *, void *, size_t, void **),
(void *)dcontext, perscxt, file_offs + sz,
&persist_user_data[idx]);
/* using size_t for API w/ clients in case we want to widen in future */
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large");
return (uint) sz;
}
/* Up to caller to synchronize.
* Returns true iff all writes succeeded.
*/
bool
instrument_persist_rx(dcontext_t *dcontext, void *perscxt, file_t fd)
{
bool res = true;
ASSERT(fd != INVALID_FILE);
if (persist_rx_callbacks.num == 0)
return true;
call_all_ret(res, = res &&, , persist_rx_callbacks,
bool (*)(void *, void *, file_t, void *),
(void *)dcontext, perscxt, fd, persist_user_data[idx]);
return res;
}
/* Returns true if successfully validated and de-serialized */
bool
instrument_resurrect_rx(dcontext_t *dcontext, void *perscxt, byte *map)
{
bool res = true;
ASSERT(map != NULL);
if (resurrect_rx_callbacks.num == 0)
return true;
call_all_ret(res, = res &&, , resurrect_rx_callbacks,
bool (*)(void *, void *, byte **),
(void *)dcontext, perscxt, &map);
return res;
}
/* Up to caller to synchronize. */
uint
instrument_persist_rw_size(dcontext_t *dcontext, void *perscxt, size_t file_offs)
{
size_t sz = 0;
if (persist_rw_size_callbacks.num == 0)
return 0;
call_all_ret(sz, +=, , persist_rw_size_callbacks,
size_t (*)(void *, void *, size_t, void **),
(void *)dcontext, perscxt, file_offs + sz,
&persist_user_data[idx]);
/* using size_t for API w/ clients in case we want to widen in future */
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large");
return (uint) sz;
}
/* Up to caller to synchronize.
* Returns true iff all writes succeeded.
*/
bool
instrument_persist_rw(dcontext_t *dcontext, void *perscxt, file_t fd)
{
bool res = true;
ASSERT(fd != INVALID_FILE);
if (persist_rw_callbacks.num == 0)
return true;
call_all_ret(res, = res &&, , persist_rw_callbacks,
bool (*)(void *, void *, file_t, void *),
(void *)dcontext, perscxt, fd, persist_user_data[idx]);
return res;
}
/* Returns true if successfully validated and de-serialized */
bool
instrument_resurrect_rw(dcontext_t *dcontext, void *perscxt, byte *map)
{
bool res = true;
ASSERT(map != NULL);
if (resurrect_rw_callbacks.num == 0)
return true;
    call_all_ret(res, = res &&, , resurrect_rw_callbacks,
bool (*)(void *, void *, byte **),
(void *)dcontext, perscxt, &map);
return res;
}
bool
instrument_persist_patch(dcontext_t *dcontext, void *perscxt,
byte *bb_start, size_t bb_size)
{
bool res = true;
if (persist_patch_callbacks.num == 0)
return true;
call_all_ret(res, = res &&, , persist_patch_callbacks,
bool (*)(void *, void *, byte *, size_t, void *),
(void *)dcontext, perscxt, bb_start, bb_size,
persist_user_data[idx]);
return res;
}
DR_API
bool
dr_register_persist_ro(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
if (func_size == NULL || func_persist == NULL || func_resurrect == NULL)
return false;
add_callback(&persist_ro_size_callbacks, (void (*)(void))func_size, true);
add_callback(&persist_ro_callbacks, (void (*)(void))func_persist, true);
add_callback(&resurrect_ro_callbacks, (void (*)(void))func_resurrect, true);
return true;
}
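/* A hypothetical persistence sketch (not part of this file's logic): a client that
 * wants data in the read-only pcache section registers the three callbacks matching
 * the size/persist/resurrect sequence driven by the instrument_persist_* routines
 * above.  All my_-prefixed names and the persisted payload are assumptions.
 *
 *     static size_t
 *     my_ro_size(void *drcontext, void *perscxt, size_t file_offs, void **user_data)
 *     {
 *         *user_data = NULL;
 *         return sizeof(uint);                  // we persist a single magic word
 *     }
 *     static bool
 *     my_ro_persist(void *drcontext, void *perscxt, file_t fd, void *user_data)
 *     {
 *         uint magic = 0xd0d0;
 *         return dr_write_file(fd, &magic, sizeof(magic)) == sizeof(magic);
 *     }
 *     static bool
 *     my_ro_resurrect(void *drcontext, void *perscxt, byte **map)
 *     {
 *         uint magic = *(uint *)(*map);
 *         *map += sizeof(uint);                 // advance past our data
 *         return magic == 0xd0d0;
 *     }
 *     ...
 *     dr_register_persist_ro(my_ro_size, my_ro_persist, my_ro_resurrect);
 */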
DR_API
bool
dr_unregister_persist_ro(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
bool res = true;
if (func_size != NULL) {
res = remove_callback(&persist_ro_size_callbacks, (void (*)(void))func_size, true)
&& res;
} else
res = false;
if (func_persist != NULL) {
res = remove_callback(&persist_ro_callbacks, (void (*)(void))func_persist, true)
&& res;
} else
res = false;
if (func_resurrect != NULL) {
res = remove_callback(&resurrect_ro_callbacks, (void (*)(void))func_resurrect,
true) && res;
} else
res = false;
return res;
}
DR_API
bool
dr_register_persist_rx(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
if (func_size == NULL || func_persist == NULL || func_resurrect == NULL)
return false;
add_callback(&persist_rx_size_callbacks, (void (*)(void))func_size, true);
add_callback(&persist_rx_callbacks, (void (*)(void))func_persist, true);
add_callback(&resurrect_rx_callbacks, (void (*)(void))func_resurrect, true);
return true;
}
DR_API
bool
dr_unregister_persist_rx(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
bool res = true;
if (func_size != NULL) {
res = remove_callback(&persist_rx_size_callbacks, (void (*)(void))func_size, true)
&& res;
} else
res = false;
if (func_persist != NULL) {
res = remove_callback(&persist_rx_callbacks, (void (*)(void))func_persist, true)
&& res;
} else
res = false;
if (func_resurrect != NULL) {
res = remove_callback(&resurrect_rx_callbacks, (void (*)(void))func_resurrect,
true) && res;
} else
res = false;
return res;
}
DR_API
bool
dr_register_persist_rw(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
if (func_size == NULL || func_persist == NULL || func_resurrect == NULL)
return false;
add_callback(&persist_rw_size_callbacks, (void (*)(void))func_size, true);
add_callback(&persist_rw_callbacks, (void (*)(void))func_persist, true);
add_callback(&resurrect_rw_callbacks, (void (*)(void))func_resurrect, true);
return true;
}
DR_API
bool
dr_unregister_persist_rw(size_t (*func_size)(void *drcontext, void *perscxt,
size_t file_offs, void **user_data OUT),
bool (*func_persist)(void *drcontext, void *perscxt,
file_t fd, void *user_data),
bool (*func_resurrect)(void *drcontext, void *perscxt,
byte **map INOUT))
{
bool res = true;
if (func_size != NULL) {
res = remove_callback(&persist_rw_size_callbacks, (void (*)(void))func_size, true)
&& res;
} else
res = false;
if (func_persist != NULL) {
res = remove_callback(&persist_rw_callbacks, (void (*)(void))func_persist, true)
&& res;
} else
res = false;
if (func_resurrect != NULL) {
res = remove_callback(&resurrect_rw_callbacks, (void (*)(void))func_resurrect,
true) && res;
} else
res = false;
return res;
}
DR_API
bool
dr_register_persist_patch(bool (*func_patch)(void *drcontext, void *perscxt,
byte *bb_start, size_t bb_size,
void *user_data))
{
if (func_patch == NULL)
return false;
add_callback(&persist_patch_callbacks, (void (*)(void))func_patch, true);
return true;
}
DR_API
bool
dr_unregister_persist_patch(bool (*func_patch)(void *drcontext, void *perscxt,
byte *bb_start, size_t bb_size,
void *user_data))
{
return remove_callback(&persist_patch_callbacks, (void (*)(void))func_patch, true);
}
DR_API
/* Create instructions for storing pointer-size integer val to dst,
* and then insert them into ilist prior to where.
* The "first" and "last" created instructions are returned.
*/
void
instrlist_insert_mov_immed_ptrsz(void *drcontext, ptr_int_t val, opnd_t dst,
instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
CLIENT_ASSERT(opnd_get_size(dst) == OPSZ_PTR, "wrong dst size");
insert_mov_immed_ptrsz((dcontext_t *)drcontext, val, dst,
ilist, where, first, last);
}
DR_API
/* Create instructions for pushing pointer-size integer val on the stack,
* and then insert them into ilist prior to where.
* The "first" and "last" created instructions are returned.
*/
void
instrlist_insert_push_immed_ptrsz(void *drcontext, ptr_int_t val,
instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
insert_push_immed_ptrsz((dcontext_t *)drcontext, val, ilist, where,
first, last);
}
DR_API
void
instrlist_insert_mov_instr_addr(void *drcontext, instr_t *src_inst, byte *encode_pc,
opnd_t dst, instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
CLIENT_ASSERT(opnd_get_size(dst) == OPSZ_PTR, "wrong dst size");
if (encode_pc == NULL) {
/* Pass highest code cache address.
* XXX: unless we're beyond the reservation! Would still be reachable
* from rest of vmcode, but might be higher than vmcode_get_end()!
*/
encode_pc = vmcode_get_end();
}
insert_mov_instr_addr((dcontext_t *)drcontext, src_inst, encode_pc, dst,
ilist, where, first, last);
}
DR_API
void
instrlist_insert_push_instr_addr(void *drcontext, instr_t *src_inst, byte *encode_pc,
instrlist_t *ilist, instr_t *where,
OUT instr_t **first, OUT instr_t **last)
{
if (encode_pc == NULL) {
/* Pass highest code cache address.
* XXX: unless we're beyond the reservation! Would still be reachable
* from rest of vmcode, but might be higher than vmcode_get_end()!
*/
encode_pc = vmcode_get_end();
}
insert_push_instr_addr((dcontext_t *)drcontext, src_inst, encode_pc,
ilist, where, first, last);
}
#endif /* CLIENT_INTERFACE */
| 1 | 11,122 | One or the other should be non-NULL. If segments is NULL, the alloc above will have size zero, which we do not allow (there's no header): it should assert in debug build. So there should be asserts that one is non-NULL at the top, and if there really needs to be some kind of defensive check down here, it should cover the alloc too. | DynamoRIO-dynamorio | c |
@@ -106,7 +106,11 @@ class User < ActiveRecord::Base
def self.from_oauth_hash(auth_hash)
user_data = auth_hash.extra.raw_info.to_hash
- self.find_or_create_by(email_address: user_data['email'])
+ user = self.for_email(user_data['email'])
+ if user_data['first_name'].present? && user_data['last_name'].present?
+ user.update_attributes(first_name: user_data['first_name'], last_name: user_data['last_name'])
+ end
+ user
end
def role_on(proposal) | 1 | class User < ActiveRecord::Base
has_paper_trail class_name: 'C2Version'
validates :client_slug, inclusion: {
in: ->(_) { Proposal.client_slugs },
message: "'%{value}' is not in Proposal.client_slugs #{Proposal.client_slugs.inspect}",
allow_blank: true
}
validates :email_address, presence: true, uniqueness: true
validates_email_format_of :email_address
has_many :steps, dependent: :destroy
has_many :comments, dependent: :destroy
has_many :observations, dependent: :destroy
has_many :user_roles, dependent: :destroy
has_many :roles, through: :user_roles
has_many :proposals, foreign_key: "requester_id", dependent: :destroy
has_many :outgoing_delegations, class_name: 'ApprovalDelegate', foreign_key: 'assigner_id'
def self.active
where(active: true)
end
# this is for user_roles specifically, not proposals or any other objects for which
# this user might have roles.
# rubocop:disable Style/PredicateName
def has_role?(name_or_role)
if name_or_role.is_a?(Role)
self.roles.include?(name_or_role)
else
self.roles.exists?(name: name_or_role)
end
end
# rubocop:enable Style/PredicateName
def add_role(name_or_role)
if name_or_role.is_a?(Role)
role = name_or_role
else
role = Role.find_or_create_by!(name: name_or_role)
end
self.user_roles.find_or_create_by!(role: role)
end
def self.with_role(name_or_role)
if name_or_role.is_a?(Role)
name_or_role.users
else
User.joins(:roles).where(roles: { name: name_or_role })
end
end
def self.sql_for_role_slug(role, slug)
self.with_role(role).select(:id).where(client_slug: slug).to_sql
end
def full_name
if first_name.present? && last_name.present?
"#{first_name} #{last_name}"
else
email_address
end
end
def requested_proposals
Proposal.where(requester_id: self.id)
end
def last_requested_proposal
self.requested_proposals.order('created_at DESC').first
end
def add_delegate(other)
self.outgoing_delegations.create!(assignee: other)
end
def delegates_to?(other)
self.outgoing_delegations.exists?(assignee_id: other.id)
end
def client_admin?
self.has_role?('client_admin')
end
def admin?
has_role?('admin')
end
def not_admin?
!admin?
end
def self.for_email(email)
User.find_or_create_by(email_address: email.strip.downcase)
end
def self.for_email_with_slug(email, client_slug)
u = self.for_email(email)
unless u.client_slug
u.client_slug = client_slug
end
u
end
def self.from_oauth_hash(auth_hash)
user_data = auth_hash.extra.raw_info.to_hash
self.find_or_create_by(email_address: user_data['email'])
end
def role_on(proposal)
RolePicker.new(self, proposal)
end
end
| 1 | 15,372 | I'm surprised rubocop isn't picking up singe quotes? | 18F-C2 | rb |
@@ -251,14 +251,7 @@ import (
var _ time.Duration
var _ strings.Reader
var _ aws.Config
-
-func parseTime(layout, value string) *time.Time {
- t, err := time.Parse(layout, value)
- if err != nil {
- panic(err)
- }
- return &t
-}
+var _, _ = protocol.ParseTime("unixTimestamp", "2016-09-27T15:50Z")
`))
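// Illustrative aside, not part of the patch or of any generated output: the
// "var _, _ = protocol.ParseTime(...)" line added above presumably exists to
// keep the protocol package referenced from generated example files that never
// call it directly, in the same spirit as the existing "var _ time.Duration"
// declarations. A minimal standalone sketch of that Go idiom, with the
// standard time package standing in for protocol:
package main

import (
	"fmt"
	"time"
)

// Referencing any identifier from the package at file scope satisfies the
// compiler's "imported and not used" check, even if no function calls it.
var _ = time.Now

func main() {
	fmt.Println("compiles even though time is otherwise unused")
}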
| 1 | // +build codegen
package api
import (
"bytes"
"encoding/json"
"fmt"
"os"
"sort"
"strings"
"text/template"
"github.com/aws/aws-sdk-go/private/util"
)
type Examples map[string][]Example
// ExamplesDefinition is the structural representation of the examples-1.json file
type ExamplesDefinition struct {
*API `json:"-"`
Examples Examples `json:"examples"`
}
// Example is a single entry within the examples-1.json file.
type Example struct {
API *API `json:"-"`
Operation *Operation `json:"-"`
OperationName string `json:"-"`
Index string `json:"-"`
Builder examplesBuilder `json:"-"`
VisitedErrors map[string]struct{} `json:"-"`
Title string `json:"title"`
Description string `json:"description"`
ID string `json:"id"`
Comments Comments `json:"comments"`
Input map[string]interface{} `json:"input"`
Output map[string]interface{} `json:"output"`
}
type Comments struct {
Input map[string]interface{} `json:"input"`
Output map[string]interface{} `json:"output"`
}
var exampleFuncMap = template.FuncMap{
"commentify": commentify,
"wrap": wrap,
"generateExampleInput": generateExampleInput,
"generateTypes": generateTypes,
}
var exampleCustomizations = map[string]template.FuncMap{}
var exampleTmpls = template.Must(template.New("example").Funcs(exampleFuncMap).Parse(`
{{ generateTypes . }}
{{ commentify (wrap .Title 80) }}
//
{{ commentify (wrap .Description 80) }}
func Example{{ .API.StructName }}_{{ .MethodName }}() {
svc := {{ .API.PackageName }}.New(session.New())
input := {{ generateExampleInput . }}
result, err := svc.{{ .OperationName }}(input)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
{{ range $_, $ref := .Operation.ErrorRefs -}}
{{ if not ($.HasVisitedError $ref) -}}
case {{ .API.PackageName }}.{{ $ref.Shape.ErrorCodeName }}:
fmt.Println({{ .API.PackageName }}.{{ $ref.Shape.ErrorCodeName }}, aerr.Error())
{{ end -}}
{{ end -}}
default:
fmt.Println(aerr.Error())
}
} else {
// Print the error, cast err to awserr.Error to get the Code and
// Message from an error.
fmt.Println(err.Error())
}
return
}
fmt.Println(result)
}
`))
// Names will return the names of the examples. These will also be the names of the
// operations that are to be tested.
func (exs Examples) Names() []string {
names := make([]string, 0, len(exs))
for k := range exs {
names = append(names, k)
}
sort.Strings(names)
return names
}
func (exs Examples) GoCode() string {
buf := bytes.NewBuffer(nil)
for _, opName := range exs.Names() {
examples := exs[opName]
for _, ex := range examples {
buf.WriteString(util.GoFmt(ex.GoCode()))
buf.WriteString("\n")
}
}
return buf.String()
}
// GoCode will generate the example code for the given Example shape.
// TODO: Can delete
func (ex Example) GoCode() string {
var buf bytes.Buffer
m := exampleFuncMap
if fMap, ok := exampleCustomizations[ex.API.PackageName()]; ok {
m = fMap
}
tmpl := exampleTmpls.Funcs(m)
if err := tmpl.ExecuteTemplate(&buf, "example", &ex); err != nil {
panic(err)
}
return strings.TrimSpace(buf.String())
}
func generateExampleInput(ex Example) string {
if ex.Operation.HasInput() {
return fmt.Sprintf("&%s{\n%s\n}",
ex.Builder.GoType(&ex.Operation.InputRef, true),
ex.Builder.BuildShape(&ex.Operation.InputRef, ex.Input, false),
)
}
return ""
}
// generateTypes will generate no types for default examples, but customizations may
// require their own defined types.
func generateTypes(ex Example) string {
return ""
}
// correctType will cast the value to the correct type when printing the string.
// This is due to the json decoder choosing numbers to be floats, but the shape may
// actually be an int. To counter this, we pass the shape's type and properly do the
// casting here.
func correctType(memName string, t string, value interface{}) string {
if value == nil {
return ""
}
v := ""
switch value.(type) {
case string:
v = value.(string)
case int:
v = fmt.Sprintf("%d", value.(int))
case float64:
if t == "integer" || t == "long" || t == "int64" {
v = fmt.Sprintf("%d", int(value.(float64)))
} else {
v = fmt.Sprintf("%f", value.(float64))
}
case bool:
v = fmt.Sprintf("%t", value.(bool))
}
return convertToCorrectType(memName, t, v)
}
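// Illustrative aside, separate from the generator code: as the comment above
// correctType notes, encoding/json decodes every JSON number into float64 when
// the destination is an interface{}, which is why the emitted value has to be
// re-cast using the modeled shape type. Standalone sketch (the MaxItems field
// name is invented for the example):
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var doc map[string]interface{}
	if err := json.Unmarshal([]byte(`{"MaxItems": 100}`), &doc); err != nil {
		panic(err)
	}
	// Prints "float64" even though the modeled shape may be an integer;
	// that mismatch is exactly what correctType compensates for.
	fmt.Printf("%T\n", doc["MaxItems"])
}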
func convertToCorrectType(memName, t, v string) string {
return fmt.Sprintf("%s: %s,\n", memName, getValue(t, v))
}
func getValue(t, v string) string {
if t[0] == '*' {
t = t[1:]
}
switch t {
case "string":
return fmt.Sprintf("aws.String(%q)", v)
case "integer", "long", "int64":
return fmt.Sprintf("aws.Int64(%s)", v)
case "float", "float64", "double":
return fmt.Sprintf("aws.Float64(%s)", v)
case "boolean":
return fmt.Sprintf("aws.Bool(%s)", v)
default:
panic("Unsupported type: " + t)
}
}
// AttachExamples will create a new ExamplesDefinition from the examples file
// and reference the API object.
func (a *API) AttachExamples(filename string) {
p := ExamplesDefinition{API: a}
f, err := os.Open(filename)
defer f.Close()
if err != nil {
panic(err)
}
err = json.NewDecoder(f).Decode(&p)
if err != nil {
panic(err)
}
p.setup()
}
var examplesBuilderCustomizations = map[string]examplesBuilder{
"wafregional": wafregionalExamplesBuilder{},
}
func (p *ExamplesDefinition) setup() {
var builder examplesBuilder
ok := false
if builder, ok = examplesBuilderCustomizations[p.API.PackageName()]; !ok {
builder = defaultExamplesBuilder{}
}
keys := p.Examples.Names()
for _, n := range keys {
examples := p.Examples[n]
for i, e := range examples {
n = p.ExportableName(n)
e.OperationName = n
e.API = p.API
e.Index = fmt.Sprintf("shared%02d", i)
e.Builder = builder
e.VisitedErrors = map[string]struct{}{}
op := p.API.Operations[e.OperationName]
e.OperationName = p.ExportableName(e.OperationName)
e.Operation = op
p.Examples[n][i] = e
}
}
p.API.Examples = p.Examples
}
var exampleHeader = template.Must(template.New("exampleHeader").Parse(`
import (
{{ .Builder.Imports .API }}
)
var _ time.Duration
var _ strings.Reader
var _ aws.Config
func parseTime(layout, value string) *time.Time {
t, err := time.Parse(layout, value)
if err != nil {
panic(err)
}
return &t
}
`))
type exHeader struct {
Builder examplesBuilder
API *API
}
// ExamplesGoCode will return a code representation of the entry within the
// examples.json file.
func (a *API) ExamplesGoCode() string {
var buf bytes.Buffer
var builder examplesBuilder
ok := false
if builder, ok = examplesBuilderCustomizations[a.PackageName()]; !ok {
builder = defaultExamplesBuilder{}
}
if err := exampleHeader.ExecuteTemplate(&buf, "exampleHeader", &exHeader{builder, a}); err != nil {
panic(err)
}
code := a.Examples.GoCode()
if len(code) == 0 {
return ""
}
buf.WriteString(code)
return buf.String()
}
// TODO: In the operation documentation where we list errors, this needs to be done
// there as well.
func (ex *Example) HasVisitedError(errRef *ShapeRef) bool {
errName := errRef.Shape.ErrorCodeName()
_, ok := ex.VisitedErrors[errName]
ex.VisitedErrors[errName] = struct{}{}
return ok
}
func parseTimeString(ref *ShapeRef, memName, v string) string {
if ref.Location == "header" {
return fmt.Sprintf("%s: parseTime(%q, %q),\n", memName, "Mon, 2 Jan 2006 15:04:05 GMT", v)
} else {
switch ref.API.Metadata.Protocol {
case "json", "rest-json":
return fmt.Sprintf("%s: parseTime(%q, %q),\n", memName, "2006-01-02T15:04:05Z", v)
case "rest-xml", "ec2", "query":
return fmt.Sprintf("%s: parseTime(%q, %q),\n", memName, "2006-01-02T15:04:05Z", v)
default:
panic("Unsupported time type: " + ref.API.Metadata.Protocol)
}
}
}
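// Illustrative aside, separate from the generator code: parseTimeString emits
// calls to the parseTime helper from the exampleHeader template above, passing
// a Go reference-time layout chosen from the member location and the service
// protocol. A standalone sketch of what such a call reduces to (the timestamp
// value is invented for the example):
package main

import (
	"fmt"
	"time"
)

func main() {
	// The same layout string parseTimeString emits for json/rest protocols.
	t, err := time.Parse("2006-01-02T15:04:05Z", "2016-09-27T15:50:00Z")
	if err != nil {
		panic(err)
	}
	fmt.Println(t.UTC()) // 2016-09-27 15:50:00 +0000 UTC
}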
func (ex *Example) MethodName() string {
return fmt.Sprintf("%s_%s", ex.OperationName, ex.Index)
}
| 1 | 9,824 | Is this line still needed? | aws-aws-sdk-go | go |
@@ -153,10 +153,8 @@ bool EDPSimple::createSEDPEndpoints()
watt.endpoint.multicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
- watt.times.nackResponseDelay.seconds = 0;
- watt.times.nackResponseDelay.fraction = 0;
- watt.times.initialHeartbeatDelay.seconds = 0;
- watt.times.initialHeartbeatDelay.fraction = 0;
+ watt.times.heartbeatPeriod.seconds = 1;
+ watt.times.heartbeatPeriod.fraction = 0;
if(mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.bytesPerPeriod != UINT32_MAX &&
mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.periodMillisecs != 0)
watt.mode = ASYNCHRONOUS_WRITER; | 1 | // Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file EDPSimple.cpp
*
*/
#include <fastrtps/rtps/builtin/discovery/endpoint/EDPSimple.h>
#include "EDPSimpleListeners.h"
#include <fastrtps/rtps/builtin/discovery/participant/PDPSimple.h>
#include "../../../participant/RTPSParticipantImpl.h"
#include <fastrtps/rtps/writer/StatefulWriter.h>
#include <fastrtps/rtps/reader/StatefulReader.h>
#include <fastrtps/rtps/attributes/HistoryAttributes.h>
#include <fastrtps/rtps/attributes/WriterAttributes.h>
#include <fastrtps/rtps/attributes/ReaderAttributes.h>
#include <fastrtps/rtps/history/ReaderHistory.h>
#include <fastrtps/rtps/history/WriterHistory.h>
#include <fastrtps/rtps/builtin/data/WriterProxyData.h>
#include <fastrtps/rtps/builtin/data/ReaderProxyData.h>
#include <fastrtps/rtps/builtin/data/ParticipantProxyData.h>
#include <fastrtps/rtps/builtin/BuiltinProtocols.h>
#include <fastrtps/log/Log.h>
#include <mutex>
namespace eprosima {
namespace fastrtps{
namespace rtps {
EDPSimple::EDPSimple(PDPSimple* p,RTPSParticipantImpl* part):
EDP(p,part),
mp_pubListen(nullptr),
mp_subListen(nullptr)
{
// TODO Auto-generated constructor stub
}
EDPSimple::~EDPSimple()
{
#if HAVE_SECURITY
if(this->sedp_builtin_publications_secure_writer_.first !=nullptr)
{
this->mp_RTPSParticipant->deleteUserEndpoint(sedp_builtin_publications_secure_writer_.first);
delete(sedp_builtin_publications_secure_writer_.second);
}
if(this->sedp_builtin_publications_secure_reader_.first !=nullptr)
{
this->mp_RTPSParticipant->deleteUserEndpoint(sedp_builtin_publications_secure_reader_.first);
delete(sedp_builtin_publications_secure_reader_.second);
}
if(this->sedp_builtin_subscriptions_secure_writer_.first !=nullptr)
{
this->mp_RTPSParticipant->deleteUserEndpoint(sedp_builtin_subscriptions_secure_writer_.first);
delete(sedp_builtin_subscriptions_secure_writer_.second);
}
if(this->sedp_builtin_subscriptions_secure_reader_.first !=nullptr)
{
this->mp_RTPSParticipant->deleteUserEndpoint(sedp_builtin_subscriptions_secure_reader_.first);
delete(sedp_builtin_subscriptions_secure_reader_.second);
}
#endif
if(this->mp_PubReader.first !=nullptr)
{
this->mp_RTPSParticipant->deleteUserEndpoint(mp_PubReader.first);
delete(mp_PubReader.second);
}
if(this->mp_SubReader.first !=nullptr)
{
this->mp_RTPSParticipant->deleteUserEndpoint(mp_SubReader.first);
delete(mp_SubReader.second);
}
if(this->mp_PubWriter.first !=nullptr)
{
this->mp_RTPSParticipant->deleteUserEndpoint(mp_PubWriter.first);
delete(mp_PubWriter.second);
}
if(this->mp_SubWriter.first !=nullptr)
{
this->mp_RTPSParticipant->deleteUserEndpoint(mp_SubWriter.first);
delete(mp_SubWriter.second);
}
if(mp_pubListen!=nullptr)
delete(mp_pubListen);
if(mp_subListen !=nullptr)
delete(mp_subListen);
}
bool EDPSimple::initEDP(BuiltinAttributes& attributes)
{
logInfo(RTPS_EDP,"Beginning Simple Endpoint Discovery Protocol");
m_discovery = attributes;
if(!createSEDPEndpoints())
{
        logError(RTPS_EDP,"Problem creating SimpleEDP endpoints");
return false;
}
#if HAVE_SECURITY
if(!create_sedp_secure_endpoints())
{
        logError(RTPS_EDP,"Problem creating SimpleEDP endpoints");
return false;
}
#endif
return true;
}
bool EDPSimple::createSEDPEndpoints()
{
WriterAttributes watt;
ReaderAttributes ratt;
HistoryAttributes hatt;
bool created = true;
RTPSReader* raux = nullptr;
RTPSWriter* waux = nullptr;
if(m_discovery.m_simpleEDP.use_PublicationWriterANDSubscriptionReader)
{
hatt.initialReservedCaches = 100;
hatt.maximumReservedCaches = 5000;
hatt.payloadMaxSize = DISCOVERY_PUBLICATION_DATA_MAX_SIZE;
hatt.memoryPolicy = mp_PDP->mp_builtin->m_att.writerHistoryMemoryPolicy;
mp_PubWriter.second = new WriterHistory(hatt);
//Wparam.pushMode = true;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.topicKind = WITH_KEY;
watt.endpoint.unicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
watt.times.nackResponseDelay.seconds = 0;
watt.times.nackResponseDelay.fraction = 0;
watt.times.initialHeartbeatDelay.seconds = 0;
watt.times.initialHeartbeatDelay.fraction = 0;
if(mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.bytesPerPeriod != UINT32_MAX &&
mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.periodMillisecs != 0)
watt.mode = ASYNCHRONOUS_WRITER;
created &=this->mp_RTPSParticipant->createWriter(&waux,watt,mp_PubWriter.second,nullptr,c_EntityId_SEDPPubWriter,true);
if(created)
{
mp_PubWriter.first = dynamic_cast<StatefulWriter*>(waux);
logInfo(RTPS_EDP,"SEDP Publication Writer created");
}
else
{
delete(mp_PubWriter.second);
mp_PubWriter.second = nullptr;
}
hatt.initialReservedCaches = 100;
hatt.maximumReservedCaches = 1000000;
hatt.payloadMaxSize = DISCOVERY_SUBSCRIPTION_DATA_MAX_SIZE;
hatt.memoryPolicy = mp_PDP->mp_builtin->m_att.readerHistoryMemoryPolicy;
mp_SubReader.second = new ReaderHistory(hatt);
//Rparam.historyMaxSize = 100;
ratt.expectsInlineQos = false;
ratt.endpoint.reliabilityKind = RELIABLE;
ratt.endpoint.topicKind = WITH_KEY;
ratt.endpoint.unicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.times.heartbeatResponseDelay.seconds = 0;
ratt.times.heartbeatResponseDelay.fraction = 0;
ratt.times.initialAcknackDelay.seconds = 0;
ratt.times.initialAcknackDelay.fraction = 0;
this->mp_subListen = new EDPSimpleSUBListener(this);
created &=this->mp_RTPSParticipant->createReader(&raux,ratt,mp_SubReader.second,mp_subListen,c_EntityId_SEDPSubReader,true);
if(created)
{
mp_SubReader.first = dynamic_cast<StatefulReader*>(raux);
logInfo(RTPS_EDP,"SEDP Subscription Reader created");
}
else
{
delete(mp_SubReader.second);
mp_SubReader.second = nullptr;
delete(mp_subListen);
mp_subListen = nullptr;
}
}
if(m_discovery.m_simpleEDP.use_PublicationReaderANDSubscriptionWriter)
{
hatt.initialReservedCaches = 100;
hatt.maximumReservedCaches = 1000000;
hatt.payloadMaxSize = DISCOVERY_PUBLICATION_DATA_MAX_SIZE;
hatt.memoryPolicy = mp_PDP->mp_builtin->m_att.readerHistoryMemoryPolicy;
mp_PubReader.second = new ReaderHistory(hatt);
//Rparam.historyMaxSize = 100;
ratt.expectsInlineQos = false;
ratt.endpoint.reliabilityKind = RELIABLE;
ratt.endpoint.topicKind = WITH_KEY;
ratt.endpoint.unicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.times.heartbeatResponseDelay.seconds = 0;
ratt.times.heartbeatResponseDelay.fraction = 0;
ratt.times.initialAcknackDelay.seconds = 0;
ratt.times.initialAcknackDelay.fraction = 0;
this->mp_pubListen = new EDPSimplePUBListener(this);
created &=this->mp_RTPSParticipant->createReader(&raux,ratt,mp_PubReader.second,mp_pubListen,c_EntityId_SEDPPubReader,true);
if(created)
{
mp_PubReader.first = dynamic_cast<StatefulReader*>(raux);
logInfo(RTPS_EDP,"SEDP Publication Reader created");
}
else
{
delete(mp_PubReader.second);
mp_PubReader.second = nullptr;
delete(mp_pubListen);
mp_pubListen = nullptr;
}
hatt.initialReservedCaches = 100;
hatt.maximumReservedCaches = 5000;
hatt.payloadMaxSize = DISCOVERY_SUBSCRIPTION_DATA_MAX_SIZE;
hatt.memoryPolicy = mp_PDP->mp_builtin->m_att.writerHistoryMemoryPolicy;
mp_SubWriter.second = new WriterHistory(hatt);
//Wparam.pushMode = true;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.topicKind = WITH_KEY;
watt.endpoint.unicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
watt.times.nackResponseDelay.seconds = 0;
watt.times.nackResponseDelay.fraction = 0;
watt.times.initialHeartbeatDelay.seconds = 0;
watt.times.initialHeartbeatDelay.fraction = 0;
if(mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.bytesPerPeriod != UINT32_MAX &&
mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.periodMillisecs != 0)
watt.mode = ASYNCHRONOUS_WRITER;
created &=this->mp_RTPSParticipant->createWriter(&waux, watt, mp_SubWriter.second, nullptr,
c_EntityId_SEDPSubWriter, true);
if(created)
{
mp_SubWriter.first = dynamic_cast<StatefulWriter*>(waux);
logInfo(RTPS_EDP,"SEDP Subscription Writer created");
}
else
{
delete(mp_SubWriter.second);
mp_SubWriter.second = nullptr;
}
}
logInfo(RTPS_EDP,"Creation finished");
return created;
}
#if HAVE_SECURITY
bool EDPSimple::create_sedp_secure_endpoints()
{
WriterAttributes watt;
ReaderAttributes ratt;
HistoryAttributes hatt;
bool created = true;
RTPSReader* raux = nullptr;
RTPSWriter* waux = nullptr;
auto& part_attr = mp_RTPSParticipant->security_attributes();
security::PluginParticipantSecurityAttributes plugin_part_attr(part_attr.plugin_participant_attributes);
if(m_discovery.m_simpleEDP.enable_builtin_secure_publications_writer_and_subscriptions_reader)
{
hatt.initialReservedCaches = 100;
hatt.maximumReservedCaches = 5000;
hatt.payloadMaxSize = DISCOVERY_PUBLICATION_DATA_MAX_SIZE;
hatt.memoryPolicy = mp_PDP->mp_builtin->m_att.writerHistoryMemoryPolicy;
sedp_builtin_publications_secure_writer_.second = new WriterHistory(hatt);
//Wparam.pushMode = true;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.topicKind = WITH_KEY;
watt.endpoint.unicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
watt.times.nackResponseDelay.seconds = 0;
watt.times.nackResponseDelay.fraction = 0;
watt.times.initialHeartbeatDelay.seconds = 0;
watt.times.initialHeartbeatDelay.fraction = 0;
watt.endpoint.security_attributes().is_submessage_protected = part_attr.is_discovery_protected;
watt.endpoint.security_attributes().plugin_endpoint_attributes = PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_VALID;
if (part_attr.is_discovery_protected)
{
if (plugin_part_attr.is_discovery_encrypted)
watt.endpoint.security_attributes().plugin_endpoint_attributes |= PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_SUBMESSAGE_ENCRYPTED;
if (plugin_part_attr.is_discovery_origin_authenticated)
watt.endpoint.security_attributes().plugin_endpoint_attributes |= PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_SUBMESSAGE_ORIGIN_AUTHENTICATED;
}
if(mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.bytesPerPeriod != UINT32_MAX &&
mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.periodMillisecs != 0)
watt.mode = ASYNCHRONOUS_WRITER;
created &=this->mp_RTPSParticipant->createWriter(&waux, watt, sedp_builtin_publications_secure_writer_.second,
nullptr, sedp_builtin_publications_secure_writer, true);
if(created)
{
sedp_builtin_publications_secure_writer_.first = dynamic_cast<StatefulWriter*>(waux);
logInfo(RTPS_EDP,"SEDP Publication Writer created");
}
else
{
delete(sedp_builtin_publications_secure_writer_.second);
sedp_builtin_publications_secure_writer_.second = nullptr;
}
hatt.initialReservedCaches = 100;
hatt.maximumReservedCaches = 1000000;
hatt.payloadMaxSize = DISCOVERY_SUBSCRIPTION_DATA_MAX_SIZE;
hatt.memoryPolicy = mp_PDP->mp_builtin->m_att.readerHistoryMemoryPolicy;
sedp_builtin_subscriptions_secure_reader_.second = new ReaderHistory(hatt);
//Rparam.historyMaxSize = 100;
ratt.expectsInlineQos = false;
ratt.endpoint.reliabilityKind = RELIABLE;
ratt.endpoint.topicKind = WITH_KEY;
ratt.endpoint.unicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.times.heartbeatResponseDelay.seconds = 0;
ratt.times.heartbeatResponseDelay.fraction = 0;
ratt.times.initialAcknackDelay.seconds = 0;
ratt.times.initialAcknackDelay.fraction = 0;
ratt.endpoint.security_attributes().is_submessage_protected = part_attr.is_discovery_protected;
ratt.endpoint.security_attributes().plugin_endpoint_attributes = PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_VALID;
if (part_attr.is_discovery_protected)
{
if (plugin_part_attr.is_discovery_encrypted)
ratt.endpoint.security_attributes().plugin_endpoint_attributes |= PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_SUBMESSAGE_ENCRYPTED;
if (plugin_part_attr.is_discovery_origin_authenticated)
ratt.endpoint.security_attributes().plugin_endpoint_attributes |= PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_SUBMESSAGE_ORIGIN_AUTHENTICATED;
}
created &=this->mp_RTPSParticipant->createReader(&raux, ratt, sedp_builtin_subscriptions_secure_reader_.second,
mp_subListen, sedp_builtin_subscriptions_secure_reader, true);
if(created)
{
sedp_builtin_subscriptions_secure_reader_.first = dynamic_cast<StatefulReader*>(raux);
logInfo(RTPS_EDP,"SEDP Subscription Reader created");
}
else
{
delete(sedp_builtin_subscriptions_secure_reader_.second);
sedp_builtin_subscriptions_secure_reader_.second = nullptr;
}
}
if(m_discovery.m_simpleEDP.enable_builtin_secure_subscriptions_writer_and_publications_reader)
{
hatt.initialReservedCaches = 100;
hatt.maximumReservedCaches = 1000000;
hatt.payloadMaxSize = DISCOVERY_PUBLICATION_DATA_MAX_SIZE;
hatt.memoryPolicy = mp_PDP->mp_builtin->m_att.readerHistoryMemoryPolicy;
sedp_builtin_publications_secure_reader_.second = new ReaderHistory(hatt);
//Rparam.historyMaxSize = 100;
ratt.expectsInlineQos = false;
ratt.endpoint.reliabilityKind = RELIABLE;
ratt.endpoint.topicKind = WITH_KEY;
ratt.endpoint.unicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.times.heartbeatResponseDelay.seconds = 0;
ratt.times.heartbeatResponseDelay.fraction = 0;
ratt.times.initialAcknackDelay.seconds = 0;
ratt.times.initialAcknackDelay.fraction = 0;
ratt.endpoint.security_attributes().is_submessage_protected = part_attr.is_discovery_protected;
ratt.endpoint.security_attributes().plugin_endpoint_attributes = PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_VALID;
if (part_attr.is_discovery_protected)
{
if (plugin_part_attr.is_discovery_encrypted)
ratt.endpoint.security_attributes().plugin_endpoint_attributes |= PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_SUBMESSAGE_ENCRYPTED;
if (plugin_part_attr.is_discovery_origin_authenticated)
ratt.endpoint.security_attributes().plugin_endpoint_attributes |= PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_SUBMESSAGE_ORIGIN_AUTHENTICATED;
}
created &=this->mp_RTPSParticipant->createReader(&raux, ratt, sedp_builtin_publications_secure_reader_.second,
mp_pubListen, sedp_builtin_publications_secure_reader, true);
if(created)
{
sedp_builtin_publications_secure_reader_.first = dynamic_cast<StatefulReader*>(raux);
logInfo(RTPS_EDP,"SEDP Publication Reader created");
}
else
{
delete(sedp_builtin_publications_secure_reader_.second);
sedp_builtin_publications_secure_reader_.second = nullptr;
}
hatt.initialReservedCaches = 100;
hatt.maximumReservedCaches = 5000;
hatt.payloadMaxSize = DISCOVERY_SUBSCRIPTION_DATA_MAX_SIZE;
hatt.memoryPolicy = mp_PDP->mp_builtin->m_att.writerHistoryMemoryPolicy;
sedp_builtin_subscriptions_secure_writer_.second = new WriterHistory(hatt);
//Wparam.pushMode = true;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.topicKind = WITH_KEY;
watt.endpoint.unicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = this->mp_PDP->getLocalParticipantProxyData()->m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
watt.times.nackResponseDelay.seconds = 0;
watt.times.nackResponseDelay.fraction = 0;
watt.times.initialHeartbeatDelay.seconds = 0;
watt.times.initialHeartbeatDelay.fraction = 0;
watt.endpoint.security_attributes().is_submessage_protected = part_attr.is_discovery_protected;
watt.endpoint.security_attributes().plugin_endpoint_attributes = PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_VALID;
if (part_attr.is_discovery_protected)
{
if (plugin_part_attr.is_discovery_encrypted)
watt.endpoint.security_attributes().plugin_endpoint_attributes |= PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_SUBMESSAGE_ENCRYPTED;
if (plugin_part_attr.is_discovery_origin_authenticated)
watt.endpoint.security_attributes().plugin_endpoint_attributes |= PLUGIN_ENDPOINT_SECURITY_ATTRIBUTES_FLAG_IS_SUBMESSAGE_ORIGIN_AUTHENTICATED;
}
if(mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.bytesPerPeriod != UINT32_MAX &&
mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.periodMillisecs != 0)
watt.mode = ASYNCHRONOUS_WRITER;
created &=this->mp_RTPSParticipant->createWriter(&waux, watt, sedp_builtin_subscriptions_secure_writer_.second,
nullptr, sedp_builtin_subscriptions_secure_writer, true);
if(created)
{
sedp_builtin_subscriptions_secure_writer_.first = dynamic_cast<StatefulWriter*>(waux);
logInfo(RTPS_EDP,"SEDP Subscription Writer created");
}
else
{
delete(sedp_builtin_subscriptions_secure_writer_.second);
sedp_builtin_subscriptions_secure_writer_.second = nullptr;
}
}
logInfo(RTPS_EDP,"Creation finished");
return created;
}
#endif
bool EDPSimple::processLocalReaderProxyData(RTPSReader* local_reader, ReaderProxyData* rdata)
{
logInfo(RTPS_EDP,rdata->guid().entityId);
(void)local_reader;
auto* writer = &mp_SubWriter;
#if HAVE_SECURITY
if(local_reader->getAttributes().security_attributes().is_discovery_protected)
{
writer = &sedp_builtin_subscriptions_secure_writer_;
}
#endif
if(writer->first != nullptr)
{
// TODO(Ricardo) Write a getCdrSerializedPayload for ReaderProxyData.
CacheChange_t* change = writer->first->new_change([]() -> uint32_t {return DISCOVERY_SUBSCRIPTION_DATA_MAX_SIZE;},
ALIVE,rdata->key());
if(change !=nullptr)
{
rdata->toParameterList();
CDRMessage_t aux_msg(change->serializedPayload);
#if __BIG_ENDIAN__
change->serializedPayload.encapsulation = (uint16_t)PL_CDR_BE;
aux_msg.msg_endian = BIGEND;
#else
change->serializedPayload.encapsulation = (uint16_t)PL_CDR_LE;
aux_msg.msg_endian = LITTLEEND;
#endif
ParameterList_t parameter_list = rdata->toParameterList();
ParameterList::writeParameterListToCDRMsg(&aux_msg, ¶meter_list, true);
change->serializedPayload.length = (uint16_t)aux_msg.length;
{
std::unique_lock<std::recursive_mutex> lock(*writer->second->getMutex());
for(auto ch = writer->second->changesBegin(); ch != writer->second->changesEnd(); ++ch)
{
if((*ch)->instanceHandle == change->instanceHandle)
{
writer->second->remove_change(*ch);
break;
}
}
}
writer->second->add_change(change);
return true;
}
return false;
}
return true;
}
bool EDPSimple::processLocalWriterProxyData(RTPSWriter* local_writer, WriterProxyData* wdata)
{
logInfo(RTPS_EDP, wdata->guid().entityId);
(void)local_writer;
auto* writer = &mp_PubWriter;
#if HAVE_SECURITY
if(local_writer->getAttributes().security_attributes().is_discovery_protected)
{
writer = &sedp_builtin_publications_secure_writer_;
}
#endif
if(writer->first !=nullptr)
{
CacheChange_t* change = writer->first->new_change([]() -> uint32_t {return DISCOVERY_PUBLICATION_DATA_MAX_SIZE;},
ALIVE, wdata->key());
if(change != nullptr)
{
//wdata->toParameterList();
CDRMessage_t aux_msg(change->serializedPayload);
#if __BIG_ENDIAN__
change->serializedPayload.encapsulation = (uint16_t)PL_CDR_BE;
aux_msg.msg_endian = BIGEND;
#else
change->serializedPayload.encapsulation = (uint16_t)PL_CDR_LE;
aux_msg.msg_endian = LITTLEEND;
#endif
ParameterList_t parameter_list = wdata->toParameterList();
ParameterList::writeParameterListToCDRMsg(&aux_msg, ¶meter_list, true);
change->serializedPayload.length = (uint16_t)aux_msg.length;
{
std::unique_lock<std::recursive_mutex> lock(*writer->second->getMutex());
for(auto ch = writer->second->changesBegin(); ch != writer->second->changesEnd(); ++ch)
{
if((*ch)->instanceHandle == change->instanceHandle)
{
writer->second->remove_change(*ch);
break;
}
}
}
writer->second->add_change(change);
return true;
}
return false;
}
return true;
}
bool EDPSimple::removeLocalWriter(RTPSWriter* W)
{
logInfo(RTPS_EDP,W->getGuid().entityId);
auto* writer = &mp_PubWriter;
#if HAVE_SECURITY
if(W->getAttributes().security_attributes().is_discovery_protected)
{
writer = &sedp_builtin_publications_secure_writer_;
}
#endif
if(writer->first!=nullptr)
{
InstanceHandle_t iH;
iH = W->getGuid();
CacheChange_t* change = writer->first->new_change([]() -> uint32_t {return DISCOVERY_PUBLICATION_DATA_MAX_SIZE;},
NOT_ALIVE_DISPOSED_UNREGISTERED,iH);
if(change != nullptr)
{
{
std::lock_guard<std::recursive_mutex> guard(*writer->second->getMutex());
for(auto ch = writer->second->changesBegin(); ch != writer->second->changesEnd(); ++ch)
{
if((*ch)->instanceHandle == change->instanceHandle)
{
writer->second->remove_change(*ch);
break;
}
}
}
writer->second->add_change(change);
}
}
return mp_PDP->removeWriterProxyData(W->getGuid());
}
bool EDPSimple::removeLocalReader(RTPSReader* R)
{
logInfo(RTPS_EDP,R->getGuid().entityId);
auto* writer = &mp_SubWriter;
#if HAVE_SECURITY
if(R->getAttributes().security_attributes().is_discovery_protected)
{
writer = &sedp_builtin_subscriptions_secure_writer_;
}
#endif
if(writer->first!=nullptr)
{
InstanceHandle_t iH;
iH = (R->getGuid());
CacheChange_t* change = writer->first->new_change([]() -> uint32_t {return DISCOVERY_SUBSCRIPTION_DATA_MAX_SIZE;},
NOT_ALIVE_DISPOSED_UNREGISTERED,iH);
if(change != nullptr)
{
{
std::lock_guard<std::recursive_mutex> guard(*writer->second->getMutex());
for(auto ch = writer->second->changesBegin(); ch != writer->second->changesEnd(); ++ch)
{
if((*ch)->instanceHandle == change->instanceHandle)
{
writer->second->remove_change(*ch);
break;
}
}
}
writer->second->add_change(change);
}
}
return mp_PDP->removeReaderProxyData(R->getGuid());
}
void EDPSimple::assignRemoteEndpoints(const ParticipantProxyData& pdata)
{
logInfo(RTPS_EDP,"New DPD received, adding remote endpoints to our SimpleEDP endpoints");
uint32_t endp = pdata.m_availableBuiltinEndpoints;
uint32_t auxendp = endp;
auxendp &=DISC_BUILTIN_ENDPOINT_PUBLICATION_ANNOUNCER;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
    if(auxendp!=0 && mp_PubReader.first!=nullptr) //Exist Pub Writer and I have a Pub Reader
{
logInfo(RTPS_EDP,"Adding SEDP Pub Writer to my Pub Reader");
RemoteWriterAttributes watt(pdata.m_VendorId);
watt.guid.guidPrefix = pdata.m_guid.guidPrefix;
watt.guid.entityId = c_EntityId_SEDPPubWriter;
watt.endpoint.persistence_guid = watt.guid;
watt.endpoint.unicastLocatorList = pdata.m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = pdata.m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
mp_PubReader.first->matched_writer_add(watt);
}
auxendp = endp;
auxendp &=DISC_BUILTIN_ENDPOINT_PUBLICATION_DETECTOR;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp!=0 && mp_PubWriter.first!=nullptr) //Exist Pub Detector
{
logInfo(RTPS_EDP,"Adding SEDP Pub Reader to my Pub Writer");
RemoteReaderAttributes ratt(pdata.m_VendorId);
ratt.expectsInlineQos = false;
ratt.guid.guidPrefix = pdata.m_guid.guidPrefix;
ratt.guid.entityId = c_EntityId_SEDPPubReader;
ratt.endpoint.unicastLocatorList = pdata.m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = pdata.m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.endpoint.reliabilityKind = RELIABLE;
mp_PubWriter.first->matched_reader_add(ratt);
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_ANNOUNCER;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
    if(auxendp!=0 && mp_SubReader.first!=nullptr) //Exist Sub Announcer
{
logInfo(RTPS_EDP,"Adding SEDP Sub Writer to my Sub Reader");
RemoteWriterAttributes watt(pdata.m_VendorId);
watt.guid.guidPrefix = pdata.m_guid.guidPrefix;
watt.guid.entityId = c_EntityId_SEDPSubWriter;
watt.endpoint.persistence_guid = watt.guid;
watt.endpoint.unicastLocatorList = pdata.m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = pdata.m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
mp_SubReader.first->matched_writer_add(watt);
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_DETECTOR;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
    if(auxendp!=0 && mp_SubWriter.first!=nullptr) //Exist Sub Detector
{
logInfo(RTPS_EDP,"Adding SEDP Sub Reader to my Sub Writer");
RemoteReaderAttributes ratt(pdata.m_VendorId);
ratt.expectsInlineQos = false;
ratt.guid.guidPrefix = pdata.m_guid.guidPrefix;
ratt.guid.entityId = c_EntityId_SEDPSubReader;
ratt.endpoint.unicastLocatorList = pdata.m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = pdata.m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.endpoint.reliabilityKind = RELIABLE;
mp_SubWriter.first->matched_reader_add(ratt);
}
#if HAVE_SECURITY
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_PUBLICATION_SECURE_ANNOUNCER;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp != 0 && sedp_builtin_publications_secure_reader_.first != nullptr)
{
WriterProxyData watt;
watt.guid().guidPrefix = pdata.m_guid.guidPrefix;
watt.guid().entityId = sedp_builtin_publications_secure_writer;
watt.persistence_guid(watt.guid());
watt.unicastLocatorList(pdata.m_metatrafficUnicastLocatorList);
watt.multicastLocatorList(pdata.m_metatrafficMulticastLocatorList);
watt.m_qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS;
watt.m_qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS;
if(!mp_RTPSParticipant->security_manager().discovered_builtin_writer(
sedp_builtin_publications_secure_reader_.first->getGuid(), pdata.m_guid, watt,
sedp_builtin_publications_secure_reader_.first->getAttributes().security_attributes()))
{
logError(RTPS_EDP, "Security manager returns an error for writer " <<
sedp_builtin_publications_secure_reader_.first->getGuid());
}
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_PUBLICATION_SECURE_DETECTOR;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp != 0 && sedp_builtin_publications_secure_writer_.first!=nullptr)
{
ReaderProxyData ratt;
ratt.m_expectsInlineQos = false;
ratt.guid().guidPrefix = pdata.m_guid.guidPrefix;
ratt.guid().entityId = sedp_builtin_publications_secure_reader;
ratt.unicastLocatorList(pdata.m_metatrafficUnicastLocatorList);
ratt.multicastLocatorList(pdata.m_metatrafficMulticastLocatorList);
ratt.m_qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS;
ratt.m_qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS;
if(!mp_RTPSParticipant->security_manager().discovered_builtin_reader(
sedp_builtin_publications_secure_writer_.first->getGuid(), pdata.m_guid, ratt,
sedp_builtin_publications_secure_writer_.first->getAttributes().security_attributes()))
{
logError(RTPS_EDP, "Security manager returns an error for writer " <<
sedp_builtin_publications_secure_writer_.first->getGuid());
}
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_SECURE_ANNOUNCER;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp != 0 && sedp_builtin_subscriptions_secure_reader_.first != nullptr)
{
WriterProxyData watt;
watt.guid().guidPrefix = pdata.m_guid.guidPrefix;
watt.guid().entityId = sedp_builtin_subscriptions_secure_writer;
watt.persistence_guid(watt.guid());
watt.unicastLocatorList(pdata.m_metatrafficUnicastLocatorList);
watt.multicastLocatorList(pdata.m_metatrafficMulticastLocatorList);
watt.m_qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS;
watt.m_qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS;
if(!mp_RTPSParticipant->security_manager().discovered_builtin_writer(
sedp_builtin_subscriptions_secure_reader_.first->getGuid(), pdata.m_guid, watt,
sedp_builtin_subscriptions_secure_reader_.first->getAttributes().security_attributes()))
{
logError(RTPS_EDP, "Security manager returns an error for writer " <<
sedp_builtin_subscriptions_secure_reader_.first->getGuid());
}
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_SECURE_DETECTOR;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp != 0 && sedp_builtin_subscriptions_secure_writer_.first!=nullptr)
{
logInfo(RTPS_EDP,"Adding SEDP Sub Reader to my Sub Writer");
ReaderProxyData ratt;
ratt.m_expectsInlineQos = false;
ratt.guid().guidPrefix = pdata.m_guid.guidPrefix;
ratt.guid().entityId = sedp_builtin_subscriptions_secure_reader;
ratt.unicastLocatorList(pdata.m_metatrafficUnicastLocatorList);
ratt.multicastLocatorList(pdata.m_metatrafficMulticastLocatorList);
ratt.m_qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS;
ratt.m_qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS;
if(!mp_RTPSParticipant->security_manager().discovered_builtin_reader(
sedp_builtin_subscriptions_secure_writer_.first->getGuid(), pdata.m_guid, ratt,
sedp_builtin_subscriptions_secure_writer_.first->getAttributes().security_attributes()))
{
logError(RTPS_EDP, "Security manager returns an error for writer " <<
sedp_builtin_subscriptions_secure_writer_.first->getGuid());
}
}
#endif
}
void EDPSimple::removeRemoteEndpoints(ParticipantProxyData* pdata)
{
logInfo(RTPS_EDP,"For RTPSParticipant: "<<pdata->m_guid);
uint32_t endp = pdata->m_availableBuiltinEndpoints;
uint32_t auxendp = endp;
auxendp &=DISC_BUILTIN_ENDPOINT_PUBLICATION_ANNOUNCER;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
    if(auxendp!=0 && mp_PubReader.first!=nullptr) //Exist Pub Writer and I have a Pub Reader
{
RemoteWriterAttributes watt;
watt.guid.guidPrefix = pdata->m_guid.guidPrefix;
watt.guid.entityId = c_EntityId_SEDPPubWriter;
watt.endpoint.persistence_guid = watt.guid;
watt.endpoint.unicastLocatorList = pdata->m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = pdata->m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
mp_PubReader.first->matched_writer_remove(watt);
}
auxendp = endp;
auxendp &=DISC_BUILTIN_ENDPOINT_PUBLICATION_DETECTOR;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp!=0 && mp_PubWriter.first!=nullptr) //Exist Pub Detector
{
RemoteReaderAttributes ratt;
ratt.expectsInlineQos = false;
ratt.guid.guidPrefix = pdata->m_guid.guidPrefix;
ratt.guid.entityId = c_EntityId_SEDPPubReader;
ratt.endpoint.unicastLocatorList = pdata->m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = pdata->m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.endpoint.reliabilityKind = RELIABLE;
mp_PubWriter.first->matched_reader_remove(ratt);
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_ANNOUNCER;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
    if(auxendp!=0 && mp_SubReader.first!=nullptr) //Exist Sub Announcer
{
        logInfo(RTPS_EDP,"Removing SEDP Sub Writer from my Sub Reader");
RemoteWriterAttributes watt;
watt.guid.guidPrefix = pdata->m_guid.guidPrefix;
watt.guid.entityId = c_EntityId_SEDPSubWriter;
watt.endpoint.persistence_guid = watt.guid;
watt.endpoint.unicastLocatorList = pdata->m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = pdata->m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
mp_SubReader.first->matched_writer_remove(watt);
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_DETECTOR;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
    if(auxendp!=0 && mp_SubWriter.first!=nullptr) //Exist Sub Detector
{
        logInfo(RTPS_EDP,"Removing SEDP Sub Reader from my Sub Writer");
RemoteReaderAttributes ratt;
ratt.expectsInlineQos = false;
ratt.guid.guidPrefix = pdata->m_guid.guidPrefix;
ratt.guid.entityId = c_EntityId_SEDPSubReader;
ratt.endpoint.unicastLocatorList = pdata->m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = pdata->m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.endpoint.reliabilityKind = RELIABLE;
mp_SubWriter.first->matched_reader_remove(ratt);
}
#if HAVE_SECURITY
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_PUBLICATION_SECURE_ANNOUNCER;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp != 0 && sedp_builtin_publications_secure_reader_.first != nullptr)
{
RemoteWriterAttributes watt;
watt.guid.guidPrefix = pdata->m_guid.guidPrefix;
watt.guid.entityId = sedp_builtin_publications_secure_writer;
watt.endpoint.persistence_guid = watt.guid;
watt.endpoint.unicastLocatorList = pdata->m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = pdata->m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
if(sedp_builtin_publications_secure_reader_.first->matched_writer_remove(watt))
{
mp_RTPSParticipant->security_manager().remove_writer(
sedp_builtin_publications_secure_reader_.first->getGuid(), pdata->m_guid, watt.guid);
}
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_PUBLICATION_SECURE_DETECTOR;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp != 0 && sedp_builtin_publications_secure_writer_.first != nullptr)
{
RemoteReaderAttributes ratt;
ratt.guid.guidPrefix = pdata->m_guid.guidPrefix;
ratt.guid.entityId = sedp_builtin_publications_secure_reader;
ratt.endpoint.unicastLocatorList = pdata->m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = pdata->m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.endpoint.reliabilityKind = RELIABLE;
if(sedp_builtin_publications_secure_writer_.first->matched_reader_remove(ratt))
{
mp_RTPSParticipant->security_manager().remove_reader(
sedp_builtin_publications_secure_writer_.first->getGuid(), pdata->m_guid, ratt.guid);
}
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_SECURE_ANNOUNCER;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp != 0 && sedp_builtin_subscriptions_secure_reader_.first != nullptr)
{
        logInfo(RTPS_EDP,"Removing SEDP Sub Writer from my Sub Reader");
RemoteWriterAttributes watt;
watt.guid.guidPrefix = pdata->m_guid.guidPrefix;
watt.guid.entityId = sedp_builtin_subscriptions_secure_writer;
watt.endpoint.persistence_guid = watt.guid;
watt.endpoint.unicastLocatorList = pdata->m_metatrafficUnicastLocatorList;
watt.endpoint.multicastLocatorList = pdata->m_metatrafficMulticastLocatorList;
//watt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
watt.endpoint.reliabilityKind = RELIABLE;
watt.endpoint.durabilityKind = TRANSIENT_LOCAL;
if(sedp_builtin_subscriptions_secure_reader_.first->matched_writer_remove(watt))
{
mp_RTPSParticipant->security_manager().remove_writer(
sedp_builtin_subscriptions_secure_reader_.first->getGuid(), pdata->m_guid, watt.guid);
}
}
auxendp = endp;
auxendp &= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_SECURE_DETECTOR;
//FIXME: FIX TO NOT FAIL WITH BAD BUILTIN ENDPOINT SET
//auxendp = 1;
if(auxendp != 0 && sedp_builtin_subscriptions_secure_writer_.first!=nullptr)
{
        logInfo(RTPS_EDP,"Removing SEDP Sub Reader from my Sub Writer");
RemoteReaderAttributes ratt;
ratt.guid.guidPrefix = pdata->m_guid.guidPrefix;
ratt.guid.entityId = sedp_builtin_subscriptions_secure_reader;
ratt.endpoint.unicastLocatorList = pdata->m_metatrafficUnicastLocatorList;
ratt.endpoint.multicastLocatorList = pdata->m_metatrafficMulticastLocatorList;
//ratt.endpoint.remoteLocatorList = m_discovery.initialPeersList;
ratt.endpoint.durabilityKind = TRANSIENT_LOCAL;
ratt.endpoint.reliabilityKind = RELIABLE;
if(sedp_builtin_subscriptions_secure_writer_.first->matched_reader_remove(ratt))
{
mp_RTPSParticipant->security_manager().remove_reader(
sedp_builtin_subscriptions_secure_writer_.first->getGuid(), pdata->m_guid, ratt.guid);
}
}
#endif
}
#if HAVE_SECURITY
bool EDPSimple::pairing_remote_writer_with_local_builtin_reader_after_security(const GUID_t& local_reader,
const WriterProxyData& remote_writer_data)
{
bool returned_value = false;
if(local_reader.entityId == sedp_builtin_publications_secure_reader)
{
RemoteWriterAttributes attrs = remote_writer_data.toRemoteWriterAttributes();
sedp_builtin_publications_secure_reader_.first->matched_writer_add(attrs);
returned_value = true;
}
else if(local_reader.entityId == sedp_builtin_subscriptions_secure_reader)
{
RemoteWriterAttributes attrs = remote_writer_data.toRemoteWriterAttributes();
sedp_builtin_subscriptions_secure_reader_.first->matched_writer_add(attrs);
returned_value = true;
}
return returned_value;
}
bool EDPSimple::pairing_remote_reader_with_local_builtin_writer_after_security(const GUID_t& local_writer,
const ReaderProxyData& remote_reader_data)
{
bool returned_value = false;
if(local_writer.entityId == sedp_builtin_publications_secure_writer)
{
RemoteReaderAttributes attrs = remote_reader_data.toRemoteReaderAttributes();
sedp_builtin_publications_secure_writer_.first->matched_reader_add(attrs);
returned_value = true;
}
else if(local_writer.entityId == sedp_builtin_subscriptions_secure_writer)
{
RemoteReaderAttributes attrs = remote_reader_data.toRemoteReaderAttributes();
sedp_builtin_subscriptions_secure_writer_.first->matched_reader_add(attrs);
returned_value = true;
}
return returned_value;
}
#endif
} /* namespace rtps */
} /* namespace fastrtps */
} /* namespace eprosima */
| 1 | 13,524 | Maybe use specific values for all the watt.times fields? Even better, have a const for it at the top of the file? | eProsima-Fast-DDS | cpp |
@@ -21,9 +21,10 @@ import inputCore
#: The directory in which liblouis braille tables are located.
TABLES_DIR = r"louis\tables"
+PATTERNS_TABLE = os.path.join(TABLES_DIR, "braille-patterns.cti")
-#: The table file names and information.
-TABLES = (
+#: The braille table file names and information.
+tables = [
# (fileName, displayName, supportsInput),
# Translators: The name of a braille table displayed in the
# braille settings dialog. | 1 | #braille.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2008-2014 NV Access Limited
import itertools
import os
import pkgutil
import wx
import louis
import keyboardHandler
import baseObject
import config
from logHandler import log
import controlTypes
import api
import textInfos
import brailleDisplayDrivers
import inputCore
#: The directory in which liblouis braille tables are located.
TABLES_DIR = r"louis\tables"
#: The table file names and information.
TABLES = (
# (fileName, displayName, supportsInput),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ar-ar-g1.utb", _("Arabic grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ar-fa.utb", _("Farsi grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("as-in-g1.utb", _("Assamese grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("be-in-g1.utb", _("Bengali grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("bg.ctb", _("Bulgarian 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("cy-cy-g1.utb", _("Welsh grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("cy-cy-g2.ctb", _("Welsh grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("cz-cz-g1.utb", _("Czech grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("da-dk-g16.utb", _("Danish 6 dot grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("da-dk-g18.utb", _("Danish 8 dot grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("da-dk-g26.ctb", _("Danish 6 dot grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("da-dk-g28.ctb", _("Danish 8 dot grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("de-de-comp8.ctb", _("German 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("de-de-g0.utb", _("German grade 0"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("de-de-g1.ctb", _("German grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("de-de-g2.ctb", _("German grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("en-gb-comp8.ctb", _("English (U.K.) 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("en-gb-g1.utb", _("English (U.K.) grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("en-GB-g2.ctb", _("English (U.K.) grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("en-us-comp6.ctb", _("English (U.S.) 6 dot computer braille"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("en-us-comp8.ctb", _("English (U.S.) 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("en-us-g1.ctb", _("English (U.S.) grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("en-us-g2.ctb", _("English (U.S.) grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("Es-Es-G0.utb", _("Spanish 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("es-g1.ctb", _("Spanish grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("et-g0.utb", _("Estonian grade 0"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ethio-g1.ctb", _("Ethiopic grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("fi.utb", _("Finnish 6 dot"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("fi-fi-8dot.ctb", _("Finnish 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("fr-bfu-comp6.utb", _("French (unified) 6 dot computer braille"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("fr-bfu-comp8.utb", _("French (unified) 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("fr-bfu-g2.ctb", _("French (unified) Grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("fr-ca-g1.utb", _("French (Canada) grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("Fr-Ca-g2.ctb", _("French (Canada) grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ga-g1.utb", _("Irish grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ga-g2.ctb", _("Irish grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("gu-in-g1.utb", _("Gujarati grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("gr-gr-g1.utb", _("Greek (Greece) grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("he.ctb", _("Hebrew 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("hi-in-g1.utb", _("Hindi grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("hr.ctb", _("Croatian 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("hu-hu-comp8.ctb", _("Hungarian 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("hu-hu-g1.ctb", _("Hungarian grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("is.ctb", _("Icelandic 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("it-it-comp6.utb", _("Italian 6 dot computer braille"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("it-it-comp8.utb", _("Italian 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ka-in-g1.utb", _("Kannada grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ko-2006-g1.ctb", _("Korean grade 1 (2006)"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ko-2006-g2.ctb", _("Korean grade 2 (2006)"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ko-g1.ctb", _("Korean grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ko-g2.ctb", _("Korean grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ks-in-g1.utb", _("Kashmiri grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("Lv-Lv-g1.utb", _("Latvian grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ml-in-g1.utb", _("Malayalam grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("mn-in-g1.utb", _("Manipuri grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("mr-in-g1.utb", _("Marathi grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("nl-BE-g1.ctb", _("Dutch (Belgium) grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("nl-NL-g1.ctb", _("Dutch (Netherlands) grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("no-no.ctb", _("Norwegian 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("No-No-g0.utb", _("Norwegian grade 0"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("No-No-g1.ctb", _("Norwegian grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("No-No-g2.ctb", _("Norwegian grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("No-No-g3.ctb", _("Norwegian grade 3"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("np-in-g1.utb", _("Nepali grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("or-in-g1.utb", _("Oriya grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("Pl-Pl-g1.utb", _("Polish grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("pt-pt-comp8.ctb", _("Portuguese 8 dot computer braille"), True),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("Pt-Pt-g1.utb", _("Portuguese grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("Pt-Pt-g2.ctb", _("Portuguese grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("pu-in-g1.utb", _("Punjabi grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ru-compbrl.ctb", _("Russian braille for computer code"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ru-ru-g1.utb", _("Russian grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("sa-in-g1.utb", _("Sanskrit grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("Se-Se-g1.utb", _("Swedish grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("sk-sk-g1.utb", _("Slovak grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("sl-si-g1.utb", _("Slovene grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("sr-g1.ctb", _("Serbian grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("ta-ta-g1.ctb", _("Tamil grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("te-in-g1.utb", _("Telegu grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("tr.ctb", _("Turkish grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("UEBC-g1.utb", _("Unified English Braille Code grade 1"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("UEBC-g2.ctb", _("Unified English Braille Code grade 2"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("zh-hk.ctb", _("Chinese (Hong Kong, Cantonese)"), False),
# Translators: The name of a braille table displayed in the
# braille settings dialog.
("zh-tw.ctb", _("Chinese (Taiwan, Mandarin)"), False),
)
#: Braille tables that support input (only computer braille tables yet).
INPUT_TABLES = tuple(t for t in TABLES if t[2])
roleLabels = {
# Translators: Displayed in braille for an object which is an
# editable text field.
controlTypes.ROLE_EDITABLETEXT: _("edt"),
# Translators: Displayed in braille for an object which is a
# list.
controlTypes.ROLE_LIST: _("lst"),
# Translators: Displayed in braille for an object which is a
# menu bar.
controlTypes.ROLE_MENUBAR: _("mnubar"),
# Translators: Displayed in braille for an object which is a
# menu.
controlTypes.ROLE_POPUPMENU: _("mnu"),
# Translators: Displayed in braille for an object which is a
# button.
controlTypes.ROLE_BUTTON: _("btn"),
# Translators: Displayed in braille for an object which is a
# check box.
controlTypes.ROLE_CHECKBOX: _("chk"),
# Translators: Displayed in braille for an object which is a
# radio button.
controlTypes.ROLE_RADIOBUTTON: _("rbtn"),
# Translators: Displayed in braille for an object which is a
# combo box.
controlTypes.ROLE_COMBOBOX: _("cbo"),
# Translators: Displayed in braille for an object which is a
# link.
controlTypes.ROLE_LINK: _("lnk"),
# Translators: Displayed in braille for an object which is a
# dialog.
controlTypes.ROLE_DIALOG: _("dlg"),
# Translators: Displayed in braille for an object which is a
# tree view.
controlTypes.ROLE_TREEVIEW: _("tv"),
# Translators: Displayed in braille for an object which is a
# table.
controlTypes.ROLE_TABLE: _("tb"),
# Translators: Displayed in braille for an object which is a
# separator.
controlTypes.ROLE_SEPARATOR: _("-----"),
# Translators: Displayed in braille for an object which is a
# graphic.
controlTypes.ROLE_GRAPHIC: _("gra"),
}
positiveStateLabels = {
# Translators: Displayed in braille when an object (e.g. a check box) is checked.
controlTypes.STATE_CHECKED: _("(x)"),
# Translators: Displayed in braille when an object (e.g. a check box) is half checked.
controlTypes.STATE_HALFCHECKED: _("(-)"),
# Translators: Displayed in braille when an object is selected.
controlTypes.STATE_SELECTED: _("sel"),
# Translators: Displayed in braille when an object has a popup (usually a sub-menu).
controlTypes.STATE_HASPOPUP: _("submnu"),
# Translators: Displayed in braille when an object supports autocompletion.
controlTypes.STATE_AUTOCOMPLETE: _("..."),
# Translators: Displayed in braille when an object (e.g. a tree view item) is expanded.
controlTypes.STATE_EXPANDED: _("-"),
# Translators: Displayed in braille when an object (e.g. a tree view item) is collapsed.
controlTypes.STATE_COLLAPSED: _("+"),
# Translators: Displayed in braille when an object (e.g. an editable text field) is read-only.
controlTypes.STATE_READONLY: _("ro"),
# Translators: Displayed in braille when an object is clickable.
controlTypes.STATE_CLICKABLE: _("clk"),
}
negativeStateLabels = {
# Translators: Displayed in braille when an object (e.g. a check box) is not checked.
controlTypes.STATE_CHECKED: _("( )"),
}
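# Braille dot bit masks for dots 7 and 8; used, for example, to mark the selection on the display.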
DOT7 = 64
DOT8 = 128
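# Return True if the object exposes text that should be presented as a text region;
# i.e. it has display model text, is an editable text control or terminal, or is a document that isn't read-only.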
def NVDAObjectHasUsefulText(obj):
import displayModel
role = obj.role
states = obj.states
return (issubclass(obj.TextInfo,displayModel.DisplayModelTextInfo)
or role in (controlTypes.ROLE_EDITABLETEXT, controlTypes.ROLE_TERMINAL)
or controlTypes.STATE_EDITABLE in states
or (role == controlTypes.ROLE_DOCUMENT and controlTypes.STATE_READONLY not in obj.states))
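# Import the braille display driver module with the given name and return its BrailleDisplayDriver class.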
def _getDisplayDriver(name):
return __import__("brailleDisplayDrivers.%s" % name, globals(), locals(), ("brailleDisplayDrivers",)).BrailleDisplayDriver
def getDisplayList():
displayList = []
# The display that should be placed at the end of the list.
lastDisplay = None
for loader, name, isPkg in pkgutil.iter_modules(brailleDisplayDrivers.__path__):
if name.startswith('_'):
continue
try:
display = _getDisplayDriver(name)
except:
log.error("Error while importing braille display driver %s" % name,
exc_info=True)
continue
try:
if display.check():
if display.name == "noBraille":
lastDisplay = (display.name, display.description)
else:
displayList.append((display.name, display.description))
else:
log.debugWarning("Braille display driver %s reports as unavailable, excluding" % name)
except:
log.error("", exc_info=True)
displayList.sort(key=lambda d : d[1].lower())
if lastDisplay:
displayList.append(lastDisplay)
return displayList
class Region(object):
"""A region of braille to be displayed.
Each portion of braille to be displayed is represented by a region.
The region is responsible for retrieving its text and cursor position, translating it into braille cells and handling cursor routing requests relative to its braille cells.
The L{BrailleBuffer} containing this region will call L{update} and expect that L{brailleCells} and L{brailleCursorPos} will be set appropriately.
L{routeTo} will be called to handle a cursor routing request.
"""
def __init__(self):
#: The original, raw text of this region.
self.rawText = ""
#: The position of the cursor in L{rawText}, C{None} if the cursor is not in this region.
#: @type: int
self.cursorPos = None
#: The translated braille representation of this region.
#: @type: [int, ...]
self.brailleCells = []
#: liblouis typeform flags for each character in L{rawText},
#: C{None} if no typeform info.
#: @type: [int, ...]
self.rawTextTypeforms = None
#: A list mapping positions in L{rawText} to positions in L{brailleCells}.
#: @type: [int, ...]
self.rawToBraillePos = []
#: A list mapping positions in L{brailleCells} to positions in L{rawText}.
#: @type: [int, ...]
self.brailleToRawPos = []
#: The position of the cursor in L{brailleCells}, C{None} if the cursor is not in this region.
#: @type: int
self.brailleCursorPos = None
#: Whether to hide all previous regions.
#: @type: bool
self.hidePreviousRegions = False
#: Whether this region should be positioned at the absolute left of the display when focused.
#: @type: bool
self.focusToHardLeft = False
def update(self):
"""Update this region.
Subclasses should extend this to update L{rawText} and L{cursorPos} if necessary.
The base class method handles translation of L{rawText} into braille, placing the result in L{brailleCells}.
Typeform information from L{rawTextTypeforms} is used, if any.
L{rawToBraillePos} and L{brailleToRawPos} are updated according to the translation.
L{brailleCursorPos} is similarly updated based on L{cursorPos}.
@postcondition: L{brailleCells} and L{brailleCursorPos} are updated and ready for rendering.
"""
mode = louis.dotsIO | louis.pass1Only
if config.conf["braille"]["expandAtCursor"] and self.cursorPos is not None:
mode |= louis.compbrlAtCursor
text=unicode(self.rawText).replace('\0','')
braille, self.brailleToRawPos, self.rawToBraillePos, brailleCursorPos = louis.translate(
[os.path.join(TABLES_DIR, config.conf["braille"]["translationTable"]),
"braille-patterns.cti"],
text,
# liblouis mutates typeform if it is a list.
typeform=tuple(self.rawTextTypeforms) if isinstance(self.rawTextTypeforms, list) else self.rawTextTypeforms,
mode=mode, cursorPos=self.cursorPos or 0)
# liblouis gives us back a character string of cells, so convert it to a list of ints.
# For some reason, the highest bit is set, so only grab the lower 8 bits.
self.brailleCells = [ord(cell) & 255 for cell in braille]
# #2466: HACK: liblouis incorrectly truncates trailing spaces from its output in some cases.
# Detect this and add the spaces to the end of the output.
if self.rawText and self.rawText[-1] == " ":
# rawToBraillePos isn't truncated, even though brailleCells is.
# Use this to figure out how long brailleCells should be and thus how many spaces to add.
correctCellsLen = self.rawToBraillePos[-1] + 1
currentCellsLen = len(self.brailleCells)
if correctCellsLen > currentCellsLen:
self.brailleCells.extend((0,) * (correctCellsLen - currentCellsLen))
if self.cursorPos is not None:
# HACK: The cursorPos returned by liblouis is notoriously buggy (#2947 among other issues).
# rawToBraillePos is usually accurate.
try:
brailleCursorPos = self.rawToBraillePos[self.cursorPos]
except IndexError:
pass
else:
brailleCursorPos = None
self.brailleCursorPos = brailleCursorPos
def routeTo(self, braillePos):
"""Handle a cursor routing request.
For example, this might activate an object or move the cursor to the requested position.
@param braillePos: The routing position in L{brailleCells}.
@type braillePos: int
@note: If routing the cursor, L{brailleToRawPos} can be used to translate L{braillePos} into a position in L{rawText}.
"""
def nextLine(self):
"""Move to the next line if possible.
"""
def previousLine(self, start=False):
"""Move to the previous line if possible.
@param start: C{True} to move to the start of the line, C{False} to move to the end.
@type start: bool
"""
class TextRegion(Region):
"""A simple region containing a string of text.
"""
def __init__(self, text):
super(TextRegion, self).__init__()
self.rawText = text
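# Build the braille text for a set of object properties (name, role, states, value, position info,
# table coordinates, etc.), joining the individual pieces with spaces.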
def getBrailleTextForProperties(**propertyValues):
textList = []
name = propertyValues.get("name")
if name:
textList.append(name)
role = propertyValues.get("role")
states = propertyValues.get("states")
positionInfo = propertyValues.get("positionInfo")
level = positionInfo.get("level") if positionInfo else None
cellCoordsText=propertyValues.get('cellCoordsText')
rowNumber = propertyValues.get("rowNumber")
columnNumber = propertyValues.get("columnNumber")
includeTableCellCoords = propertyValues.get("includeTableCellCoords", True)
if role is not None:
if role == controlTypes.ROLE_HEADING and level:
# Translators: Displayed in braille for a heading with a level.
# %s is replaced with the level.
roleText = _("h%s") % level
level = None
elif role == controlTypes.ROLE_LINK and states and controlTypes.STATE_VISITED in states:
states = states.copy()
states.discard(controlTypes.STATE_VISITED)
# Translators: Displayed in braille for a link which has been visited.
roleText = _("vlnk")
elif (name or cellCoordsText or rowNumber or columnNumber) and role in controlTypes.silentRolesOnFocus:
roleText = None
else:
roleText = roleLabels.get(role, controlTypes.roleLabels[role])
else:
role = propertyValues.get("_role")
roleText = None
value = propertyValues.get("value")
if value and role not in controlTypes.silentValuesForRoles:
textList.append(value)
if states:
positiveStates = controlTypes.processPositiveStates(role, states, controlTypes.REASON_FOCUS, states)
textList.extend(positiveStateLabels.get(state, controlTypes.stateLabels[state]) for state in positiveStates)
negativeStates = controlTypes.processNegativeStates(role, states, controlTypes.REASON_FOCUS, None)
textList.extend(negativeStateLabels.get(state, controlTypes.negativeStateLabels.get(state, _("not %s") % controlTypes.stateLabels[state])) for state in negativeStates)
if roleText:
textList.append(roleText)
description = propertyValues.get("description")
if description:
textList.append(description)
keyboardShortcut = propertyValues.get("keyboardShortcut")
if keyboardShortcut:
textList.append(keyboardShortcut)
if positionInfo:
indexInGroup = positionInfo.get("indexInGroup")
similarItemsInGroup = positionInfo.get("similarItemsInGroup")
if indexInGroup and similarItemsInGroup:
# Translators: Brailled to indicate the position of an item in a group of items (such as a list).
# {number} is replaced with the number of the item in the group.
# {total} is replaced with the total number of items in the group.
textList.append(_("{number} of {total}").format(number=indexInGroup, total=similarItemsInGroup))
if level is not None:
# Translators: Displayed in braille when an object (e.g. a tree view item) has a hierarchical level.
# %s is replaced with the level.
textList.append(_('lv %s')%positionInfo['level'])
if rowNumber:
if includeTableCellCoords and not cellCoordsText:
# Translators: Displayed in braille for a table cell row number.
# %s is replaced with the row number.
textList.append(_("r%s") % rowNumber)
if columnNumber:
columnHeaderText = propertyValues.get("columnHeaderText")
if columnHeaderText:
textList.append(columnHeaderText)
if includeTableCellCoords and not cellCoordsText:
# Translators: Displayed in braille for a table cell column number.
# %s is replaced with the column number.
textList.append(_("c%s") % columnNumber)
if includeTableCellCoords and cellCoordsText:
textList.append(cellCoordsText)
return " ".join([x for x in textList if x])
class NVDAObjectRegion(Region):
"""A region to provide a braille representation of an NVDAObject.
This region will update based on the current state of the associated NVDAObject.
A cursor routing request will activate the object's default action.
"""
def __init__(self, obj, appendText=""):
"""Constructor.
@param obj: The associated NVDAObject.
@type obj: L{NVDAObjects.NVDAObject}
@param appendText: Text which should always be appended to the NVDAObject text, useful if this region will always precede other regions.
@type appendText: str
"""
super(NVDAObjectRegion, self).__init__()
self.obj = obj
self.appendText = appendText
def update(self):
obj = self.obj
presConfig = config.conf["presentation"]
role = obj.role
text = getBrailleTextForProperties(name=obj.name, role=role,
value=obj.value if not NVDAObjectHasUsefulText(obj) else None ,
states=obj.states,
description=obj.description if presConfig["reportObjectDescriptions"] else None,
keyboardShortcut=obj.keyboardShortcut if presConfig["reportKeyboardShortcuts"] else None,
positionInfo=obj.positionInfo if presConfig["reportObjectPositionInformation"] else None,
cellCoordsText=obj.cellCoordsText if config.conf["documentFormatting"]["reportTableCellCoords"] else None,
)
if role == controlTypes.ROLE_MATH:
import mathPres
mathPres.ensureInit()
if mathPres.brailleProvider:
try:
text += " " + mathPres.brailleProvider.getBrailleForMathMl(
obj.mathMl)
except (NotImplementedError, LookupError):
pass
self.rawText = text + self.appendText
super(NVDAObjectRegion, self).update()
def routeTo(self, braillePos):
try:
self.obj.doAction()
except NotImplementedError:
pass
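# Generate the braille text for a control field, reporting either the start or the end of its node
# depending on reportStart.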
def getControlFieldBraille(info, field, ancestors, reportStart, formatConfig):
presCat = field.getPresentationCategory(ancestors, formatConfig)
# Cache this for later use.
field._presCat = presCat
if reportStart:
# If this is a container, only report it if this is the start of the node.
if presCat == field.PRESCAT_CONTAINER and not field.get("_startOfNode"):
return None
else:
# We only report ends for containers
# and only if this is the end of the node.
if presCat != field.PRESCAT_CONTAINER or not field.get("_endOfNode"):
return None
role = field.get("role", controlTypes.ROLE_UNKNOWN)
states = field.get("states", set())
value=field.get('value',None)
if presCat == field.PRESCAT_LAYOUT:
# The only item we report for these fields is clickable, if present.
if controlTypes.STATE_CLICKABLE in states:
return getBrailleTextForProperties(states={controlTypes.STATE_CLICKABLE})
return None
elif role in (controlTypes.ROLE_TABLECELL, controlTypes.ROLE_TABLECOLUMNHEADER, controlTypes.ROLE_TABLEROWHEADER) and field.get("table-id"):
# Table cell.
reportTableHeaders = formatConfig["reportTableHeaders"]
reportTableCellCoords = formatConfig["reportTableCellCoords"]
props = {
"states": states,
"rowNumber": field.get("table-rownumber"),
"columnNumber": field.get("table-columnnumber"),
"includeTableCellCoords": reportTableCellCoords
}
if reportTableHeaders:
props["columnHeaderText"] = field.get("table-columnheadertext")
return getBrailleTextForProperties(**props)
elif reportStart:
props = {
# Don't report the role for math here.
# However, we still need to pass it (hence "_role").
"_role" if role == controlTypes.ROLE_MATH else "role": role,
"states": states,"value":value}
if config.conf["presentation"]["reportKeyboardShortcuts"]:
kbShortcut = field.get("keyboardShortcut")
if kbShortcut:
props["keyboardShortcut"] = kbShortcut
level = field.get("level")
if level:
props["positionInfo"] = {"level": level}
text = getBrailleTextForProperties(**props)
if role == controlTypes.ROLE_MATH:
import mathPres
mathPres.ensureInit()
if mathPres.brailleProvider:
try:
if text:
text += " "
text += mathPres.brailleProvider.getBrailleForMathMl(
info.getMathMl(field))
except (NotImplementedError, LookupError):
pass
return text
else:
# Translators: Displayed in braille at the end of a control field such as a list or table.
# %s is replaced with the control's role.
return (_("%s end") %
getBrailleTextForProperties(role=role))
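# Generate the braille text for a format field; currently, only line prefixes are reported.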
def getFormatFieldBraille(field):
linePrefix = field.get("line-prefix")
if linePrefix:
return linePrefix
return None
class TextInfoRegion(Region):
pendingCaretUpdate=False #: True if the cursor should be updated for this region on the display
def __init__(self, obj):
super(TextInfoRegion, self).__init__()
self.obj = obj
def _isMultiline(self):
# A region's object can either be an NVDAObject or a tree interceptor.
# Tree interceptors should always be multiline.
from treeInterceptorHandler import TreeInterceptor
if isinstance(self.obj, TreeInterceptor):
return True
# Terminals are inherently multiline, so they don't have the multiline state.
return (self.obj.role == controlTypes.ROLE_TERMINAL or controlTypes.STATE_MULTILINE in self.obj.states)
def _getCursor(self):
"""Retrieve the collapsed cursor.
This should be the start or end of the selection returned by L{_getSelection}.
@return: The cursor.
"""
try:
return self.obj.makeTextInfo(textInfos.POSITION_CARET)
except:
return self.obj.makeTextInfo(textInfos.POSITION_FIRST)
def _getSelection(self):
"""Retrieve the selection.
The start or end of this should be the cursor returned by L{_getCursor}.
@return: The selection.
@rtype: L{textInfos.TextInfo}
"""
try:
return self.obj.makeTextInfo(textInfos.POSITION_SELECTION)
except:
return self.obj.makeTextInfo(textInfos.POSITION_FIRST)
def _setCursor(self, info):
"""Set the cursor.
@param info: The range to which the cursor should be moved.
@type info: L{textInfos.TextInfo}
"""
try:
info.updateCaret()
except NotImplementedError:
log.debugWarning("", exc_info=True)
def _getTypeformFromFormatField(self, field):
typeform = louis.plain_text
if field.get("bold", False):
typeform |= louis.bold
if field.get("italic", False):
typeform |= louis.italic
if field.get("underline", False):
typeform |= louis.underline
return typeform
def _addFieldText(self, text, contentPos):
if self.rawText:
# Separate this field text from the rest of the text.
text = " " + text
self.rawText += text
textLen = len(text)
self.rawTextTypeforms.extend((louis.plain_text,) * textLen)
self._rawToContentPos.extend((contentPos,) * textLen)
def _addTextWithFields(self, info, formatConfig, isSelection=False):
shouldMoveCursorToFirstContent = not isSelection and self.cursorPos is not None
ctrlFields = []
typeform = louis.plain_text
for command in info.getTextWithFields(formatConfig=formatConfig):
if isinstance(command, basestring):
if not command:
continue
if self._endsWithField:
# The last item added was a field,
# so add a space before the content.
self.rawText += " "
self.rawTextTypeforms.append(louis.plain_text)
self._rawToContentPos.append(self._currentContentPos)
if isSelection and self._selectionStart is None:
# This is where the content begins.
self._selectionStart = len(self.rawText)
elif shouldMoveCursorToFirstContent:
# This is the first piece of content after the cursor.
# Position the cursor here, as it may currently be positioned on control field text.
self.cursorPos = len(self.rawText)
shouldMoveCursorToFirstContent = False
self.rawText += command
commandLen = len(command)
self.rawTextTypeforms.extend((typeform,) * commandLen)
endPos = self._currentContentPos + commandLen
self._rawToContentPos.extend(xrange(self._currentContentPos, endPos))
self._currentContentPos = endPos
if isSelection:
# The last time this is set will be the end of the content.
self._selectionEnd = len(self.rawText)
self._endsWithField = False
elif isinstance(command, textInfos.FieldCommand):
cmd = command.command
field = command.field
if cmd == "formatChange":
typeform = self._getTypeformFromFormatField(field)
text = getFormatFieldBraille(field)
if not text:
continue
# Map this field text to the start of the field's content.
self._addFieldText(text, self._currentContentPos)
elif cmd == "controlStart":
if self._skipFieldsNotAtStartOfNode and not field.get("_startOfNode"):
text = None
else:
text = info.getControlFieldBraille(field, ctrlFields, True, formatConfig)
# Place this field on a stack so we can access it for controlEnd.
ctrlFields.append(field)
if not text:
continue
if getattr(field, "_presCat") == field.PRESCAT_MARKER:
# In this case, the field text is what the user cares about,
# not the actual content.
fieldStart = len(self.rawText)
if fieldStart > 0:
# There'll be a space before the field text.
fieldStart += 1
if isSelection and self._selectionStart is None:
self._selectionStart = fieldStart
elif shouldMoveCursorToFirstContent:
self.cursorPos = fieldStart
shouldMoveCursorToFirstContent = False
# Map this field text to the start of the field's content.
self._addFieldText(text, self._currentContentPos)
elif cmd == "controlEnd":
field = ctrlFields.pop()
text = info.getControlFieldBraille(field, ctrlFields, False, formatConfig)
if not text:
continue
# Map this field text to the end of the field's content.
self._addFieldText(text, self._currentContentPos - 1)
self._endsWithField = True
if isSelection and self._selectionStart is None:
# There is no selection. This is a cursor.
self.cursorPos = len(self.rawText)
if not self._skipFieldsNotAtStartOfNode:
# We only render fields that aren't at the start of their nodes for the first part of the reading unit.
# Otherwise, we'll render fields that have already been rendered.
self._skipFieldsNotAtStartOfNode = True
def _getReadingUnit(self):
return textInfos.UNIT_PARAGRAPH if config.conf["braille"]["readByParagraph"] else textInfos.UNIT_LINE
def update(self):
formatConfig = config.conf["documentFormatting"]
unit = self._getReadingUnit()
# HACK: Some TextInfos only support UNIT_LINE properly if they are based on POSITION_CARET,
# so use the original cursor TextInfo for line and copy for cursor.
self._readingInfo = readingInfo = self._getCursor()
cursor = readingInfo.copy()
# Get the reading unit at the cursor.
readingInfo.expand(unit)
# Get the selection.
sel = self._getSelection()
# Restrict the selection to the reading unit at the cursor.
if sel.compareEndPoints(readingInfo, "startToStart") < 0:
sel.setEndPoint(readingInfo, "startToStart")
if sel.compareEndPoints(readingInfo, "endToEnd") > 0:
sel.setEndPoint(readingInfo, "endToEnd")
self.rawText = ""
self.rawTextTypeforms = []
self.cursorPos = None
# The output includes text representing fields which isn't part of the real content in the control.
# Therefore, maintain a map of positions in the output to positions in the content.
self._rawToContentPos = []
self._currentContentPos = 0
self._selectionStart = self._selectionEnd = None
self._skipFieldsNotAtStartOfNode = False
self._endsWithField = False
# Not all text APIs support offsets, so we can't always get the offset of the selection relative to the start of the reading unit.
# Therefore, grab the reading unit in three parts.
# First, the chunk from the start of the reading unit to the start of the selection.
chunk = readingInfo.copy()
chunk.collapse()
chunk.setEndPoint(sel, "endToStart")
self._addTextWithFields(chunk, formatConfig)
# Now, the selection itself.
self._addTextWithFields(sel, formatConfig, isSelection=True)
# Finally, get the chunk from the end of the selection to the end of the reading unit.
chunk.setEndPoint(readingInfo, "endToEnd")
chunk.setEndPoint(sel, "startToEnd")
self._addTextWithFields(chunk, formatConfig)
# Strip line ending characters.
self.rawText = self.rawText.rstrip("\r\n\0\v\f")
rawTextLen = len(self.rawText)
if rawTextLen < len(self._rawToContentPos):
# The stripped text is shorter than the original.
self._currentContentPos = self._rawToContentPos[rawTextLen]
del self.rawTextTypeforms[rawTextLen:]
# Trimming _rawToContentPos doesn't matter,
# because we'll only ever ask for indexes valid in rawText.
#del self._rawToContentPos[rawTextLen:]
if rawTextLen == 0 or not self._endsWithField:
# There is no text left after stripping line ending characters,
# or the last item added can be navigated with a cursor.
# Add a space in case the cursor is at the end of the reading unit.
self.rawText += " "
rawTextLen += 1
self.rawTextTypeforms.append(louis.plain_text)
self._rawToContentPos.append(self._currentContentPos)
if self.cursorPos is not None and self.cursorPos >= rawTextLen:
self.cursorPos = rawTextLen - 1
# If this is not the start of the object, hide all previous regions.
start = cursor.obj.makeTextInfo(textInfos.POSITION_FIRST)
self.hidePreviousRegions = (start.compareEndPoints(readingInfo, "startToStart") < 0)
# If this is a multiline control, position it at the absolute left of the display when focused.
self.focusToHardLeft = self._isMultiline()
super(TextInfoRegion, self).update()
if self._selectionStart is not None:
# Mark the selection with dots 7 and 8.
if self._selectionEnd >= len(self.rawText):
brailleSelEnd = len(self.brailleCells)
else:
brailleSelEnd = self.rawToBraillePos[self._selectionEnd]
for pos in xrange(self.rawToBraillePos[self._selectionStart], brailleSelEnd):
self.brailleCells[pos] |= DOT7 | DOT8
def routeTo(self, braillePos):
if braillePos == self.brailleCursorPos:
# The cursor is already at this position,
# so activate the position.
try:
self._getCursor().activate()
except NotImplementedError:
pass
return
pos = self._rawToContentPos[self.brailleToRawPos[braillePos]]
# pos is relative to the start of the reading unit.
# Therefore, get the start of the reading unit...
dest = self._readingInfo.copy()
dest.collapse()
# and move pos characters from there.
dest.move(textInfos.UNIT_CHARACTER, pos)
self._setCursor(dest)
def nextLine(self):
dest = self._readingInfo.copy()
moved = dest.move(self._getReadingUnit(), 1)
if not moved:
return
dest.collapse()
self._setCursor(dest)
def previousLine(self, start=False):
dest = self._readingInfo.copy()
dest.collapse()
if start:
unit = self._getReadingUnit()
else:
# If the end of the reading unit is desired, move to the last character.
unit = textInfos.UNIT_CHARACTER
moved = dest.move(unit, -1)
if not moved:
return
dest.collapse()
self._setCursor(dest)
class CursorManagerRegion(TextInfoRegion):
def _isMultiline(self):
return True
def _getSelection(self):
return self.obj.selection
def _setCursor(self, info):
self.obj.selection = info
class ReviewTextInfoRegion(TextInfoRegion):
def _getCursor(self):
return api.getReviewPosition().copy()
_getSelection = _getCursor
def _setCursor(self, info):
api.setReviewPosition(info)
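# Like list.index, but returns the last occurrence of item in seq within [start, end).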
def rindex(seq, item, start, end):
for index in xrange(end - 1, start - 1, -1):
if seq[index] == item:
return index
raise ValueError("%r is not in sequence" % item)
class BrailleBuffer(baseObject.AutoPropertyObject):
def __init__(self, handler):
self.handler = handler
#: The regions in this buffer.
#: @type: [L{Region}, ...]
self.regions = []
#: The position of the cursor in L{brailleCells}, C{None} if no region contains the cursor.
#: @type: int
self.cursorPos = None
#: The translated braille representation of the entire buffer.
#: @type: [int, ...]
self.brailleCells = []
#: The position in L{brailleCells} where the display window starts (inclusive).
#: @type: int
self.windowStartPos = 0
def clear(self):
"""Clear the entire buffer.
This removes all regions and resets the window position to 0.
"""
self.regions = []
self.cursorPos = None
self.brailleCursorPos = None
self.brailleCells = []
self.windowStartPos = 0
def _get_visibleRegions(self):
if not self.regions:
return
if self.regions[-1].hidePreviousRegions:
yield self.regions[-1]
return
for region in self.regions:
yield region
def _get_regionsWithPositions(self):
start = 0
for region in self.visibleRegions:
end = start + len(region.brailleCells)
yield region, start, end
start = end
def bufferPosToRegionPos(self, bufferPos):
for region, start, end in self.regionsWithPositions:
if end > bufferPos:
return region, bufferPos - start
raise LookupError("No such position")
def regionPosToBufferPos(self, region, pos, allowNearest=False):
for testRegion, start, end in self.regionsWithPositions:
if region == testRegion:
if pos < end - start:
# The requested position is still valid within the region.
return start + pos
elif allowNearest:
# The position within the region isn't valid,
# but the region is valid, so return its start.
return start
break
if allowNearest:
# Resort to the start of the last region.
return start
raise LookupError("No such position")
def bufferPosToWindowPos(self, bufferPos):
if not (self.windowStartPos <= bufferPos < self.windowEndPos):
raise LookupError("Buffer position not in window")
return bufferPos - self.windowStartPos
def _get_windowEndPos(self):
endPos = self.windowStartPos + self.handler.displaySize
cellsLen = len(self.brailleCells)
if endPos >= cellsLen:
return cellsLen
if not config.conf["braille"]["wordWrap"]:
return endPos
try:
# Try not to split words across windows.
# To do this, break after the furthest possible space.
return min(rindex(self.brailleCells, 0, self.windowStartPos, endPos) + 1,
endPos)
except ValueError:
pass
return endPos
def _set_windowEndPos(self, endPos):
startPos = endPos - self.handler.displaySize
# Get the last region currently displayed.
region, regionPos = self.bufferPosToRegionPos(endPos - 1)
if region.focusToHardLeft:
# Only scroll to the start of this region.
restrictPos = endPos - regionPos - 1
else:
restrictPos = 0
if startPos <= restrictPos:
self.windowStartPos = restrictPos
return
if not config.conf["braille"]["wordWrap"]:
self.windowStartPos = startPos
return
try:
# Try not to split words across windows.
# To do this, break after the furthest possible block of spaces.
# Find the start of the first block of spaces.
# Search from 1 cell before in case startPos is just after a space.
startPos = self.brailleCells.index(0, startPos - 1, endPos)
# Skip past spaces.
for startPos in xrange(startPos, endPos):
if self.brailleCells[startPos] != 0:
break
except ValueError:
pass
self.windowStartPos = startPos
def _nextWindow(self):
oldStart = self.windowStartPos
end = self.windowEndPos
if end < len(self.brailleCells):
self.windowStartPos = end
return self.windowStartPos != oldStart
def scrollForward(self):
if not self._nextWindow():
# The window could not be scrolled, so try moving to the next line.
if self.regions:
self.regions[-1].nextLine()
else:
# Scrolling succeeded.
self.updateDisplay()
def _previousWindow(self):
start = self.windowStartPos
if start > 0:
self.windowEndPos = start
return self.windowStartPos != start
def scrollBack(self):
if not self._previousWindow():
# The window could not be scrolled, so try moving to the previous line.
if self.regions:
self.regions[-1].previousLine()
else:
# Scrolling succeeded.
self.updateDisplay()
def scrollTo(self, region, pos):
pos = self.regionPosToBufferPos(region, pos)
while pos >= self.windowEndPos:
if not self._nextWindow():
break
while pos < self.windowStartPos:
if not self._previousWindow():
break
self.updateDisplay()
def focus(self, region):
"""Bring the specified region into focus.
The region is placed at the start of the display.
However, if the region has not set L{Region.focusToHardLeft} and there is extra space at the end of the display, the display is scrolled left so that as much as possible is displayed.
@param region: The region to focus.
@type region: L{Region}
"""
pos = self.regionPosToBufferPos(region, 0)
self.windowStartPos = pos
if region.focusToHardLeft:
return
end = self.windowEndPos
if end - pos < self.handler.displaySize:
# We can fit more on the display while still keeping pos visible.
# Force windowStartPos to be recalculated based on windowEndPos.
self.windowEndPos = end
def update(self):
self.brailleCells = []
self.cursorPos = None
start = 0
if log.isEnabledFor(log.IO):
logRegions = []
for region in self.visibleRegions:
if log.isEnabledFor(log.IO):
logRegions.append(region.rawText)
cells = region.brailleCells
self.brailleCells.extend(cells)
if region.brailleCursorPos is not None:
self.cursorPos = start + region.brailleCursorPos
start += len(cells)
if log.isEnabledFor(log.IO):
log.io("Braille regions text: %r" % logRegions)
def updateDisplay(self):
if self is self.handler.buffer:
self.handler.update()
def _get_cursorWindowPos(self):
if self.cursorPos is None:
return None
try:
return self.bufferPosToWindowPos(self.cursorPos)
except LookupError:
return None
def _get_windowBrailleCells(self):
return self.brailleCells[self.windowStartPos:self.windowEndPos]
def routeTo(self, windowPos):
pos = self.windowStartPos + windowPos
if pos >= self.windowEndPos:
return
region, pos = self.bufferPosToRegionPos(pos)
region.routeTo(pos)
def saveWindow(self):
"""Save the current window so that it can be restored after the buffer is updated.
The window start position is saved as a position relative to a region.
This allows it to be restored even after other regions are added, removed or updated.
It can be restored with L{restoreWindow}.
@postcondition: The window is saved and can be restored with L{restoreWindow}.
"""
self._savedWindow = self.bufferPosToRegionPos(self.windowStartPos)
def restoreWindow(self):
"""Restore the window saved by L{saveWindow}.
@precondition: L{saveWindow} has been called.
@postcondition: If the saved position is valid, the window is restored.
Otherwise, the nearest position is restored.
"""
region, pos = self._savedWindow
self.windowStartPos = self.regionPosToBufferPos(region, pos, allowNearest=True)
_cachedFocusAncestorsEnd = 0
def invalidateCachedFocusAncestors(index):
"""Invalidate cached focus ancestors from a given index.
This will cause regions to be generated for the focus ancestors >= index next time L{getFocusContextRegions} is called,
rather than using cached regions for those ancestors.
@param index: The index from which cached focus ancestors should be invalidated.
@type index: int
"""
global _cachedFocusAncestorsEnd
# There could be multiple calls to this function before getFocusContextRegions() is called.
_cachedFocusAncestorsEnd = min(_cachedFocusAncestorsEnd, index)
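# Yield braille regions for the focus ancestors of obj,
# reusing regions from oldFocusRegions where the ancestry hasn't changed.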
def getFocusContextRegions(obj, oldFocusRegions=None):
global _cachedFocusAncestorsEnd
# Late import to avoid circular import.
from treeInterceptorHandler import TreeInterceptor
ancestors = api.getFocusAncestors()
ancestorsEnd = len(ancestors)
if isinstance(obj, TreeInterceptor):
obj = obj.rootNVDAObject
# We only want the ancestors of the buffer's root NVDAObject.
if obj != api.getFocusObject():
# Search backwards through the focus ancestors to find the index of obj.
for index, ancestor in itertools.izip(xrange(len(ancestors) - 1, 0, -1), reversed(ancestors)):
if obj == ancestor:
ancestorsEnd = index
break
if oldFocusRegions:
# We have the regions from the previous focus, so use them as a cache to avoid rebuilding regions which are the same.
# We need to generate new regions from _cachedFocusAncestorsEnd onwards.
# However, we must ensure that it is not beyond the last ancestor we wish to consider.
# Also, we don't ever want to fetch ancestor 0 (the desktop).
newAncestorsStart = max(min(_cachedFocusAncestorsEnd, ancestorsEnd), 1)
# Search backwards through the old regions to find the last common region.
for index, region in itertools.izip(xrange(len(oldFocusRegions) - 1, -1, -1), reversed(oldFocusRegions)):
ancestorIndex = getattr(region, "_focusAncestorIndex", None)
if ancestorIndex is None:
continue
if ancestorIndex < newAncestorsStart:
# This is the last common region.
# An ancestor may have been skipped and not have a region, which means that we need to grab new ancestors from this point.
newAncestorsStart = ancestorIndex + 1
commonRegionsEnd = index + 1
break
else:
# No common regions were found.
commonRegionsEnd = 0
newAncestorsStart = 1
# Yield the common regions.
for region in oldFocusRegions[0:commonRegionsEnd]:
yield region
else:
# Fetch all ancestors.
newAncestorsStart = 1
for index, parent in enumerate(ancestors[newAncestorsStart:ancestorsEnd], newAncestorsStart):
if not parent.isPresentableFocusAncestor:
continue
region = NVDAObjectRegion(parent, appendText=" ")
region._focusAncestorIndex = index
region.update()
yield region
_cachedFocusAncestorsEnd = ancestorsEnd
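# Yield the regions that should represent obj on the braille display:
# an NVDAObjectRegion, optionally followed by a text region if the object exposes useful text.
# Objects can override this behaviour by implementing getBrailleRegions.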
def getFocusRegions(obj, review=False):
# Allow objects to override normal behaviour.
try:
regions = obj.getBrailleRegions(review=review)
except (AttributeError, NotImplementedError):
pass
else:
for region in regions:
region.update()
yield region
return
# Late import to avoid circular import.
from treeInterceptorHandler import TreeInterceptor
from cursorManager import CursorManager
if isinstance(obj, CursorManager):
region2 = (ReviewTextInfoRegion if review else CursorManagerRegion)(obj)
elif isinstance(obj, TreeInterceptor) or NVDAObjectHasUsefulText(obj):
region2 = (ReviewTextInfoRegion if review else TextInfoRegion)(obj)
else:
region2 = None
if isinstance(obj, TreeInterceptor):
obj = obj.rootNVDAObject
region = NVDAObjectRegion(obj, appendText=" " if region2 else "")
region.update()
yield region
if region2:
region2.update()
yield region2
def formatCellsForLog(cells):
"""Formats a sequence of braille cells so that it is suitable for logging.
The output contains the dot numbers for each cell, with each cell separated by a space.
A C{-} indicates an empty cell.
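For example, a cell with dots 1, 2 and 5 (value 19) is formatted as C{125}.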
@param cells: The cells to format.
@type cells: sequence of int
@return: The formatted cells.
@rtype: str
"""
# optimisation: This gets called a lot, so needs to be as efficient as possible.
# List comprehensions without function calls are faster than loops.
# For str.join, list comprehensions are faster than generator comprehensions.
return " ".join([
"".join([str(dot + 1) for dot in xrange(8) if cell & (1 << dot)])
if cell else "-"
for cell in cells])
class BrailleHandler(baseObject.AutoPropertyObject):
TETHER_FOCUS = "focus"
TETHER_REVIEW = "review"
cursorShape = 0xc0
def __init__(self):
self.display = None
self.displaySize = 0
self.mainBuffer = BrailleBuffer(self)
self.messageBuffer = BrailleBuffer(self)
self._messageCallLater = None
self.buffer = self.mainBuffer
#: Whether braille is enabled.
#: @type: bool
self.enabled = False
self._keyCountForLastMessage=0
self._cursorPos = None
self._cursorBlinkUp = True
self._cells = []
self._cursorBlinkTimer = None
def terminate(self):
if self._messageCallLater:
self._messageCallLater.Stop()
self._messageCallLater = None
if self._cursorBlinkTimer:
self._cursorBlinkTimer.Stop()
self._cursorBlinkTimer = None
if self.display:
self.display.terminate()
self.display = None
def _get_tether(self):
return config.conf["braille"]["tetherTo"]
def _set_tether(self, tether):
if tether == config.conf["braille"]["tetherTo"]:
return
config.conf["braille"]["tetherTo"] = tether
self.mainBuffer.clear()
if tether == self.TETHER_REVIEW:
self.handleReviewMove()
else:
self.handleGainFocus(api.getFocusObject())
def setDisplayByName(self, name, isFallback=False):
if not name:
self.display = None
self.displaySize = 0
return
# See if the user has defined a specific port to connect to
if name not in config.conf["braille"]:
# No port was set.
config.conf["braille"][name] = {"port" : ""}
port = config.conf["braille"][name].get("port")
# Here we try to stay compatible with old drivers that don't support port setting
# or situations where the user hasn't set any port.
kwargs = {}
if port:
kwargs["port"] = port
try:
newDisplay = _getDisplayDriver(name)
if newDisplay == self.display.__class__:
# This is the same driver as was already set, so just re-initialise it.
self.display.terminate()
newDisplay = self.display
newDisplay.__init__(**kwargs)
else:
newDisplay = newDisplay(**kwargs)
if self.display:
try:
self.display.terminate()
except:
log.error("Error terminating previous display driver", exc_info=True)
self.display = newDisplay
self.displaySize = newDisplay.numCells
self.enabled = bool(self.displaySize)
if not isFallback:
config.conf["braille"]["display"] = name
log.info("Loaded braille display driver %s, current display has %d cells." %(name, self.displaySize))
return True
except:
log.error("Error initializing display driver", exc_info=True)
self.setDisplayByName("noBraille", isFallback=True)
return False
def _updateDisplay(self):
if self._cursorBlinkTimer:
self._cursorBlinkTimer.Stop()
self._cursorBlinkTimer = None
self._cursorBlinkUp = True
self._displayWithCursor()
blinkRate = config.conf["braille"]["cursorBlinkRate"]
if blinkRate and self._cursorPos is not None:
self._cursorBlinkTimer = wx.PyTimer(self._blink)
self._cursorBlinkTimer.Start(blinkRate)
def _displayWithCursor(self):
if not self._cells:
return
cells = list(self._cells)
if self._cursorPos is not None and self._cursorBlinkUp:
cells[self._cursorPos] |= self.cursorShape
self.display.display(cells)
def _blink(self):
self._cursorBlinkUp = not self._cursorBlinkUp
self._displayWithCursor()
def update(self):
cells = self.buffer.windowBrailleCells
if log.isEnabledFor(log.IO):
log.io("Braille window dots: %s" % formatCellsForLog(cells))
# cells might not be the full length of the display.
# Therefore, pad it with spaces to fill the display.
self._cells = cells + [0] * (self.displaySize - len(cells))
self._cursorPos = self.buffer.cursorWindowPos
self._updateDisplay()
def scrollForward(self):
self.buffer.scrollForward()
if self.buffer is self.messageBuffer:
self._resetMessageTimer()
def scrollBack(self):
self.buffer.scrollBack()
if self.buffer is self.messageBuffer:
self._resetMessageTimer()
def routeTo(self, windowPos):
self.buffer.routeTo(windowPos)
if self.buffer is self.messageBuffer:
self._dismissMessage()
def message(self, text):
"""Display a message to the user which times out after a configured interval.
The timeout will be reset if the user scrolls the display.
The message will be dismissed immediately if the user presses a cursor routing key.
If a key is pressed, the message will be dismissed by the next text written to the display.
@postcondition: The message is displayed.
"""
if not self.enabled or config.conf["braille"]["messageTimeout"] == 0:
return
if self.buffer is self.messageBuffer:
self.buffer.clear()
else:
self.buffer = self.messageBuffer
region = TextRegion(text)
region.update()
self.buffer.regions.append(region)
self.buffer.update()
self.update()
self._resetMessageTimer()
self._keyCountForLastMessage=keyboardHandler.keyCounter
def _resetMessageTimer(self):
"""Reset the message timeout.
@precondition: A message is currently being displayed.
"""
# Configured timeout is in seconds.
timeout = config.conf["braille"]["messageTimeout"] * 1000
if self._messageCallLater:
self._messageCallLater.Restart(timeout)
else:
self._messageCallLater = wx.CallLater(timeout, self._dismissMessage)
def _dismissMessage(self):
"""Dismiss the current message.
@precondition: A message is currently being displayed.
@postcondition: The display returns to the main buffer.
"""
self.buffer.clear()
self.buffer = self.mainBuffer
self._messageCallLater.Stop()
self._messageCallLater = None
self.update()
def handleGainFocus(self, obj):
if not self.enabled:
return
if self.tether != self.TETHER_FOCUS:
return
self._doNewObject(itertools.chain(getFocusContextRegions(obj, oldFocusRegions=self.mainBuffer.regions), getFocusRegions(obj)))
def _doNewObject(self, regions):
self.mainBuffer.clear()
for region in regions:
self.mainBuffer.regions.append(region)
self.mainBuffer.update()
# Last region should receive focus.
self.mainBuffer.focus(region)
if region.brailleCursorPos is not None:
self.mainBuffer.scrollTo(region, region.brailleCursorPos)
if self.buffer is self.mainBuffer:
self.update()
elif self.buffer is self.messageBuffer and keyboardHandler.keyCounter>self._keyCountForLastMessage:
self._dismissMessage()
def handleCaretMove(self, obj):
if not self.enabled:
return
if self.tether != self.TETHER_FOCUS:
return
if not self.mainBuffer.regions:
return
region = self.mainBuffer.regions[-1]
if region.obj is not obj:
return
region.pendingCaretUpdate=True
def handlePendingCaretUpdate(self):
"""Checks to see if the final text region needs its caret updated and if so calls _doCursorMove for the region."""
region=self.mainBuffer.regions[-1] if self.mainBuffer.regions else None
if isinstance(region,TextInfoRegion) and region.pendingCaretUpdate:
try:
self._doCursorMove(region)
finally:
region.pendingCaretUpdate=False
def _doCursorMove(self, region):
self.mainBuffer.saveWindow()
region.update()
self.mainBuffer.update()
self.mainBuffer.restoreWindow()
if region.brailleCursorPos is not None:
self.mainBuffer.scrollTo(region, region.brailleCursorPos)
if self.buffer is self.mainBuffer:
self.update()
elif self.buffer is self.messageBuffer and keyboardHandler.keyCounter>self._keyCountForLastMessage:
self._dismissMessage()
def handleUpdate(self, obj):
if not self.enabled:
return
# Optimisation: It is very likely that it is the focus object that is being updated.
# If the focus object is in the braille buffer, it will be the last region, so scan the regions backwards.
for region in reversed(list(self.mainBuffer.visibleRegions)):
if hasattr(region, "obj") and region.obj == obj:
break
else:
# No region for this object.
return
self.mainBuffer.saveWindow()
region.update()
self.mainBuffer.update()
self.mainBuffer.restoreWindow()
if self.buffer is self.mainBuffer:
self.update()
elif self.buffer is self.messageBuffer and keyboardHandler.keyCounter>self._keyCountForLastMessage:
self._dismissMessage()
def handleReviewMove(self):
if not self.enabled:
return
if self.tether != self.TETHER_REVIEW:
return
reviewPos = api.getReviewPosition()
region = self.mainBuffer.regions[-1] if self.mainBuffer.regions else None
if region and region.obj == reviewPos.obj:
self._doCursorMove(region)
else:
# We're reviewing a different object.
self._doNewObject(getFocusRegions(reviewPos.obj, review=True))
def handleConfigProfileSwitch(self):
display = config.conf["braille"]["display"]
if display != self.display.name:
self.setDisplayByName(display)
def initialize():
global handler
config.addConfigDirsToPythonPackagePath(brailleDisplayDrivers)
log.info("Using liblouis version %s" % louis.version())
handler = BrailleHandler()
handler.setDisplayByName(config.conf["braille"]["display"])
# Update the display to the current focus/review position.
if not handler.enabled or not api.getDesktopObject():
# Braille is disabled or focus/review hasn't yet been initialised.
return
if handler.tether == handler.TETHER_FOCUS:
handler.handleGainFocus(api.getFocusObject())
else:
handler.handleReviewMove()
def pumpAll():
"""Runs tasks at the end of each core cycle. For now just caret updates."""
handler.handlePendingCaretUpdate()
def terminate():
global handler
handler.terminate()
handler = None
class BrailleDisplayDriver(baseObject.AutoPropertyObject):
"""Abstract base braille display driver.
Each braille display driver should be a separate Python module in the root brailleDisplayDrivers directory containing a BrailleDisplayDriver class which inherits from this base class.
At a minimum, drivers must set L{name} and L{description} and override the L{check} method.
To display braille, L{numCells} and L{display} must be implemented.
Drivers should dispatch input such as presses of buttons, wheels or other controls using the L{inputCore} framework.
They should subclass L{BrailleDisplayGesture} and execute instances of those gestures using L{inputCore.manager.executeGesture}.
These gestures can be mapped in L{gestureMap}.
A driver can also inherit L{baseObject.ScriptableObject} to provide display specific scripts.
"""
#: The name of the braille display; must be the original module file name.
#: @type: str
name = ""
#: A description of the braille display.
#: @type: str
description = ""
@classmethod
def check(cls):
"""Determine whether this braille display is available.
The display will be excluded from the list of available displays if this method returns C{False}.
For example, if this display is not present, C{False} should be returned.
@return: C{True} if this display is available, C{False} if not.
@rtype: bool
"""
return False
def terminate(self):
"""Terminate this display driver.
This will be called when NVDA is finished with this display driver.
It should close any open connections, perform cleanup, etc.
Subclasses should call the superclass method first.
@postcondition: This instance can no longer be used unless it is constructed again.
"""
# Clear the display.
try:
self.display([0] * self.numCells)
except:
# The display driver seems to be failing, but we're terminating anyway, so just ignore it.
pass
def _get_numCells(self):
"""Obtain the number of braille cells on this display.
@note: 0 indicates that braille should be disabled.
@return: The number of cells.
@rtype: int
"""
return 0
def display(self, cells):
"""Display the given braille cells.
@param cells: The braille cells to display.
@type cells: [int, ...]
"""
#: Automatic port constant to be used by braille displays that support the "automatic" port
	#: @type: tuple
# Translators: String representing the automatic port selection for braille displays.
AUTOMATIC_PORT = ("auto", _("Automatic"))
@classmethod
def getPossiblePorts(cls):
""" Returns possible hardware ports for this driver.
		If the driver supports automatic port selection, it should return L{brailleDisplayDriver.AUTOMATIC_PORT} as the first port.
@return: ordered dictionary of name : description for each port
@rtype: OrderedDict
"""
raise NotImplementedError
#: Global input gesture map for this display driver.
#: @type: L{inputCore.GlobalGestureMap}
gestureMap = None
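# --- Editor's illustrative sketch (not part of NVDA) ---
# A hypothetical minimal driver following the contract documented on BrailleDisplayDriver:
# it sets name and description, overrides check(), and implements numCells and display().
# The "example" name, the 40 cell count and the port list are assumptions for illustration.
class _ExampleBrailleDisplayDriver(BrailleDisplayDriver):
	name = "example"
	description = "Example 40-cell display (illustration only)"
	@classmethod
	def check(cls):
		# A real driver would probe for its hardware; this sketch reports it as absent.
		return False
	def _get_numCells(self):
		return 40
	def display(self, cells):
		# A real driver would send the dot-pattern integers in cells to the device here.
		pass
	@classmethod
	def getPossiblePorts(cls):
		import collections
		return collections.OrderedDict([cls.AUTOMATIC_PORT])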
class BrailleDisplayGesture(inputCore.InputGesture):
"""A button, wheel or other control pressed on a braille display.
Subclasses must provide L{source} and L{id}.
L{routingIndex} should be provided for routing buttons.
Subclasses can also inherit from L{brailleInput.BrailleInputGesture} if the display has a braille keyboard.
If the braille display driver is a L{baseObject.ScriptableObject}, it can provide scripts specific to input gestures from this display.
"""
def _get_source(self):
"""The string used to identify all gestures from this display.
This should generally be the driver name.
This string will be included in the source portion of gesture identifiers.
For example, if this was C{alvaBC6},
a display specific gesture identifier might be C{br(alvaBC6):etouch1}.
@rtype: str
"""
raise NotImplementedError
def _get_id(self):
"""The unique, display specific id for this gesture.
@rtype: str
"""
raise NotImplementedError
#: The index of the routing key or C{None} if this is not a routing key.
#: @type: int
routingIndex = None
def _get_identifiers(self):
ids = [u"br({source}):{id}".format(source=self.source, id=self.id).lower()]
import brailleInput
if isinstance(self, brailleInput.BrailleInputGesture):
ids.extend(brailleInput.BrailleInputGesture._get_identifiers(self))
return ids
def _get_displayName(self):
import brailleInput
if isinstance(self, brailleInput.BrailleInputGesture):
name = brailleInput.BrailleInputGesture._get_displayName(self)
if name:
return name
return self.id
def _get_scriptableObject(self):
display = handler.display
if isinstance(display, baseObject.ScriptableObject):
return display
return super(BrailleDisplayGesture, self).scriptableObject
@classmethod
def getDisplayTextForIdentifier(cls, identifier):
return handler.display.description, identifier.split(":", 1)[1]
inputCore.registerGestureSource("br", BrailleDisplayGesture)
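# --- Editor's illustrative sketch (not part of NVDA) ---
# A hypothetical gesture for the example driver sketched earlier. Per the documentation on
# BrailleDisplayGesture, its identifier would render as "br(example):dot1". The class name
# and attribute values are assumptions for illustration only.
class _ExampleGesture(BrailleDisplayGesture):
	source = "example"
	id = "dot1"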
| 1 | 17,316 | nit: UNICODE_BRAILLE_TABLE or something might be a better name for this. IMO, braille-patterns.cti is a terrible name. This table allows Unicode braille characters to be used anywhere to produce raw dots. | nvaccess-nvda | py |
@@ -104,9 +104,6 @@ type Builder struct {
// MachineNetwork is the subnet to use for the cluster's machine network.
MachineNetwork string
-
- // SkipMachinePoolGeneration is set to skip generating MachinePool objects
- SkipMachinePoolGeneration bool
}
// Validate ensures that the builder's fields are logically configured and usable to generate the cluster resources. | 1 | package clusterresource
import (
"fmt"
"github.com/ghodss/yaml"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
"github.com/openshift/hive/pkg/constants"
"github.com/openshift/installer/pkg/ipnet"
installertypes "github.com/openshift/installer/pkg/types"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
)
const (
deleteAfterAnnotation = "hive.openshift.io/delete-after"
tryInstallOnceAnnotation = "hive.openshift.io/try-install-once"
)
// Builder can be used to build all artifacts required to create a ClusterDeployment.
type Builder struct {
// Name is the name of your Cluster. Will be used for both the ClusterDeployment.Name and the
// ClusterDeployment.Spec.ClusterName, which encompasses the subdomain and cloud provider resource
// tagging.
Name string
// Namespace where the ClusterDeployment and all associated artifacts will be created.
Namespace string
// Labels are labels to be added to the ClusterDeployment.
Labels map[string]string
// CloudBuilder encapsulates logic for building the objects for a specific cloud.
CloudBuilder CloudBuilder
// PullSecret is the secret to use when pulling images.
PullSecret string
// SSHPrivateKey is an optional SSH key to configure on hosts in the cluster. This would
// typically be read from ~/.ssh/id_rsa.
SSHPrivateKey string
// SSHPublicKey is an optional public SSH key to configure on hosts in the cluster. This would
// typically be read from ~/.ssh/id_rsa.pub. Must match the SSHPrivateKey.
SSHPublicKey string
// InstallOnce indicates that the provision job should not be retried on failure.
InstallOnce bool
// BaseDomain is the DNS base domain to be used for the cluster.
BaseDomain string
// WorkerNodesCount is the number of worker nodes to create in the cluster initially.
WorkerNodesCount int64
// ManageDNS can be set to true to enable Hive's automatic DNS zone creation and forwarding. (assuming
// this is properly configured in HiveConfig)
ManageDNS bool
// DeleteAfter is the duration after which the cluster should be automatically destroyed, relative to
// creationTimestamp. Stored as an annotation on the ClusterDeployment.
DeleteAfter string
// ServingCert is the contents of a serving certificate to be used for the cluster.
ServingCert string
// ServingCertKey is the contents of a key for the ServingCert.
ServingCertKey string
// Adopt is a flag indicating we're adopting a pre-existing cluster.
Adopt bool
// AdoptAdminKubeconfig is a cluster administrator admin kubeconfig typically obtained
// from openshift-install. Required when adopting pre-existing clusters.
AdoptAdminKubeconfig []byte
// AdoptClusterID is the unique generated ID for a cluster being adopted.
// Required when adopting pre-existing clusters.
AdoptClusterID string
// AdoptInfraID is the unique generated infrastructure ID for a cluster being adopted.
// Required when adopting pre-existing clusters.
AdoptInfraID string
// AdoptAdminUsername is the admin username for an adopted cluster, typically written to disk
// after openshift-install create-cluster. This field is optional when adopting.
AdoptAdminUsername string
// AdoptAdminPassword is the admin password for an adopted cluster, typically written to disk
// after openshift-install create-cluster. This field is optional when adopting.
AdoptAdminPassword string
	// InstallerManifests is a map of filename strings to bytes for files to inject into the installer's
// manifests dir before launching create-cluster.
InstallerManifests map[string][]byte
// ImageSet is the ClusterImageSet to use for this cluster.
ImageSet string
// ReleaseImage is a specific OpenShift release image to install this cluster with. Will override
// ImageSet.
ReleaseImage string
// MachineNetwork is the subnet to use for the cluster's machine network.
MachineNetwork string
// SkipMachinePoolGeneration is set to skip generating MachinePool objects
SkipMachinePoolGeneration bool
}
// Validate ensures that the builder's fields are logically configured and usable to generate the cluster resources.
func (o *Builder) Validate() error {
if len(o.Name) == 0 {
return fmt.Errorf("name is required")
}
if len(o.BaseDomain) == 0 {
return fmt.Errorf("BaseDomain is required")
}
if o.CloudBuilder == nil {
return fmt.Errorf("no CloudBuilder configured for this Builder")
}
if len(o.ImageSet) > 0 && len(o.ReleaseImage) > 0 {
return fmt.Errorf("cannot set both ImageSet and ReleaseImage")
}
if len(o.ImageSet) == 0 && len(o.ReleaseImage) == 0 {
return fmt.Errorf("must set either image set or release image")
}
if len(o.ServingCert) > 0 && len(o.ServingCertKey) == 0 {
return fmt.Errorf("must set serving cert key to use with serving cert")
}
if o.Adopt {
if len(o.AdoptAdminKubeconfig) == 0 || o.AdoptInfraID == "" || o.AdoptClusterID == "" {
return fmt.Errorf("must specify the following fields to adopt a cluster: AdoptAdminKubeConfig AdoptInfraID AdoptClusterID")
}
if (o.AdoptAdminUsername != "" || o.AdoptAdminPassword != "") && !(o.AdoptAdminUsername != "" && o.AdoptAdminPassword != "") {
return fmt.Errorf("either both AdoptAdminPassword and AdoptAdminUsername must be set, or neither")
}
} else {
if len(o.AdoptAdminKubeconfig) > 0 || o.AdoptInfraID != "" || o.AdoptClusterID != "" || o.AdoptAdminUsername != "" || o.AdoptAdminPassword != "" {
return fmt.Errorf("cannot set adoption fields if Adopt is false")
}
}
return nil
}
// Build generates all resources using the fields configured.
func (o *Builder) Build() ([]runtime.Object, error) {
if err := o.Validate(); err != nil {
return nil, err
}
var allObjects []runtime.Object
allObjects = append(allObjects, o.generateClusterDeployment())
if !o.SkipMachinePoolGeneration {
allObjects = append(allObjects, o.generateMachinePool())
}
installConfigSecret, err := o.generateInstallConfigSecret()
if err != nil {
return nil, err
}
allObjects = append(allObjects, installConfigSecret)
// TODO: maintain "include secrets" flag functionality? possible this should just be removed
if len(o.PullSecret) != 0 {
allObjects = append(allObjects, o.generatePullSecretSecret())
}
if o.SSHPrivateKey != "" {
allObjects = append(allObjects, o.generateSSHPrivateKeySecret())
}
if o.ServingCertKey != "" && o.ServingCert != "" {
allObjects = append(allObjects, o.generateServingCertSecret())
}
cloudCredsSecret := o.CloudBuilder.generateCredentialsSecret(o)
if cloudCredsSecret != nil {
allObjects = append(allObjects, cloudCredsSecret)
}
if o.InstallerManifests != nil {
allObjects = append(allObjects, o.generateInstallerManifestsConfigMap())
}
if o.Adopt {
allObjects = append(allObjects, o.generateAdminKubeconfigSecret())
if o.AdoptAdminUsername != "" {
allObjects = append(allObjects, o.generateAdoptedAdminPasswordSecret())
}
}
return allObjects, nil
}
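// exampleBuild is an editor's illustrative sketch (not part of hive): it shows how a
// caller might wire up a Builder and generate the cluster artifacts. All concrete values
// are assumptions; exampleCloudBuilder is the no-op sketch added after the CloudBuilder
// interface below.
func exampleBuild() ([]runtime.Object, error) {
	b := &Builder{
		Name:             "mycluster",
		Namespace:        "hive",
		BaseDomain:       "example.com",
		WorkerNodesCount: 3,
		MachineNetwork:   "10.0.0.0/16",
		ImageSet:         "openshift-v4",
		PullSecret:       `{"auths":{}}`,
		CloudBuilder:     &exampleCloudBuilder{},
	}
	return b.Build()
}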
func (o *Builder) generateClusterDeployment() *hivev1.ClusterDeployment {
cd := &hivev1.ClusterDeployment{
TypeMeta: metav1.TypeMeta{
Kind: "ClusterDeployment",
APIVersion: hivev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: o.Name,
Namespace: o.Namespace,
Annotations: map[string]string{},
Labels: o.Labels,
},
Spec: hivev1.ClusterDeploymentSpec{
ClusterName: o.Name,
BaseDomain: o.BaseDomain,
ManageDNS: o.ManageDNS,
Provisioning: &hivev1.Provisioning{},
},
}
if o.SSHPrivateKey != "" {
cd.Spec.Provisioning.SSHPrivateKeySecretRef = &corev1.LocalObjectReference{Name: o.getSSHPrivateKeySecretName()}
}
if o.InstallOnce {
cd.Annotations[tryInstallOnceAnnotation] = "true"
}
if o.PullSecret != "" {
cd.Spec.PullSecretRef = &corev1.LocalObjectReference{Name: o.getPullSecretSecretName()}
}
if len(o.ServingCert) > 0 {
cd.Spec.CertificateBundles = []hivev1.CertificateBundleSpec{
{
Name: "serving-cert",
CertificateSecretRef: corev1.LocalObjectReference{
Name: fmt.Sprintf("%s-serving-cert", o.Name),
},
},
}
cd.Spec.ControlPlaneConfig.ServingCertificates.Default = "serving-cert"
cd.Spec.Ingress = []hivev1.ClusterIngress{
{
Name: "default",
Domain: fmt.Sprintf("apps.%s.%s", o.Name, o.BaseDomain),
ServingCertificate: "serving-cert",
},
}
}
if o.DeleteAfter != "" {
cd.ObjectMeta.Annotations[deleteAfterAnnotation] = o.DeleteAfter
}
if o.Adopt {
cd.Spec.ClusterMetadata = &hivev1.ClusterMetadata{
ClusterID: o.AdoptClusterID,
InfraID: o.AdoptInfraID,
AdminKubeconfigSecretRef: corev1.LocalObjectReference{Name: o.getAdoptAdminKubeconfigSecretName()},
}
cd.Spec.Installed = true
if o.AdoptAdminUsername != "" {
cd.Spec.ClusterMetadata.AdminPasswordSecretRef = corev1.LocalObjectReference{
Name: o.getAdoptAdminPasswordSecretName(),
}
}
}
if o.InstallerManifests != nil {
cd.Spec.Provisioning.ManifestsConfigMapRef = &corev1.LocalObjectReference{
Name: o.getManifestsConfigMapName(),
}
}
if o.ReleaseImage != "" {
cd.Spec.Provisioning.ReleaseImage = o.ReleaseImage
} else if o.ImageSet != "" {
cd.Spec.Provisioning.ImageSetRef = &hivev1.ClusterImageSetReference{Name: o.ImageSet}
}
cd.Spec.Provisioning.InstallConfigSecretRef = corev1.LocalObjectReference{Name: o.getInstallConfigSecretName()}
o.CloudBuilder.addClusterDeploymentPlatform(o, cd)
return cd
}
func (o *Builder) generateInstallConfigSecret() (*corev1.Secret, error) {
installConfig := &installertypes.InstallConfig{
ObjectMeta: metav1.ObjectMeta{
Name: o.Name,
},
TypeMeta: metav1.TypeMeta{
APIVersion: installertypes.InstallConfigVersion,
},
SSHKey: o.SSHPublicKey,
BaseDomain: o.BaseDomain,
Networking: &installertypes.Networking{
NetworkType: "OpenShiftSDN",
ServiceNetwork: []ipnet.IPNet{*ipnet.MustParseCIDR("172.30.0.0/16")},
ClusterNetwork: []installertypes.ClusterNetworkEntry{
{
CIDR: *ipnet.MustParseCIDR("10.128.0.0/14"),
HostPrefix: 23,
},
},
MachineNetwork: []installertypes.MachineNetworkEntry{
{
CIDR: *ipnet.MustParseCIDR(o.MachineNetwork),
},
},
},
ControlPlane: &installertypes.MachinePool{
Name: "master",
Replicas: pointer.Int64Ptr(3),
},
Compute: []installertypes.MachinePool{
{
Name: "worker",
Replicas: &o.WorkerNodesCount,
},
},
}
o.CloudBuilder.addInstallConfigPlatform(o, installConfig)
d, err := yaml.Marshal(installConfig)
if err != nil {
return nil, err
}
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: o.getInstallConfigSecretName(),
Namespace: o.Namespace,
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"install-config.yaml": d,
},
}, nil
}
func (o *Builder) generateMachinePool() *hivev1.MachinePool {
mp := &hivev1.MachinePool{
TypeMeta: metav1.TypeMeta{
Kind: "MachinePool",
APIVersion: hivev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-worker", o.Name),
Namespace: o.Namespace,
},
Spec: hivev1.MachinePoolSpec{
ClusterDeploymentRef: corev1.LocalObjectReference{
Name: o.Name,
},
Name: "worker",
Replicas: pointer.Int64Ptr(o.WorkerNodesCount),
},
}
o.CloudBuilder.addMachinePoolPlatform(o, mp)
return mp
}
func (o *Builder) getInstallConfigSecretName() string {
return fmt.Sprintf("%s-install-config", o.Name)
}
// generatePullSecretSecret returns a Kubernetes Secret containing the pull secret to be
// used for pulling images.
func (o *Builder) generatePullSecretSecret() *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: o.getPullSecretSecretName(),
Namespace: o.Namespace,
},
Type: corev1.SecretTypeDockerConfigJson,
StringData: map[string]string{
corev1.DockerConfigJsonKey: o.PullSecret,
},
}
}
// generateSSHPrivateKeySecret returns a Kubernetes Secret containing the SSH private
// key to be used.
func (o *Builder) generateSSHPrivateKeySecret() *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: o.getSSHPrivateKeySecretName(),
Namespace: o.Namespace,
},
Type: corev1.SecretTypeOpaque,
StringData: map[string]string{
constants.SSHPrivateKeySecretKey: o.SSHPrivateKey,
},
}
}
func (o *Builder) generateServingCertSecret() *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: o.getServingCertSecretName(),
Namespace: o.Namespace,
},
Type: corev1.SecretTypeTLS,
StringData: map[string]string{
constants.TLSCrtSecretKey: o.ServingCert,
constants.TLSKeySecretKey: o.ServingCertKey,
},
}
}
func (o *Builder) generateAdminKubeconfigSecret() *corev1.Secret {
return &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: o.getAdoptAdminKubeconfigSecretName(),
Namespace: o.Namespace,
},
Data: map[string][]byte{
constants.KubeconfigSecretKey: o.AdoptAdminKubeconfig,
constants.RawKubeconfigSecretKey: o.AdoptAdminKubeconfig,
},
}
}
func (o *Builder) generateInstallerManifestsConfigMap() *corev1.ConfigMap {
return &corev1.ConfigMap{
TypeMeta: metav1.TypeMeta{
Kind: "ConfigMap",
APIVersion: corev1.SchemeGroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: o.getManifestsConfigMapName(),
Namespace: o.Namespace,
},
BinaryData: o.InstallerManifests,
}
}
func (o *Builder) generateAdoptedAdminPasswordSecret() *corev1.Secret {
if o.AdoptAdminUsername == "" {
return nil
}
adminPasswordSecret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: o.getAdoptAdminPasswordSecretName(),
Namespace: o.Namespace,
},
StringData: map[string]string{
"username": o.AdoptAdminUsername,
"password": o.AdoptAdminPassword,
},
}
return adminPasswordSecret
}
func (o *Builder) getManifestsConfigMapName() string {
return fmt.Sprintf("%s-manifests", o.Name)
}
func (o *Builder) getAdoptAdminPasswordSecretName() string {
return fmt.Sprintf("%s-adopted-admin-password", o.Name)
}
func (o *Builder) getServingCertSecretName() string {
return fmt.Sprintf("%s-serving-cert", o.Name)
}
func (o *Builder) getAdoptAdminKubeconfigSecretName() string {
return fmt.Sprintf("%s-adopted-admin-kubeconfig", o.Name)
}
// TODO: handle long cluster names.
func (o *Builder) getSSHPrivateKeySecretName() string {
return fmt.Sprintf("%s-ssh-private-key", o.Name)
}
// TODO: handle long cluster names.
func (o *Builder) getPullSecretSecretName() string {
return fmt.Sprintf("%s-pull-secret", o.Name)
}
// CloudBuilder interface exposes the functions we will use to set cloud specific portions of the cluster's resources.
type CloudBuilder interface {
addClusterDeploymentPlatform(o *Builder, cd *hivev1.ClusterDeployment)
addMachinePoolPlatform(o *Builder, mp *hivev1.MachinePool)
addInstallConfigPlatform(o *Builder, ic *installertypes.InstallConfig)
generateCredentialsSecret(o *Builder) *corev1.Secret
}
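// exampleCloudBuilder is an editor's illustrative sketch (not part of hive): a no-op
// implementation that only shows the shape the CloudBuilder interface above expects.
// A real implementation would fill in platform details and return a credentials secret.
type exampleCloudBuilder struct{}

func (b *exampleCloudBuilder) addClusterDeploymentPlatform(o *Builder, cd *hivev1.ClusterDeployment) {}
func (b *exampleCloudBuilder) addMachinePoolPlatform(o *Builder, mp *hivev1.MachinePool)             {}
func (b *exampleCloudBuilder) addInstallConfigPlatform(o *Builder, ic *installertypes.InstallConfig) {}
func (b *exampleCloudBuilder) generateCredentialsSecret(o *Builder) *corev1.Secret                   { return nil }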
| 1 | 11,785 | Why are we removing the option to skip machine pool generation? | openshift-hive | go |
@@ -0,0 +1,9 @@
+using System;
+
+namespace Microsoft.AspNetCore.Server.Kestrel.Internal.Http
+{
+ public interface IHttpStartLineHandler
+ {
+ void OnStartLine(HttpMethod method, HttpVersion version, Span<byte> target, Span<byte> path, Span<byte> query, Span<byte> customMethod);
+ }
+} | 1 | 1 | 11,700 | "Request line" here too. | aspnet-KestrelHttpServer | .cs |
|
@@ -1292,6 +1292,19 @@ void Corpse::LootItem(Client *client, const EQApplicationPacket *app)
std::vector<EQ::Any> args;
args.push_back(inst);
args.push_back(this);
+ if (RuleB(Zone, UseZoneController)) {
+ if (entity_list.GetNPCByNPCTypeID(ZONE_CONTROLLER_NPC_ID)){
+ if (parse->EventNPC(EVENT_LOOT_ZONE, entity_list.GetNPCByNPCTypeID(ZONE_CONTROLLER_NPC_ID)->CastToNPC(), client, buf, 0, &args) != 0) {
+ lootitem->auto_loot = -1;
+ client->MessageString(Chat::Red, LOOT_NOT_ALLOWED, inst->GetItem()->Name);
+ client->QueuePacket(app);
+ delete inst;
+ return;
+ }
+ }
+ }
+
+
if (parse->EventPlayer(EVENT_LOOT, client, buf, 0, &args) != 0) {
lootitem->auto_loot = -1;
client->MessageString(Chat::Red, LOOT_NOT_ALLOWED, inst->GetItem()->Name); | 1 | /* EQEMu: Everquest Server Emulator
Copyright (C) 2001-2003 EQEMu Development Team (http://eqemulator.net)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY except by those people which sell it, which
are required to give you total support for your newly bought product;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
	New class for handling corpses and everything associated with them.
Child of the Mob class.
-Quagmire
*/
#ifdef _WINDOWS
#if (!defined(_MSC_VER) || (defined(_MSC_VER) && _MSC_VER < 1900))
#define snprintf _snprintf
#define vsnprintf _vsnprintf
#endif
#define strncasecmp _strnicmp
#define strcasecmp _stricmp
#endif
#include "../common/global_define.h"
#include "../common/eqemu_logsys.h"
#include "../common/rulesys.h"
#include "../common/string_util.h"
#include "../common/say_link.h"
#include "corpse.h"
#include "entity.h"
#include "expedition.h"
#include "groups.h"
#include "mob.h"
#include "raids.h"
#ifdef BOTS
#include "bot.h"
#endif
#include "quest_parser_collection.h"
#include "string_ids.h"
#include "worldserver.h"
#include <iostream>
extern EntityList entity_list;
extern Zone* zone;
extern WorldServer worldserver;
extern npcDecayTimes_Struct npcCorpseDecayTimes[100];
void Corpse::SendEndLootErrorPacket(Client* client) {
auto outapp = new EQApplicationPacket(OP_LootComplete, 0);
client->QueuePacket(outapp);
safe_delete(outapp);
}
void Corpse::SendLootReqErrorPacket(Client* client, LootResponse response) {
auto outapp = new EQApplicationPacket(OP_MoneyOnCorpse, sizeof(moneyOnCorpseStruct));
moneyOnCorpseStruct* d = (moneyOnCorpseStruct*) outapp->pBuffer;
d->response = static_cast<uint8>(response);
d->unknown1 = 0x5a;
d->unknown2 = 0x40;
client->QueuePacket(outapp);
safe_delete(outapp);
}
Corpse* Corpse::LoadCharacterCorpseEntity(uint32 in_dbid, uint32 in_charid, std::string in_charname, const glm::vec4& position, std::string time_of_death, bool rezzed, bool was_at_graveyard, uint32 guild_consent_id) {
uint32 item_count = database.GetCharacterCorpseItemCount(in_dbid);
auto buffer =
new char[sizeof(PlayerCorpse_Struct) + (item_count * sizeof(player_lootitem::ServerLootItem_Struct))];
PlayerCorpse_Struct *pcs = (PlayerCorpse_Struct*)buffer;
database.LoadCharacterCorpseData(in_dbid, pcs);
/* Load Items */
ItemList itemlist;
ServerLootItem_Struct* tmp = nullptr;
for (unsigned int i = 0; i < pcs->itemcount; i++) {
tmp = new ServerLootItem_Struct;
memcpy(tmp, &pcs->items[i], sizeof(player_lootitem::ServerLootItem_Struct));
itemlist.push_back(tmp);
}
/* Create Corpse Entity */
auto pc = new Corpse(in_dbid, // uint32 in_dbid
in_charid, // uint32 in_charid
in_charname.c_str(), // char* in_charname
&itemlist, // ItemList* in_itemlist
pcs->copper, // uint32 in_copper
pcs->silver, // uint32 in_silver
pcs->gold, // uint32 in_gold
pcs->plat, // uint32 in_plat
position,
pcs->size, // float in_size
pcs->gender, // uint8 in_gender
pcs->race, // uint16 in_race
pcs->class_, // uint8 in_class
pcs->deity, // uint8 in_deity
pcs->level, // uint8 in_level
pcs->texture, // uint8 in_texture
pcs->helmtexture, // uint8 in_helmtexture
pcs->exp, // uint32 in_rezexp
was_at_graveyard // bool wasAtGraveyard
);
if (pcs->locked)
pc->Lock();
/* Load Item Tints */
pc->item_tint.Head.Color = pcs->item_tint.Head.Color;
pc->item_tint.Chest.Color = pcs->item_tint.Chest.Color;
pc->item_tint.Arms.Color = pcs->item_tint.Arms.Color;
pc->item_tint.Wrist.Color = pcs->item_tint.Wrist.Color;
pc->item_tint.Hands.Color = pcs->item_tint.Hands.Color;
pc->item_tint.Legs.Color = pcs->item_tint.Legs.Color;
pc->item_tint.Feet.Color = pcs->item_tint.Feet.Color;
pc->item_tint.Primary.Color = pcs->item_tint.Primary.Color;
pc->item_tint.Secondary.Color = pcs->item_tint.Secondary.Color;
/* Load Physical Appearance */
pc->haircolor = pcs->haircolor;
pc->beardcolor = pcs->beardcolor;
pc->eyecolor1 = pcs->eyecolor1;
pc->eyecolor2 = pcs->eyecolor2;
pc->hairstyle = pcs->hairstyle;
pc->luclinface = pcs->face;
pc->beard = pcs->beard;
pc->drakkin_heritage = pcs->drakkin_heritage;
pc->drakkin_tattoo = pcs->drakkin_tattoo;
pc->drakkin_details = pcs->drakkin_details;
pc->IsRezzed(rezzed);
pc->become_npc = false;
pc->consented_guild_id = guild_consent_id;
pc->UpdateEquipmentLight(); // itemlist populated above..need to determine actual values
safe_delete_array(pcs);
return pc;
}
Corpse::Corpse(NPC* in_npc, ItemList* in_itemlist, uint32 in_npctypeid, const NPCType** in_npctypedata, uint32 in_decaytime)
	// vesuvias - appearance fix
: Mob("Unnamed_Corpse","",0,0,in_npc->GetGender(),in_npc->GetRace(),in_npc->GetClass(),BT_Humanoid,//bodytype added
in_npc->GetDeity(),in_npc->GetLevel(),in_npc->GetNPCTypeID(),in_npc->GetSize(),0,
in_npc->GetPosition(), in_npc->GetInnateLightType(), in_npc->GetTexture(),in_npc->GetHelmTexture(),
0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,EQ::TintProfile(),0xff,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
(*in_npctypedata)->use_model, false),
corpse_decay_timer(in_decaytime),
corpse_rez_timer(0),
corpse_delay_timer(RuleI(NPC, CorpseUnlockTimer)),
corpse_graveyard_timer(0),
loot_cooldown_timer(10)
{
corpse_graveyard_timer.Disable();
is_corpse_changed = false;
is_player_corpse = false;
is_locked = false;
being_looted_by = 0xFFFFFFFF;
if (in_itemlist) {
itemlist = *in_itemlist;
in_itemlist->clear();
}
SetCash(in_npc->GetCopper(), in_npc->GetSilver(), in_npc->GetGold(), in_npc->GetPlatinum());
npctype_id = in_npctypeid;
SetPlayerKillItemID(0);
char_id = 0;
corpse_db_id = 0;
player_corpse_depop = false;
strcpy(corpse_name, in_npc->GetName());
strcpy(name, in_npc->GetName());
for(int count = 0; count < 100; count++) {
if ((level >= npcCorpseDecayTimes[count].minlvl) && (level <= npcCorpseDecayTimes[count].maxlvl)) {
corpse_decay_timer.SetTimer(npcCorpseDecayTimes[count].seconds*1000);
break;
}
}
if(IsEmpty()) {
corpse_decay_timer.SetTimer(RuleI(NPC,EmptyNPCCorpseDecayTimeMS)+1000);
}
if(in_npc->HasPrivateCorpse()) {
corpse_delay_timer.SetTimer(corpse_decay_timer.GetRemainingTime() + 1000);
}
for (int i = 0; i < MAX_LOOTERS; i++){
allowed_looters[i] = 0;
}
this->rez_experience = 0;
UpdateEquipmentLight();
UpdateActiveLight();
loot_request_type = LootRequestType::Forbidden;
}
Corpse::Corpse(Client* client, int32 in_rezexp) : Mob (
"Unnamed_Corpse", // const char* in_name,
"", // const char* in_lastname,
0, // int32 in_cur_hp,
0, // int32 in_max_hp,
client->GetGender(), // uint8 in_gender,
client->GetRace(), // uint16 in_race,
client->GetClass(), // uint8 in_class,
BT_Humanoid, // bodyType in_bodytype,
client->GetDeity(), // uint8 in_deity,
client->GetLevel(), // uint8 in_level,
0, // uint32 in_npctype_id,
client->GetSize(), // float in_size,
0, // float in_runspeed,
client->GetPosition(),
client->GetInnateLightType(), // uint8 in_light, - verified for client innate_light value
client->GetTexture(), // uint8 in_texture,
client->GetHelmTexture(), // uint8 in_helmtexture,
0, // uint16 in_ac,
0, // uint16 in_atk,
0, // uint16 in_str,
0, // uint16 in_sta,
0, // uint16 in_dex,
0, // uint16 in_agi,
0, // uint16 in_int,
0, // uint16 in_wis,
0, // uint16 in_cha,
client->GetPP().haircolor, // uint8 in_haircolor,
client->GetPP().beardcolor, // uint8 in_beardcolor,
client->GetPP().eyecolor1, // uint8 in_eyecolor1, // the eyecolors always seem to be the same, maybe left and right eye?
client->GetPP().eyecolor2, // uint8 in_eyecolor2,
client->GetPP().hairstyle, // uint8 in_hairstyle,
client->GetPP().face, // uint8 in_luclinface,
client->GetPP().beard, // uint8 in_beard,
client->GetPP().drakkin_heritage, // uint32 in_drakkin_heritage,
client->GetPP().drakkin_tattoo, // uint32 in_drakkin_tattoo,
client->GetPP().drakkin_details, // uint32 in_drakkin_details,
EQ::TintProfile(), // uint32 in_armor_tint[_MaterialCount],
0xff, // uint8 in_aa_title,
0, // uint8 in_see_invis, // see through invis
0, // uint8 in_see_invis_undead, // see through invis vs. undead
0, // uint8 in_see_hide,
0, // uint8 in_see_improved_hide,
0, // int32 in_hp_regen,
0, // int32 in_mana_regen,
0, // uint8 in_qglobal,
0, // uint8 in_maxlevel,
0, // uint32 in_scalerate
0, // uint8 in_armtexture,
0, // uint8 in_bracertexture,
0, // uint8 in_handtexture,
0, // uint8 in_legtexture,
0, // uint8 in_feettexture,
0, // uint8 in_usemodel,
0 // bool in_always_aggro
),
corpse_decay_timer(RuleI(Character, CorpseDecayTimeMS)),
corpse_rez_timer(RuleI(Character, CorpseResTimeMS)),
corpse_delay_timer(RuleI(NPC, CorpseUnlockTimer)),
corpse_graveyard_timer(RuleI(Zone, GraveyardTimeMS)),
loot_cooldown_timer(10)
{
int i;
PlayerProfile_Struct *pp = &client->GetPP();
EQ::ItemInstance *item = nullptr;
/* Check if Zone has Graveyard First */
if(!zone->HasGraveyard()) {
corpse_graveyard_timer.Disable();
}
for (i = 0; i < MAX_LOOTERS; i++){
allowed_looters[i] = 0;
}
if (client->AutoConsentGroupEnabled()) {
Group* grp = client->GetGroup();
consented_group_id = grp ? grp->GetID() : 0;
}
if (client->AutoConsentRaidEnabled()) {
Raid* raid = client->GetRaid();
consented_raid_id = raid ? raid->GetID() : 0;
}
consented_guild_id = client->AutoConsentGuildEnabled() ? client->GuildID() : 0;
is_corpse_changed = true;
rez_experience = in_rezexp;
can_corpse_be_rezzed = true;
is_player_corpse = true;
is_locked = false;
being_looted_by = 0xFFFFFFFF;
char_id = client->CharacterID();
corpse_db_id = 0;
player_corpse_depop = false;
copper = 0;
silver = 0;
gold = 0;
platinum = 0;
strcpy(corpse_name, pp->name);
strcpy(name, pp->name);
/* become_npc was not being initialized which led to some pretty funky things with newly created corpses */
become_npc = false;
SetPlayerKillItemID(0);
/* Check Rule to see if we can leave corpses */
if(!RuleB(Character, LeaveNakedCorpses) ||
RuleB(Character, LeaveCorpses) &&
GetLevel() >= RuleI(Character, DeathItemLossLevel)) {
// cash
// Let's not move the cash when 'RespawnFromHover = true' && 'client->GetClientVersion() < EQClientSoF' since the client doesn't.
// (change to first client that supports 'death hover' mode, if not SoF.)
if (!RuleB(Character, RespawnFromHover) || client->ClientVersion() < EQ::versions::ClientVersion::SoF) {
SetCash(pp->copper, pp->silver, pp->gold, pp->platinum);
pp->copper = 0;
pp->silver = 0;
pp->gold = 0;
pp->platinum = 0;
}
// get their tints
memcpy(&item_tint.Slot, &client->GetPP().item_tint, sizeof(item_tint));
// TODO soulbound items need not be added to corpse, but they need
// to go into the regular slots on the player, out of bags
std::list<uint32> removed_list;
// ideally, we would start at invslot::slotGeneral1 and progress to invslot::slotCursor..
// ..then regress and process invslot::EQUIPMENT_BEGIN through invslot::EQUIPMENT_END...
// without additional work to database loading of player corpses, this order is not
// currently preserved and a re-work of this processing loop is not warranted.
for (i = EQ::invslot::POSSESSIONS_BEGIN; i <= EQ::invslot::POSSESSIONS_END; ++i) {
item = client->GetInv().GetItem(i);
if (item == nullptr) { continue; }
if(!client->IsBecomeNPC() || (client->IsBecomeNPC() && !item->GetItem()->NoRent))
MoveItemToCorpse(client, item, i, removed_list);
}
database.TransactionBegin();
// this should not be modified to include the entire range of invtype::TYPE_POSSESSIONS slots by default..
// ..due to the possibility of 'hidden' items from client version bias..or, possibly, soul-bound items (WoW?)
if (!removed_list.empty()) {
std::list<uint32>::const_iterator iter = removed_list.begin();
if (iter != removed_list.end()) {
std::stringstream ss("");
ss << "DELETE FROM `inventory` WHERE `charid` = " << client->CharacterID();
ss << " AND `slotid` IN (" << (*iter);
++iter;
while (iter != removed_list.end()) {
ss << ", " << (*iter);
++iter;
}
ss << ")";
database.QueryDatabase(ss.str().c_str());
}
}
auto start = client->GetInv().cursor_cbegin();
auto finish = client->GetInv().cursor_cend();
database.SaveCursor(client->CharacterID(), start, finish);
client->CalcBonuses();
client->Save();
IsRezzed(false);
Save();
database.TransactionCommit();
UpdateEquipmentLight();
UpdateActiveLight();
return;
} //end "not leaving naked corpses"
UpdateEquipmentLight();
UpdateActiveLight();
loot_request_type = LootRequestType::Forbidden;
IsRezzed(false);
Save();
}
void Corpse::MoveItemToCorpse(Client *client, EQ::ItemInstance *inst, int16 equipSlot, std::list<uint32> &removedList)
{
AddItem(
inst->GetItem()->ID,
inst->GetCharges(),
equipSlot,
inst->GetAugmentItemID(0),
inst->GetAugmentItemID(1),
inst->GetAugmentItemID(2),
inst->GetAugmentItemID(3),
inst->GetAugmentItemID(4),
inst->GetAugmentItemID(5),
inst->IsAttuned()
);
removedList.push_back(equipSlot);
while (true) {
if (!inst->IsClassBag()) { break; }
if (equipSlot < EQ::invslot::GENERAL_BEGIN || equipSlot > EQ::invslot::slotCursor) { break; }
for (int16 sub_index = EQ::invbag::SLOT_BEGIN; sub_index <= EQ::invbag::SLOT_END; ++sub_index) {
int16 real_bag_slot = EQ::InventoryProfile::CalcSlotId(equipSlot, sub_index);
auto bag_inst = client->GetInv().GetItem(real_bag_slot);
if (bag_inst == nullptr) { continue; }
AddItem(
bag_inst->GetItem()->ID,
bag_inst->GetCharges(),
real_bag_slot,
bag_inst->GetAugmentItemID(0),
bag_inst->GetAugmentItemID(1),
bag_inst->GetAugmentItemID(2),
bag_inst->GetAugmentItemID(3),
bag_inst->GetAugmentItemID(4),
bag_inst->GetAugmentItemID(5),
bag_inst->IsAttuned()
);
removedList.push_back(real_bag_slot);
client->DeleteItemInInventory(real_bag_slot, 0, true, false);
}
break;
}
client->DeleteItemInInventory(equipSlot, 0, true, false);
}
// To be called from LoadFromDBData
Corpse::Corpse(uint32 in_dbid, uint32 in_charid, const char* in_charname, ItemList* in_itemlist, uint32 in_copper, uint32 in_silver, uint32 in_gold, uint32 in_plat, const glm::vec4& position, float in_size, uint8 in_gender, uint16 in_race, uint8 in_class, uint8 in_deity, uint8 in_level, uint8 in_texture, uint8 in_helmtexture,uint32 in_rezexp, bool wasAtGraveyard)
: Mob("Unnamed_Corpse",
"",
0,
0,
in_gender,
in_race,
in_class,
BT_Humanoid,
in_deity,
in_level,
0,
in_size,
0,
position,
0, // verified for client innate_light value
in_texture,
in_helmtexture,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
EQ::TintProfile(),
0xff,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
false),
corpse_decay_timer(RuleI(Character, CorpseDecayTimeMS)),
corpse_rez_timer(RuleI(Character, CorpseResTimeMS)),
corpse_delay_timer(RuleI(NPC, CorpseUnlockTimer)),
corpse_graveyard_timer(RuleI(Zone, GraveyardTimeMS)),
loot_cooldown_timer(10)
{
LoadPlayerCorpseDecayTime(in_dbid);
if (!zone->HasGraveyard() || wasAtGraveyard)
corpse_graveyard_timer.Disable();
is_corpse_changed = false;
is_player_corpse = true;
is_locked = false;
being_looted_by = 0xFFFFFFFF;
corpse_db_id = in_dbid;
player_corpse_depop = false;
char_id = in_charid;
itemlist = *in_itemlist;
in_itemlist->clear();
strcpy(corpse_name, in_charname);
strcpy(name, in_charname);
this->copper = in_copper;
this->silver = in_silver;
this->gold = in_gold;
this->platinum = in_plat;
rez_experience = in_rezexp;
for (int i = 0; i < MAX_LOOTERS; i++){
allowed_looters[i] = 0;
}
SetPlayerKillItemID(0);
UpdateEquipmentLight();
UpdateActiveLight();
loot_request_type = LootRequestType::Forbidden;
}
Corpse::~Corpse() {
if (is_player_corpse && !(player_corpse_depop && corpse_db_id == 0)) {
Save();
}
ItemList::iterator cur,end;
cur = itemlist.begin();
end = itemlist.end();
for(; cur != end; ++cur) {
ServerLootItem_Struct* item = *cur;
safe_delete(item);
}
itemlist.clear();
}
/*
this needs to be called AFTER the entity_id is set
	the client does this too, so it's unchangeable
*/
void Corpse::CalcCorpseName() {
EntityList::RemoveNumbers(name);
char tmp[64];
if (is_player_corpse){
snprintf(tmp, sizeof(tmp), "'s corpse%d", GetID());
}
else{
snprintf(tmp, sizeof(tmp), "`s_corpse%d", GetID());
}
name[(sizeof(name) - 1) - strlen(tmp)] = 0;
strcat(name, tmp);
}
bool Corpse::Save() {
if (!is_player_corpse)
return true;
if (!is_corpse_changed)
return true;
uint32 tmp = this->CountItems();
uint32 tmpsize = sizeof(PlayerCorpse_Struct) + (tmp * sizeof(player_lootitem::ServerLootItem_Struct));
PlayerCorpse_Struct* dbpc = (PlayerCorpse_Struct*) new uchar[tmpsize];
memset(dbpc, 0, tmpsize);
dbpc->itemcount = tmp;
dbpc->size = this->size;
dbpc->locked = is_locked;
dbpc->copper = this->copper;
dbpc->silver = this->silver;
dbpc->gold = this->gold;
dbpc->plat = this->platinum;
dbpc->race = this->race;
dbpc->class_ = class_;
dbpc->gender = gender;
dbpc->deity = deity;
dbpc->level = level;
dbpc->texture = this->texture;
dbpc->helmtexture = this->helmtexture;
dbpc->exp = rez_experience;
memcpy(&dbpc->item_tint.Slot, &item_tint.Slot, sizeof(dbpc->item_tint));
dbpc->haircolor = haircolor;
dbpc->beardcolor = beardcolor;
	dbpc->eyecolor1 = eyecolor1;
	dbpc->eyecolor2 = eyecolor2;
dbpc->hairstyle = hairstyle;
dbpc->face = luclinface;
dbpc->beard = beard;
dbpc->drakkin_heritage = drakkin_heritage;
dbpc->drakkin_tattoo = drakkin_tattoo;
dbpc->drakkin_details = drakkin_details;
uint32 x = 0;
ItemList::iterator cur, end;
cur = itemlist.begin();
end = itemlist.end();
for (; cur != end; ++cur) {
ServerLootItem_Struct* item = *cur;
memcpy((char*)&dbpc->items[x++], (char*)item, sizeof(player_lootitem::ServerLootItem_Struct));
}
/* Create New Corpse*/
if (corpse_db_id == 0) {
corpse_db_id = database.SaveCharacterCorpse(char_id, corpse_name, zone->GetZoneID(), zone->GetInstanceID(), dbpc, m_Position, consented_guild_id);
}
/* Update Corpse Data */
else{
corpse_db_id = database.UpdateCharacterCorpse(corpse_db_id, char_id, corpse_name, zone->GetZoneID(), zone->GetInstanceID(), dbpc, m_Position, consented_guild_id, IsRezzed());
}
safe_delete_array(dbpc);
return true;
}
void Corpse::Delete() {
if (IsPlayerCorpse() && corpse_db_id != 0)
database.DeleteCharacterCorpse(corpse_db_id);
corpse_db_id = 0;
player_corpse_depop = true;
}
void Corpse::Bury() {
if (IsPlayerCorpse() && corpse_db_id != 0)
database.BuryCharacterCorpse(corpse_db_id);
corpse_db_id = 0;
player_corpse_depop = true;
}
void Corpse::DepopNPCCorpse() {
if (IsNPCCorpse())
player_corpse_depop = true;
}
void Corpse::DepopPlayerCorpse() {
player_corpse_depop = true;
}
void Corpse::AddConsentName(std::string consent_player_name)
{
for (const auto& consented_player_name : consented_player_names) {
if (strcasecmp(consented_player_name.c_str(), consent_player_name.c_str()) == 0) {
return;
}
}
consented_player_names.emplace_back(consent_player_name);
}
void Corpse::RemoveConsentName(std::string consent_player_name)
{
consented_player_names.erase(std::remove_if(consented_player_names.begin(), consented_player_names.end(),
[consent_player_name](const std::string& consented_player_name) {
return strcasecmp(consented_player_name.c_str(), consent_player_name.c_str()) == 0;
}
), consented_player_names.end());
}
uint32 Corpse::CountItems() {
return itemlist.size();
}
void Corpse::AddItem(uint32 itemnum, uint16 charges, int16 slot, uint32 aug1, uint32 aug2, uint32 aug3, uint32 aug4, uint32 aug5, uint32 aug6, uint8 attuned) {
if (!database.GetItem(itemnum))
return;
is_corpse_changed = true;
auto item = new ServerLootItem_Struct;
memset(item, 0, sizeof(ServerLootItem_Struct));
item->item_id = itemnum;
item->charges = charges;
item->equip_slot = slot;
item->aug_1=aug1;
item->aug_2=aug2;
item->aug_3=aug3;
item->aug_4=aug4;
item->aug_5=aug5;
item->aug_6=aug6;
item->attuned=attuned;
itemlist.push_back(item);
UpdateEquipmentLight();
}
ServerLootItem_Struct* Corpse::GetItem(uint16 lootslot, ServerLootItem_Struct** bag_item_data) {
ServerLootItem_Struct *sitem = nullptr, *sitem2 = nullptr;
ItemList::iterator cur,end;
cur = itemlist.begin();
end = itemlist.end();
for(; cur != end; ++cur) {
if((*cur)->lootslot == lootslot) {
sitem = *cur;
break;
}
}
if (sitem && bag_item_data && EQ::InventoryProfile::SupportsContainers(sitem->equip_slot)) {
int16 bagstart = EQ::InventoryProfile::CalcSlotId(sitem->equip_slot, EQ::invbag::SLOT_BEGIN);
cur = itemlist.begin();
end = itemlist.end();
for (; cur != end; ++cur) {
sitem2 = *cur;
if (sitem2->equip_slot >= bagstart && sitem2->equip_slot < bagstart + 10) {
bag_item_data[sitem2->equip_slot - bagstart] = sitem2;
}
}
}
return sitem;
}
uint32 Corpse::GetWornItem(int16 equipSlot) const {
ItemList::const_iterator cur,end;
cur = itemlist.begin();
end = itemlist.end();
for(; cur != end; ++cur) {
ServerLootItem_Struct* item = *cur;
if (item->equip_slot == equipSlot) {
return item->item_id;
}
}
return 0;
}
void Corpse::RemoveItem(uint16 lootslot) {
if (lootslot == 0xFFFF)
return;
ItemList::iterator cur,end;
cur = itemlist.begin();
end = itemlist.end();
for (; cur != end; ++cur) {
ServerLootItem_Struct* sitem = *cur;
if (sitem->lootslot == lootslot) {
RemoveItem(sitem);
return;
}
}
}
void Corpse::RemoveItem(ServerLootItem_Struct* item_data)
{
for (auto iter = itemlist.begin(); iter != itemlist.end(); ++iter) {
auto sitem = *iter;
if (sitem != item_data) { continue; }
is_corpse_changed = true;
itemlist.erase(iter);
uint8 material = EQ::InventoryProfile::CalcMaterialFromSlot(sitem->equip_slot); // autos to unsigned char
if (material != EQ::textures::materialInvalid)
SendWearChange(material);
UpdateEquipmentLight();
if (UpdateActiveLight())
SendAppearancePacket(AT_Light, GetActiveLightType());
safe_delete(sitem);
return;
}
}
void Corpse::RemoveItemByID(uint32 item_id, int quantity) {
if (!database.GetItem(item_id)) {
return;
}
if (!HasItem(item_id)) {
return;
}
int removed_count = 0;
for (auto current_item = itemlist.begin(); current_item != itemlist.end(); ++current_item) {
ServerLootItem_Struct* sitem = *current_item;
if (removed_count == quantity) {
break;
}
if (sitem && sitem->item_id == item_id) {
int stack_size = sitem->charges > 1 ? sitem->charges : 1;
if ((removed_count + stack_size) <= quantity) {
removed_count += stack_size;
is_corpse_changed = true;
itemlist.erase(current_item);
} else {
int amount_left = (quantity - removed_count);
if (amount_left > 0) {
if (stack_size > amount_left) {
removed_count += amount_left;
sitem->charges -= amount_left;
is_corpse_changed = true;
} else if (stack_size == amount_left) {
removed_count += amount_left;
itemlist.erase(current_item);
}
}
}
}
}
}
void Corpse::SetCash(uint32 in_copper, uint32 in_silver, uint32 in_gold, uint32 in_platinum) {
this->copper = in_copper;
this->silver = in_silver;
this->gold = in_gold;
this->platinum = in_platinum;
is_corpse_changed = true;
}
void Corpse::RemoveCash() {
this->copper = 0;
this->silver = 0;
this->gold = 0;
this->platinum = 0;
is_corpse_changed = true;
}
bool Corpse::IsEmpty() const {
if (copper != 0 || silver != 0 || gold != 0 || platinum != 0)
return false;
return itemlist.empty();
}
bool Corpse::Process() {
if (player_corpse_depop)
return false;
if (corpse_delay_timer.Check()) {
for (int i = 0; i < MAX_LOOTERS; i++)
allowed_looters[i] = 0;
corpse_delay_timer.Disable();
return true;
}
if (corpse_graveyard_timer.Check()) {
MovePlayerCorpseToGraveyard();
corpse_graveyard_timer.Disable();
return false;
}
/*
if(corpse_res_timer.Check()) {
can_rez = false;
corpse_res_timer.Disable();
}
*/
/* This is when a corpse hits decay timer and does checks*/
if (corpse_decay_timer.Check()) {
/* NPC */
if (IsNPCCorpse()){
corpse_decay_timer.Disable();
return false;
}
/* Client */
if (!RuleB(Zone, EnableShadowrest)){
Delete();
}
else {
if (database.BuryCharacterCorpse(corpse_db_id)) {
Save();
player_corpse_depop = true;
corpse_db_id = 0;
LogDebug("Tagged [{}] player corpse has buried", this->GetName());
}
else {
LogError("Unable to bury [{}] player corpse", this->GetName());
return true;
}
}
corpse_decay_timer.Disable();
return false;
}
return true;
}
void Corpse::SetDecayTimer(uint32 decaytime) {
if (decaytime == 0)
corpse_decay_timer.Trigger();
else
corpse_decay_timer.Start(decaytime);
}
bool Corpse::CanPlayerLoot(int charid) {
uint8 looters = 0;
for (int i = 0; i < MAX_LOOTERS; i++) {
if (allowed_looters[i] != 0){
looters++;
}
if (allowed_looters[i] == charid)
return true;
}
/* If we have no looters, obviously client can loot */
return looters == 0;
}
void Corpse::AllowPlayerLoot(Mob *them, uint8 slot) {
if(slot >= MAX_LOOTERS)
return;
if(them == nullptr || !them->IsClient())
return;
allowed_looters[slot] = them->CastToClient()->CharacterID();
}
void Corpse::MakeLootRequestPackets(Client* client, const EQApplicationPacket* app) {
if (!client)
return;
// Added 12/08. Started compressing loot struct on live.
if(player_corpse_depop) {
SendLootReqErrorPacket(client, LootResponse::SomeoneElse);
return;
}
if(IsPlayerCorpse() && !corpse_db_id) { // really should try to resave in this case
// SendLootReqErrorPacket(client, 0);
client->Message(Chat::Red, "Warning: Corpse's dbid = 0! Corpse will not survive zone shutdown!");
std::cout << "Error: PlayerCorpse::MakeLootRequestPackets: dbid = 0!" << std::endl;
// return;
}
if(is_locked && client->Admin() < 100) {
SendLootReqErrorPacket(client, LootResponse::SomeoneElse);
client->Message(Chat::Red, "Error: Corpse locked by GM.");
return;
}
if(!being_looted_by || (being_looted_by != 0xFFFFFFFF && !entity_list.GetID(being_looted_by)))
being_looted_by = 0xFFFFFFFF;
if (DistanceSquaredNoZ(client->GetPosition(), m_Position) > 625) {
SendLootReqErrorPacket(client, LootResponse::TooFar);
return;
}
if (being_looted_by != 0xFFFFFFFF && being_looted_by != client->GetID()) {
SendLootReqErrorPacket(client, LootResponse::SomeoneElse);
return;
}
// all loot session disqualifiers should occur before this point as not to interfere with any current looter
loot_request_type = LootRequestType::Forbidden;
// loot_request_type is scoped to class Corpse and reset on a per-loot session basis
if (client->GetGM()) {
if (client->Admin() >= 100)
loot_request_type = LootRequestType::GMAllowed;
else
loot_request_type = LootRequestType::GMPeek;
}
else {
if (IsPlayerCorpse()) {
if (char_id == client->CharacterID()) {
loot_request_type = LootRequestType::Self;
}
else if (CanPlayerLoot(client->CharacterID())) {
if (GetPlayerKillItem() == -1)
loot_request_type = LootRequestType::AllowedPVPAll;
else if (GetPlayerKillItem() == 1)
loot_request_type = LootRequestType::AllowedPVPSingle;
else if (GetPlayerKillItem() > 1)
loot_request_type = LootRequestType::AllowedPVPDefined;
}
}
else if ((IsNPCCorpse() || become_npc) && CanPlayerLoot(client->CharacterID())) {
loot_request_type = LootRequestType::AllowedPVE;
}
}
LogInventory("MakeLootRequestPackets() LootRequestType [{}] for [{}]", (int) loot_request_type, client->GetName());
if (loot_request_type == LootRequestType::Forbidden) {
SendLootReqErrorPacket(client, LootResponse::NotAtThisTime);
return;
}
being_looted_by = client->GetID();
client->CommonBreakInvisible(); // we should be "all good" so lets break invis now instead of earlier before all error checking is done
// process coin
bool loot_coin = false;
std::string tmp;
if (database.GetVariable("LootCoin", tmp))
loot_coin = (tmp[0] == 1 && tmp[1] == '\0');
if (loot_request_type == LootRequestType::GMPeek || loot_request_type == LootRequestType::GMAllowed) {
client->Message(Chat::Yellow, "This corpse contains %u platinum, %u gold, %u silver and %u copper.",
GetPlatinum(), GetGold(), GetSilver(), GetCopper());
auto outapp = new EQApplicationPacket(OP_MoneyOnCorpse, sizeof(moneyOnCorpseStruct));
moneyOnCorpseStruct* d = (moneyOnCorpseStruct*)outapp->pBuffer;
d->response = static_cast<uint8>(LootResponse::Normal);
d->unknown1 = 0x42;
d->unknown2 = 0xef;
d->copper = 0;
d->silver = 0;
d->gold = 0;
d->platinum = 0;
outapp->priority = 6;
client->QueuePacket(outapp);
safe_delete(outapp);
}
else {
auto outapp = new EQApplicationPacket(OP_MoneyOnCorpse, sizeof(moneyOnCorpseStruct));
moneyOnCorpseStruct* d = (moneyOnCorpseStruct*)outapp->pBuffer;
d->response = static_cast<uint8>(LootResponse::Normal);
d->unknown1 = 0x42;
d->unknown2 = 0xef;
Group* cgroup = client->GetGroup();
// this can be reworked into a switch and/or massaged to include specialized pve loot rules based on 'LootRequestType'
if (!IsPlayerCorpse() && client->IsGrouped() && client->AutoSplitEnabled() && cgroup) {
d->copper = 0;
d->silver = 0;
d->gold = 0;
d->platinum = 0;
cgroup->SplitMoney(GetCopper(), GetSilver(), GetGold(), GetPlatinum(), client);
}
else {
d->copper = GetCopper();
d->silver = GetSilver();
d->gold = GetGold();
d->platinum = GetPlatinum();
client->AddMoneyToPP(GetCopper(), GetSilver(), GetGold(), GetPlatinum(), false);
}
RemoveCash();
Save();
outapp->priority = 6;
client->QueuePacket(outapp);
safe_delete(outapp);
}
// process items
auto timestamps = database.GetItemRecastTimestamps(client->CharacterID());
if (loot_request_type == LootRequestType::AllowedPVPDefined) {
auto pkitemid = GetPlayerKillItem();
auto pkitem = database.GetItem(pkitemid);
auto pkinst = database.CreateItem(pkitem, pkitem->MaxCharges);
if (pkinst) {
if (pkitem->RecastDelay)
pkinst->SetRecastTimestamp(timestamps.count(pkitem->RecastType) ? timestamps.at(pkitem->RecastType) : 0);
LogInventory("MakeLootRequestPackets() Slot [{}], Item [{}]", EQ::invslot::CORPSE_BEGIN, pkitem->Name);
client->SendItemPacket(EQ::invslot::CORPSE_BEGIN, pkinst, ItemPacketLoot);
safe_delete(pkinst);
}
else {
LogInventory("MakeLootRequestPackets() PlayerKillItem [{}] not found", pkitemid);
client->Message(Chat::Red, "PlayerKillItem (id: %i) could not be found!", pkitemid);
}
client->QueuePacket(app);
return;
}
auto loot_slot = EQ::invslot::CORPSE_BEGIN;
auto corpse_mask = client->GetInv().GetLookup()->CorpseBitmask;
for (auto item_data : itemlist) {
// every loot session must either set all items' lootslots to 'invslot::SLOT_INVALID'
// or to a valid enumerated client-versioned corpse slot (lootslot is not equip_slot)
item_data->lootslot = 0xFFFF;
// align server and client corpse slot mappings so translators can function properly
while (loot_slot <= EQ::invslot::CORPSE_END && (((uint64)1 << loot_slot) & corpse_mask) == 0)
++loot_slot;
if (loot_slot > EQ::invslot::CORPSE_END)
continue;
if (IsPlayerCorpse()) {
if (loot_request_type == LootRequestType::AllowedPVPSingle && loot_slot != EQ::invslot::CORPSE_BEGIN)
continue;
if (item_data->equip_slot < EQ::invslot::POSSESSIONS_BEGIN || item_data->equip_slot > EQ::invslot::POSSESSIONS_END)
continue;
}
const auto *item = database.GetItem(item_data->item_id);
auto inst = database.CreateItem(
item,
item_data->charges,
item_data->aug_1,
item_data->aug_2,
item_data->aug_3,
item_data->aug_4,
item_data->aug_5,
item_data->aug_6,
item_data->attuned
);
if (!inst)
continue;
if (item->RecastDelay)
inst->SetRecastTimestamp(timestamps.count(item->RecastType) ? timestamps.at(item->RecastType) : 0);
LogInventory("MakeLootRequestPackets() Slot [{}], Item [{}]", loot_slot, item->Name);
client->SendItemPacket(loot_slot, inst, ItemPacketLoot);
safe_delete(inst);
item_data->lootslot = loot_slot++;
}
// Disgrace: Client seems to require that we send the packet back...
client->QueuePacket(app);
// This is required for the 'Loot All' feature to work for SoD clients. I expect it is to tell the client that the
// server has now sent all the items on the corpse.
if (client->ClientVersion() >= EQ::versions::ClientVersion::SoD)
SendLootReqErrorPacket(client, LootResponse::LootAll);
}
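// --- Editor's illustrative sketch (not part of EQEmu) ---
// Shows, with made-up slot numbers, how the bitmask walk in MakeLootRequestPackets()
// above skips corpse slots a client version does not expose. The range 23..30 merely
// stands in for invslot::CORPSE_BEGIN..CORPSE_END; the helper is unused by the server.
static int ExampleCorpseSlotWalk(unsigned long long corpse_mask, int item_count, int out_slots[]) {
	int assigned = 0;
	int loot_slot = 23;
	for (int i = 0; i < item_count; ++i) {
		// advance past any slot whose bit is absent from the client's corpse bitmask
		while (loot_slot <= 30 && (((unsigned long long)1 << loot_slot) & corpse_mask) == 0)
			++loot_slot;
		if (loot_slot > 30)
			break;
		out_slots[assigned++] = loot_slot++;
	}
	// e.g. a mask missing bit 25 with 5 items yields slots 23, 24, 26, 27, 28
	return assigned;
}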
void Corpse::LootItem(Client *client, const EQApplicationPacket *app)
{
if (!client)
return;
auto lootitem = (LootingItem_Struct *)app->pBuffer;
LogInventory("LootItem() LootRequestType [{}], Slot [{}] for [{}]", (int) loot_request_type, lootitem->slot_id, client->GetName());
if (loot_request_type < LootRequestType::GMAllowed) { // LootRequestType::Forbidden and LootRequestType::GMPeek
client->QueuePacket(app);
SendEndLootErrorPacket(client);
// unlock corpse for others
if (IsBeingLootedBy(client))
ResetLooter();
return;
}
if (!loot_cooldown_timer.Check()) {
client->QueuePacket(app);
SendEndLootErrorPacket(client);
// unlock corpse for others
if (IsBeingLootedBy(client))
ResetLooter();
return;
}
/* To prevent item loss for a player using 'Loot All' who doesn't have inventory space for all their items. */
if (RuleB(Character, CheckCursorEmptyWhenLooting) && !client->GetInv().CursorEmpty()) {
client->Message(Chat::Red, "You may not loot an item while you have an item on your cursor.");
client->QueuePacket(app);
SendEndLootErrorPacket(client);
/* Unlock corpse for others */
if (IsBeingLootedBy(client))
ResetLooter();
return;
}
if (!IsBeingLootedBy(client)) {
client->QueuePacket(app);
SendEndLootErrorPacket(client);
return;
}
if (IsPlayerCorpse() && !CanPlayerLoot(client->CharacterID()) && !become_npc &&
(char_id != client->CharacterID() && client->Admin() < 150)) {
client->Message(Chat::Red, "Error: This is a player corpse and you dont own it.");
client->QueuePacket(app);
SendEndLootErrorPacket(client);
return;
}
if (is_locked && client->Admin() < 100) {
client->QueuePacket(app);
SendLootReqErrorPacket(client, LootResponse::SomeoneElse);
client->Message(Chat::Red, "Error: Corpse locked by GM.");
return;
}
if (IsPlayerCorpse() && (char_id != client->CharacterID()) && CanPlayerLoot(client->CharacterID()) &&
GetPlayerKillItem() == 0) {
client->Message(Chat::Red, "Error: You cannot loot any more items from this corpse.");
client->QueuePacket(app);
SendEndLootErrorPacket(client);
ResetLooter();
return;
}
const EQ::ItemData *item = nullptr;
EQ::ItemInstance *inst = nullptr;
ServerLootItem_Struct *item_data = nullptr, *bag_item_data[10] = {};
memset(bag_item_data, 0, sizeof(bag_item_data));
if (GetPlayerKillItem() > 1) {
item = database.GetItem(GetPlayerKillItem());
}
else if (GetPlayerKillItem() == -1 || GetPlayerKillItem() == 1) {
item_data =
GetItem(lootitem->slot_id); // dont allow them to loot entire bags of items as pvp reward
}
else {
item_data = GetItem(lootitem->slot_id, bag_item_data);
}
if (GetPlayerKillItem() <= 1 && item_data != 0) {
item = database.GetItem(item_data->item_id);
}
if (item != 0) {
if (item_data) {
inst = database.CreateItem(item, item_data ? item_data->charges : 0, item_data->aug_1,
item_data->aug_2, item_data->aug_3, item_data->aug_4,
item_data->aug_5, item_data->aug_6, item_data->attuned);
}
else {
inst = database.CreateItem(item);
}
}
if (client && inst) {
if (client->CheckLoreConflict(item)) {
client->MessageString(Chat::White, LOOT_LORE_ERROR);
client->QueuePacket(app);
SendEndLootErrorPacket(client);
ResetLooter();
delete inst;
return;
}
if (inst->IsAugmented()) {
for (int i = EQ::invaug::SOCKET_BEGIN; i <= EQ::invaug::SOCKET_END; i++) {
EQ::ItemInstance *itm = inst->GetAugment(i);
if (itm) {
if (client->CheckLoreConflict(itm->GetItem())) {
client->MessageString(Chat::White, LOOT_LORE_ERROR);
client->QueuePacket(app);
SendEndLootErrorPacket(client);
ResetLooter();
delete inst;
return;
}
}
}
}
char buf[88];
char q_corpse_name[64];
strcpy(q_corpse_name, corpse_name);
snprintf(buf, 87, "%d %d %s", inst->GetItem()->ID, inst->GetCharges(),
EntityList::RemoveNumbers(q_corpse_name));
buf[87] = '\0';
std::vector<EQ::Any> args;
args.push_back(inst);
args.push_back(this);
if (parse->EventPlayer(EVENT_LOOT, client, buf, 0, &args) != 0) {
lootitem->auto_loot = -1;
client->MessageString(Chat::Red, LOOT_NOT_ALLOWED, inst->GetItem()->Name);
client->QueuePacket(app);
delete inst;
return;
}
if (!IsPlayerCorpse())
{
// dynamic zones may prevent looting by non-members or based on lockouts
auto dz = zone->GetDynamicZone();
if (dz && !dz->CanClientLootCorpse(client, GetNPCTypeID(), GetID()))
{
// note on live this message is only sent once on the first loot attempt of an open corpse
client->MessageString(Chat::Loot, LOOT_NOT_ALLOWED, inst->GetItem()->Name);
lootitem->auto_loot = -1; // generates client eqstr 1370 "You may not loot that item from this corpse."
client->QueuePacket(app);
delete inst;
return;
}
}
// do we want this to have a fail option too?
parse->EventItem(EVENT_LOOT, client, inst, this, buf, 0);
// safe to ACK now
client->QueuePacket(app);
if (!IsPlayerCorpse() && RuleB(Character, EnableDiscoveredItems)) {
if (client && !client->GetGM() && !client->IsDiscovered(inst->GetItem()->ID))
client->DiscoverItem(inst->GetItem()->ID);
}
if (zone->adv_data) {
ServerZoneAdventureDataReply_Struct *ad = (ServerZoneAdventureDataReply_Struct *)zone->adv_data;
if (ad->type == Adventure_Collect && !IsPlayerCorpse()) {
if (ad->data_id == inst->GetItem()->ID) {
zone->DoAdventureCountIncrease();
}
}
}
/* First add it to the looter - this will do the bag contents too */
if (lootitem->auto_loot > 0) {
if (!client->AutoPutLootInInventory(*inst, true, true, bag_item_data))
client->PutLootInInventory(EQ::invslot::slotCursor, *inst, bag_item_data);
}
else {
client->PutLootInInventory(EQ::invslot::slotCursor, *inst, bag_item_data);
}
/* Update any tasks that have an activity to loot this item */
if (RuleB(TaskSystem, EnableTaskSystem))
client->UpdateTasksForItem(TaskActivityType::Loot, item->ID);
/* Remove it from Corpse */
if (item_data) {
			/* Delete needs to be before RemoveItem because it deletes the pointer for
* item_data/bag_item_data */
database.DeleteItemOffCharacterCorpse(this->corpse_db_id, item_data->equip_slot,
item_data->item_id);
/* Delete Item Instance */
RemoveItem(item_data->lootslot);
}
/* Remove Bag Contents */
if (item->IsClassBag() && (GetPlayerKillItem() != -1 || GetPlayerKillItem() != 1)) {
for (int i = EQ::invbag::SLOT_BEGIN; i <= EQ::invbag::SLOT_END; i++) {
if (bag_item_data[i]) {
				/* Delete needs to be before RemoveItem because it deletes the pointer for
* item_data/bag_item_data */
database.DeleteItemOffCharacterCorpse(this->corpse_db_id,
bag_item_data[i]->equip_slot,
bag_item_data[i]->item_id);
/* Delete Item Instance */
RemoveItem(bag_item_data[i]);
}
}
}
if (GetPlayerKillItem() != -1) {
SetPlayerKillItemID(0);
}
/* Send message with item link to groups and such */
EQ::SayLinkEngine linker;
linker.SetLinkType(EQ::saylink::SayLinkItemInst);
linker.SetItemInst(inst);
linker.GenerateLink();
client->MessageString(Chat::Loot, LOOTED_MESSAGE, linker.Link().c_str());
if (!IsPlayerCorpse()) {
Group *g = client->GetGroup();
if (g != nullptr) {
g->GroupMessageString(client, Chat::Loot, OTHER_LOOTED_MESSAGE,
client->GetName(), linker.Link().c_str());
}
else {
Raid *r = client->GetRaid();
if (r != nullptr) {
r->RaidMessageString(client, Chat::Loot, OTHER_LOOTED_MESSAGE,
client->GetName(), linker.Link().c_str());
}
}
}
}
else {
SendEndLootErrorPacket(client);
safe_delete(inst);
return;
}
if (IsPlayerCorpse()) {
client->SendItemLink(inst);
}
else {
client->SendItemLink(inst, true);
}
safe_delete(inst);
}
void Corpse::EndLoot(Client* client, const EQApplicationPacket* app) {
auto outapp = new EQApplicationPacket;
outapp->SetOpcode(OP_LootComplete);
outapp->size = 0;
client->QueuePacket(outapp);
safe_delete(outapp);
this->being_looted_by = 0xFFFFFFFF;
if (this->IsEmpty())
Delete();
else
Save();
}
void Corpse::FillSpawnStruct(NewSpawn_Struct* ns, Mob* ForWho) {
Mob::FillSpawnStruct(ns, ForWho);
ns->spawn.max_hp = 120;
ns->spawn.NPC = 2;
UpdateActiveLight();
ns->spawn.light = m_Light.Type[EQ::lightsource::LightActive];
}
void Corpse::QueryLoot(Client* to) {
int x = 0, y = 0; // x = visible items, y = total items
to->Message(Chat::White, "Coin: %ip, %ig, %is, %ic", platinum, gold, silver, copper);
ItemList::iterator cur,end;
cur = itemlist.begin();
end = itemlist.end();
int corpselootlimit = to->GetInv().GetLookup()->InventoryTypeSize.Corpse;
for(; cur != end; ++cur) {
ServerLootItem_Struct* sitem = *cur;
if (IsPlayerCorpse()) {
if (sitem->equip_slot >= EQ::invbag::GENERAL_BAGS_BEGIN && sitem->equip_slot <= EQ::invbag::CURSOR_BAG_END)
sitem->lootslot = 0xFFFF;
else
x < corpselootlimit ? sitem->lootslot = x : sitem->lootslot = 0xFFFF;
const EQ::ItemData* item = database.GetItem(sitem->item_id);
if (item)
to->Message((sitem->lootslot == 0xFFFF), "LootSlot: %i (EquipSlot: %i) Item: %s (%d), Count: %i", static_cast<int16>(sitem->lootslot), sitem->equip_slot, item->Name, item->ID, sitem->charges);
else
to->Message((sitem->lootslot == 0xFFFF), "Error: 0x%04x", sitem->item_id);
if (sitem->lootslot != 0xFFFF)
x++;
y++;
}
else {
sitem->lootslot=y;
const EQ::ItemData* item = database.GetItem(sitem->item_id);
if (item)
to->Message(Chat::White, "LootSlot: %i Item: %s (%d), Count: %i", sitem->lootslot, item->Name, item->ID, sitem->charges);
else
to->Message(Chat::White, "Error: 0x%04x", sitem->item_id);
y++;
}
}
if (IsPlayerCorpse()) {
to->Message(Chat::White, "%i visible %s (%i total) on %s (DBID: %i).", x, x==1?"item":"items", y, this->GetName(), this->GetCorpseDBID());
}
else {
to->Message(Chat::White, "%i %s on %s.", y, y==1?"item":"items", this->GetName());
}
}
bool Corpse::HasItem(uint32 item_id) {
if (!database.GetItem(item_id)) {
return false;
}
for (auto current_item = itemlist.begin(); current_item != itemlist.end(); ++current_item) {
ServerLootItem_Struct* loot_item = *current_item;
if (!loot_item) {
LogError("Corpse::HasItem() - ItemList error, null item");
continue;
}
if (!loot_item->item_id || !database.GetItem(loot_item->item_id)) {
LogError("Corpse::HasItem() - Database error, invalid item");
continue;
}
if (loot_item->item_id == item_id) {
return true;
}
}
return false;
}
uint16 Corpse::CountItem(uint32 item_id) {
uint16 item_count = 0;
if (!database.GetItem(item_id)) {
return item_count;
}
for (auto current_item = itemlist.begin(); current_item != itemlist.end(); ++current_item) {
ServerLootItem_Struct* loot_item = *current_item;
if (!loot_item) {
LogError("Corpse::CountItem() - ItemList error, null item");
continue;
}
if (!loot_item->item_id || !database.GetItem(loot_item->item_id)) {
LogError("Corpse::CountItem() - Database error, invalid item");
continue;
}
if (loot_item->item_id == item_id) {
item_count += loot_item->charges;
}
}
return item_count;
}
uint32 Corpse::GetItemIDBySlot(uint16 loot_slot) {
for (auto current_item = itemlist.begin(); current_item != itemlist.end(); ++current_item) {
ServerLootItem_Struct* loot_item = *current_item;
if (loot_item->lootslot == loot_slot) {
return loot_item->item_id;
}
}
return 0;
}
uint16 Corpse::GetFirstSlotByItemID(uint32 item_id) {
for (auto current_item = itemlist.begin(); current_item != itemlist.end(); ++current_item) {
ServerLootItem_Struct* loot_item = *current_item;
if (loot_item->item_id == item_id) {
return loot_item->lootslot;
}
}
return 0;
}
bool Corpse::Summon(Client* client, bool spell, bool CheckDistance) {
uint32 dist2 = 10000; // pow(100, 2);
if (!spell) {
if (this->GetCharID() == client->CharacterID()) {
if (IsLocked() && client->Admin() < 100) {
client->Message(Chat::Red, "That corpse is locked by a GM.");
return false;
}
if (!CheckDistance || (DistanceSquaredNoZ(m_Position, client->GetPosition()) <= dist2)) {
GMMove(client->GetX(), client->GetY(), client->GetZ());
is_corpse_changed = true;
}
else {
client->MessageString(Chat::Red, CORPSE_TOO_FAR);
return false;
}
}
else
{
bool consented = false;
for (const auto& consented_player_name : consented_player_names) {
if (strcasecmp(client->GetName(), consented_player_name.c_str()) == 0) {
consented = true;
break;
}
}
if (!consented && consented_guild_id && consented_guild_id != GUILD_NONE) {
if (client->GuildID() == consented_guild_id) {
consented = true;
}
}
if (!consented && consented_group_id) {
Group* grp = client->GetGroup();
if (grp && grp->GetID() == consented_group_id) {
consented = true;
}
}
if (!consented && consented_raid_id) {
Raid* raid = client->GetRaid();
if (raid && raid->GetID() == consented_raid_id) {
consented = true;
}
}
if (consented) {
if (!CheckDistance || (DistanceSquaredNoZ(m_Position, client->GetPosition()) <= dist2)) {
GMMove(client->GetX(), client->GetY(), client->GetZ());
is_corpse_changed = true;
}
else {
client->MessageString(Chat::Red, CORPSE_TOO_FAR);
return false;
}
}
else {
client->MessageString(Chat::Red, CONSENT_DENIED);
return false;
}
}
}
else {
GMMove(client->GetX(), client->GetY(), client->GetZ());
is_corpse_changed = true;
}
Save();
return true;
}
void Corpse::CompleteResurrection(){
rez_experience = 0;
is_corpse_changed = true;
this->Save();
}
void Corpse::Spawn() {
auto app = new EQApplicationPacket;
this->CreateSpawnPacket(app, this);
entity_list.QueueClients(this, app);
safe_delete(app);
}
uint32 Corpse::GetEquippedItemFromTextureSlot(uint8 material_slot) const {
int16 invslot;
if (material_slot > EQ::textures::LastTexture) {
return 0;
}
invslot = EQ::InventoryProfile::CalcSlotFromMaterial(material_slot);
if(invslot == INVALID_INDEX) // GetWornItem() should be returning a 0 for any invalid index...
return 0;
return GetWornItem(invslot);
}
uint32 Corpse::GetEquipmentColor(uint8 material_slot) const {
const EQ::ItemData *item = nullptr;
if (material_slot > EQ::textures::LastTexture) {
return 0;
}
item = database.GetItem(GetEquippedItemFromTextureSlot(material_slot));
if(item != 0) {
return (item_tint.Slot[material_slot].UseTint ? item_tint.Slot[material_slot].Color : item->Color);
}
return 0;
}
void Corpse::UpdateEquipmentLight()
{
m_Light.Type[EQ::lightsource::LightEquipment] = 0;
m_Light.Level[EQ::lightsource::LightEquipment] = 0;
for (auto iter = itemlist.begin(); iter != itemlist.end(); ++iter) {
if ((*iter)->equip_slot < EQ::invslot::EQUIPMENT_BEGIN || (*iter)->equip_slot > EQ::invslot::EQUIPMENT_END) { continue; }
if ((*iter)->equip_slot == EQ::invslot::slotAmmo) { continue; }
auto item = database.GetItem((*iter)->item_id);
if (item == nullptr) { continue; }
if (EQ::lightsource::IsLevelGreater(item->Light, m_Light.Type[EQ::lightsource::LightEquipment]))
m_Light.Type[EQ::lightsource::LightEquipment] = item->Light;
}
uint8 general_light_type = 0;
for (auto iter = itemlist.begin(); iter != itemlist.end(); ++iter) {
if ((*iter)->equip_slot < EQ::invslot::GENERAL_BEGIN || (*iter)->equip_slot > EQ::invslot::GENERAL_END) { continue; }
auto item = database.GetItem((*iter)->item_id);
if (item == nullptr) { continue; }
if (!item->IsClassCommon()) { continue; }
if (item->Light < 9 || item->Light > 13) { continue; }
if (EQ::lightsource::TypeToLevel(item->Light))
general_light_type = item->Light;
}
if (EQ::lightsource::IsLevelGreater(general_light_type, m_Light.Type[EQ::lightsource::LightEquipment]))
m_Light.Type[EQ::lightsource::LightEquipment] = general_light_type;
m_Light.Level[EQ::lightsource::LightEquipment] = EQ::lightsource::TypeToLevel(m_Light.Type[EQ::lightsource::LightEquipment]);
}
void Corpse::AddLooter(Mob* who) {
for (int i = 0; i < MAX_LOOTERS; i++) {
if (allowed_looters[i] == 0) {
allowed_looters[i] = who->CastToClient()->CharacterID();
break;
}
}
}
void Corpse::LoadPlayerCorpseDecayTime(uint32 corpse_db_id){
if(!corpse_db_id)
return;
uint32 active_corpse_decay_timer = database.GetCharacterCorpseDecayTimer(corpse_db_id);
if (active_corpse_decay_timer > 0 && RuleI(Character, CorpseDecayTimeMS) > (active_corpse_decay_timer * 1000)) {
corpse_decay_timer.SetTimer(RuleI(Character, CorpseDecayTimeMS) - (active_corpse_decay_timer * 1000));
}
else {
corpse_decay_timer.SetTimer(2000);
}
if (active_corpse_decay_timer > 0 && RuleI(Zone, GraveyardTimeMS) > (active_corpse_decay_timer * 1000)) {
corpse_graveyard_timer.SetTimer(RuleI(Zone, GraveyardTimeMS) - (active_corpse_decay_timer * 1000));
}
else {
corpse_graveyard_timer.SetTimer(3000);
}
}
void Corpse::SendWorldSpawnPlayerCorpseInZone(uint32_t zone_id)
{
auto pack = std::make_unique<ServerPacket>(ServerOP_SpawnPlayerCorpse, sizeof(SpawnPlayerCorpse_Struct));
SpawnPlayerCorpse_Struct* spc = reinterpret_cast<SpawnPlayerCorpse_Struct*>(pack->pBuffer);
spc->player_corpse_id = corpse_db_id;
spc->zone_id = zone_id;
worldserver.SendPacket(pack.get());
}
bool Corpse::MovePlayerCorpseToGraveyard()
{
if (IsPlayerCorpse() && zone && zone->HasGraveyard())
{
Save();
uint16_t instance_id = (zone->GetZoneID() == zone->graveyard_zoneid()) ? zone->GetInstanceID() : 0;
database.SendCharacterCorpseToGraveyard(corpse_db_id, zone->graveyard_zoneid(), instance_id, zone->GetGraveyardPoint());
SendWorldSpawnPlayerCorpseInZone(zone->graveyard_zoneid());
corpse_db_id = 0;
player_corpse_depop = true;
corpse_graveyard_timer.Disable();
LogDebug("Moved [{}] player corpse to the designated graveyard in zone [{}]", GetName(), ZoneName(zone->graveyard_zoneid()));
return true;
}
return false;
}
bool Corpse::MovePlayerCorpseToNonInstance()
{
if (IsPlayerCorpse() && zone && zone->GetInstanceID() != 0)
{
Save();
database.SendCharacterCorpseToNonInstance(corpse_db_id);
SendWorldSpawnPlayerCorpseInZone(zone->GetZoneID());
corpse_db_id = 0;
player_corpse_depop = true;
corpse_graveyard_timer.Disable();
LogDebug("Moved [{}] player corpse to non-instance version of zone [{}]", GetName(), ZoneName(zone->GetZoneID()));
return true;
}
return false;
}
std::vector<int> Corpse::GetLootList() {
std::vector<int> corpse_items;
for (auto current_item = itemlist.begin(); current_item != itemlist.end(); ++current_item) {
ServerLootItem_Struct* loot_item = *current_item;
if (!loot_item) {
LogError("Corpse::GetLootList() - ItemList error, null item");
continue;
}
if (std::find(corpse_items.begin(), corpse_items.end(), loot_item->item_id) != corpse_items.end()) {
continue;
}
corpse_items.push_back(loot_item->item_id);
}
return corpse_items;
}
| 1 | 10,872 | Please remove this. | EQEmu-Server | cpp |
@@ -15,6 +15,7 @@ class Organization::Affiliated < Organization::AccountFacts
accounts = @organization.accounts.joins([:person, :positions])
accounts = accounts.group('accounts.id, people.kudo_position').order('kudo_position nulls last')
accounts.paginate(per_page: limit, page: page)
+ Account.paginate_by_sql(accounts.to_sql, per_page: limit, page: page)
end
def projects(page = 1, limit = 10) | 1 | class Organization::Affiliated < Organization::AccountFacts
def initialize(organization)
@organization = organization
end
def stats
Organization.connection.select_one <<-SQL
SELECT #{Organization.send(:sanitize_sql, selects)}
FROM accounts A #{Organization.send(:sanitize_sql, account_facts_joins)}
WHERE A.organization_id = #{@organization.id};
SQL
end
def committers(page = 1, limit = 10)
accounts = @organization.accounts.joins([:person, :positions])
accounts = accounts.group('accounts.id, people.kudo_position').order('kudo_position nulls last')
accounts.paginate(per_page: limit, page: page)
end
def projects(page = 1, limit = 10)
@organization.projects.order('projects.user_count DESC').paginate(per_page: limit, page: page)
end
private
def selects
<<-SQL
#{affl_committers} AS affl_committers,
#{affl_commits} AS affl_commits,
#{affl_projects} AS affl_projects,
#{affl_committers_out} AS affl_committers_out,
#{affl_commits_out} AS affl_commits_out,
#{affl_projects_out} AS affl_projects_out
SQL
end
def affl_committers
"COUNT(DISTINCT CASE WHEN P.organization_id = #{@organization.id} THEN A.id END)"
end
def affl_commits
"COALESCE(SUM(CASE WHEN P.organization_id = #{@organization.id} THEN NF.commits ELSE 0 END), 0)"
end
def affl_projects
"COUNT(DISTINCT CASE WHEN P.organization_id = #{@organization.id} THEN P.id END)"
end
def affl_committers_out
"COUNT(DISTINCT CASE WHEN COALESCE(P.organization_id,0) <> #{@organization.id} THEN A.id END)"
end
def affl_commits_out
"COALESCE(SUM(CASE WHEN COALESCE(P.organization_id,0) <> #{@organization.id} THEN NF.commits ELSE 0 END), 0)"
end
def affl_projects_out
"COUNT(DISTINCT CASE WHEN COALESCE(P.organization_id,0) <> #{@organization.id} THEN P.id END)"
end
end
| 1 | 6,963 | Weird bug. Combining those joins and group calls was triggering AREL to generate the sql cache inside of will_paginate before the final call. This is a harmless workaround, but hints that will_paginate might becoming seriously deprecated. | blackducksoftware-ohloh-ui | rb |
@@ -75,6 +75,12 @@ class Analysis < ActiveRecord::Base
end.compact.join(' AND ')
end
+ def allowed_tuples
+ [].tap do |tuples|
+ analysis_sloc_sets.each { |analysis_sloc_set| tuples << analysis_sloc_set.allowed_tuples }
+ end.compact.join(' AND ')
+ end
+
def angle
(Math.atan(hotness_score) * 180 / Math::PI).round(3)
end | 1 | # frozen_string_literal: true
class Analysis < ActiveRecord::Base
include Analysis::Report
AVG_SALARY = 55_000
EARLIEST_DATE = Time.utc(1971, 1, 1)
EARLIEST_DATE_SQL_STRING = "TIMESTAMP '#{EARLIEST_DATE.strftime('%Y-%m-%d')}'"
ACTIVITY_LEVEL_INDEX_MAP = {
na: 0, new: 10, inactive: 20, very_low: 30, low: 40, moderate: 50, high: 60, very_high: 70
}.freeze
has_one :all_time_summary
has_one :thirty_day_summary
has_one :twelve_month_summary
has_one :previous_twelve_month_summary
has_many :analysis_summaries
has_many :analysis_aliases
has_many :contributor_facts, class_name: 'ContributorFact'
has_many :analysis_sloc_sets, dependent: :delete_all
has_many :sloc_sets, through: :analysis_sloc_sets
has_many :factoids, -> { order('severity DESC') }, dependent: :delete_all
has_many :activity_facts, dependent: :delete_all
belongs_to :project
belongs_to :main_language, class_name: 'Language', foreign_key: :main_language_id
scope :fresh, -> { where(Analysis.arel_table[:created_at].gt(Time.current - 2.days)) }
scope :hot, -> { where.not(hotness_score: nil).order(hotness_score: :desc) }
scope :for_lang, ->(lang_id) { where(main_language_id: lang_id) }
attr_accessor :ticks
def twelve_month_summary
super || NilAnalysisSummary.new
end
def previous_twelve_month_summary
super || NilAnalysisSummary.new
end
def activity_level
return :na if no_analysis? || old_analysis?
return :new if new_first_commit?
return :inactive if old_last_commit?
return :very_low if too_small_team?
convert_activity_score
end
def code_total
logic_total.to_i + markup_total.to_i + build_total.to_i
end
def man_years
man_years_from_loc(markup_total) + man_years_from_loc(logic_total) + man_years_from_loc(build_total)
end
def empty?
min_month.nil? || code_total.zero?
end
def cocomo_value(avg_salary = AVG_SALARY)
(man_years * avg_salary).to_i
end
def man_years_from_loc(loc = 0)
loc.positive? ? 2.4 * ((loc.to_f / 1000.0)**1.05) / 12.0 : 0
end
def ignore_tuples
[].tap do |tuples|
analysis_sloc_sets.each do |analysis_sloc_set|
tuples << analysis_sloc_set.ignore_tuples
end
end.compact.join(' AND ')
end
def angle
(Math.atan(hotness_score) * 180 / Math::PI).round(3)
end
class << self
def fresh_and_hot(lang_id = nil)
fnh = Analysis.fresh.hot
fnh = fnh.for_lang(lang_id) unless lang_id.nil?
fnh
end
end
private
def no_analysis?
(updated_on.nil? || first_commit_time.nil? || last_commit_time.nil? || headcount.nil? || empty?)
end
def old_analysis?
updated_on < Time.current - 30.days
end
def new_first_commit?
first_commit_time > Time.current - 12.months
end
def old_last_commit?
last_commit_time < Time.current - 24.months
end
def too_small_team?
headcount == 1
end
def convert_activity_score
case activity_score
when 0..204_933 then :very_low
when 204_934..875_012 then :low
when 875_013..4_686_315 then :moderate
when 4_686_316..13_305_163 then :high
else :very_high
end
end
end
| 1 | 9,518 | This can be simplified as discussed before. | blackducksoftware-ohloh-ui | rb |
@@ -1,7 +1,14 @@
package execute
-import "github.com/influxdata/flux"
+import (
+ "github.com/influxdata/flux"
+ "github.com/influxdata/flux/execute/table"
+)
func NewProcessMsg(tbl flux.Table) ProcessMsg {
return &processMsg{table: tbl}
}
+
+func NewProcessChunkMsg(chunk table.Chunk) ProcessChunkMsg {
+ return &processChunkMsg{chunk: chunk}
+} | 1 | package execute
import "github.com/influxdata/flux"
func NewProcessMsg(tbl flux.Table) ProcessMsg {
return &processMsg{table: tbl}
}
| 1 | 16,487 | Does `internal` or `test` in the file name actually do anything here? Or is that just to show these functions are only for tests? | influxdata-flux | go |
@@ -12,7 +12,7 @@ describe('Transaction deserialization', function() {
vectors_valid.forEach(function(vector) {
if (vector.length > 1) {
var hexa = vector[1];
- Transaction(hexa).serialize().should.equal(hexa);
+ Transaction(hexa).serialize(true).should.equal(hexa);
index++;
}
}); | 1 | 'use strict';
var Transaction = require('../../lib/transaction');
var vectors_valid = require('../data/bitcoind/tx_valid.json');
var vectors_invalid = require('../data/bitcoind/tx_invalid.json');
describe('Transaction deserialization', function() {
describe('valid transaction test case', function() {
var index = 0;
vectors_valid.forEach(function(vector) {
if (vector.length > 1) {
var hexa = vector[1];
Transaction(hexa).serialize().should.equal(hexa);
index++;
}
});
});
describe('invalid transaction test case', function() {
var index = 0;
vectors_invalid.forEach(function(vector) {
if (vector.length > 1) {
var hexa = vector[1];
Transaction(hexa).serialize().should.equal(hexa);
index++;
}
});
});
});
| 1 | 13,751 | does this boolean indicate unsafe serialization? | bitpay-bitcore | js |
@@ -332,6 +332,13 @@ class Realm {
*/
static deleteFile(config) {}
+ /**
+ * Copy bundled Realm files to app's default file folder.
+ * This is not implemented for node.js.
+ * @throws {Error} If an I/O error occurred or method is not implemented.
+ */
+ static copyBundledRealmFiles() {}
+
/**
* Get a list of subscriptions. THIS METHOD IS IN BETA AND MAY CHANGE IN FUTURE VERSIONS.
* @param {string} name - Optional parameter to query for either a specific name or pattern (using | 1 | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 2016 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
/**
* A Realm instance represents a Realm database.
*
* ```js
* const Realm = require('realm');
* ```
*
*/
class Realm {
/**
* Indicates if this Realm contains any objects.
* @type {boolean}
* @readonly
* @since 1.10.0
*/
get empty() {}
/**
* The path to the file where this Realm is stored.
* @type {string}
* @readonly
* @since 0.12.0
*/
get path() {}
/**
* Indicates if this Realm was opened as read-only.
* @type {boolean}
* @readonly
* @since 0.12.0
*/
get readOnly() {}
/**
* A normalized representation of the schema provided in the
* {@link Realm~Configuration Configuration} when this Realm was constructed.
* @type {Realm~ObjectSchema[]}
* @readonly
* @since 0.12.0
*/
get schema() {}
/**
* The current schema version of this Realm.
* @type {number}
* @readonly
* @since 0.12.0
*/
get schemaVersion() {}
/**
* Indicates if this Realm is in a write transaction.
* @type {boolean}
* @readonly
* @since 1.10.3
*/
get isInTransaction() {}
/**
* Indicates if this Realm has been closed.
* @type {boolean}
* @readonly
* @since 2.1.0
*/
get isClosed() {}
/**
* Gets the sync session if this is a synced Realm
* @type {Session}
*/
get syncSession() {}
/**
* Create a new `Realm` instance using the provided `config`. If a Realm does not yet exist
* at `config.path` (or {@link Realm.defaultPath} if not provided), then this constructor
* will create it with the provided `config.schema` (which is _required_ in this case).
* Otherwise, the instance will access the existing Realm from the file at that path.
 * In this case, `config.schema` is _optional_ and, if provided, must not have changed, unless
* `config.schemaVersion` is incremented, in which case the Realm will be automatically
* migrated to use the new schema.
* In the case of query-based sync, `config.schema` is required. An exception will be
* thrown if `config.schema` is not defined.
* @param {Realm~Configuration} [config] - **Required** when first creating the Realm.
* @throws {Error} If anything in the provided `config` is invalid.
* @throws {IncompatibleSyncedRealmError} when an incompatible synced Realm is opened
*/
constructor(config) {}
/**
* Open a Realm asynchronously with a promise. If the Realm is synced, it will be fully
* synchronized before it is available.
* In the case of query-based sync, `config.schema` is required. An exception will be
* thrown if `config.schema` is not defined.
* @param {Realm~Configuration} config - if no config is defined, it will open the default realm
* @returns {ProgressPromise} - a promise that will be resolved with the Realm instance when it's available.
* @throws {Error} If anything in the provided `config` is invalid.
*/
static open(config) {}
/**
* Open a Realm asynchronously with a callback. If the Realm is synced, it will be fully
* synchronized before it is available.
* @param {Realm~Configuration} config
* @param {callback(error, realm)} - will be called when the Realm is ready.
* @param {callback(transferred, transferable)} [progressCallback] - an optional callback for download progress notifications
* @throws {Error} If anything in the provided `config` is invalid
* @throws {IncompatibleSyncedRealmError} when an incompatible synced Realm is opened
*/
static openAsync(config, callback, progressCallback) {}
/**
 * Return a configuration for a default synced Realm. The server URL for the user will be used as the base for
* the URL for the synced Realm. If no user is supplied, the current user will be used.
* @param {Realm.Sync.User} - an optional sync user
* @throws {Error} if zero or multiple users are logged in
* @returns {Realm~Configuration} - a configuration matching a default synced Realm.
* @since 2.3.0
* @deprecated use {@link Sync.User.createConfiguration()} instead.
*/
static automaticSyncConfiguration(user) {}
/**
* Creates a template object for a Realm model class where all optional fields are `undefined` and all required
* fields have the default value for the given data type, either the value set by the `default` property in the
* schema or the default value for the datatype if the schema doesn't specify one, i.e. `0`, false and `""`.
*
* @param {Realm~ObjectSchema} schema object describing the class
*/
static createTemplateObject(objectSchema) {}
/**
* Closes this Realm so it may be re-opened with a newer schema version.
* All objects and collections from this Realm are no longer valid after calling this method.
*/
close() {}
/**
* Returns the granted privileges.
*
* This combines all privileges granted on the Realm/Class/Object by all Roles which
* the current User is a member of into the final privileges which will
* be enforced by the server.
*
* The privilege calculation is done locally using cached data, and inherently may
* be stale. It is possible that this method may indicate that an operation is
* permitted but the server will still reject it if permission is revoked before
* the changes have been integrated on the server.
*
* Non-synchronized Realms always have permission to perform all operations.
*
* @param {(Realm~ObjectType|Realm.Object)} arg - the object type or the object to compute privileges from. If no
* argument is given, the privileges for the Realm is returned.
 * @returns {Object} the computed privileges as properties
* @since 2.3.0
* @see {Realm.Permissions} for details of privileges and roles.
*/
privileges(arg) {}
/**
* Returns the fine-grained permissions object associated with either the Realm itself or a Realm model class.
*
* @param {Realm~ObjectType} [arg] - If no argument is provided, the Realm-level permissions are returned.
* Otherwise, the Class-level permissions for the provided type is returned.
* @returns {Object} The permissions object
* @since 2.18.0
 * @see {Realm.Permissions} for details of privileges and roles.
*/
permissions(arg) {}
/**
* Create a new Realm object of the given type and with the specified properties.
* @param {Realm~ObjectType} type - The type of Realm object to create.
* @param {Object} properties - Property values for all required properties without a
* default value.
* @param {boolean} [update=false] - Signals that an existing object with matching primary key
* should be updated. Only the primary key property and properties which should be updated
* need to be specified. All missing property values will remain unchanged.
* @returns {Realm.Object}
*/
create(type, properties, update) {}
/**
* Deletes the provided Realm object, or each one inside the provided collection.
* @param {Realm.Object|Realm.Object[]|Realm.List|Realm.Results} object
*/
delete(object) {}
/**
* Deletes a Realm model, including all of its objects.
* @param {string} name - the model name
*/
deleteModel(name) {}
/**
* **WARNING:** This will delete **all** objects in the Realm!
*/
deleteAll() {}
/**
* Returns all objects of the given `type` in the Realm.
* @param {Realm~ObjectType} type - The type of Realm objects to retrieve.
* @throws {Error} If type passed into this method is invalid.
* @returns {Realm.Results} that will live-update as objects are created and destroyed.
*/
objects(type) {}
/**
* Searches for a Realm object by its primary key.
* @param {Realm~ObjectType} type - The type of Realm object to search for.
* @param {number|string} key - The primary key value of the object to search for.
* @throws {Error} If type passed into this method is invalid or if the object type did
* not have a `primaryKey` specified in its {@link Realm~ObjectSchema ObjectSchema}.
* @returns {Realm.Object|undefined} if no object is found.
* @since 0.14.0
*/
objectForPrimaryKey(type, key) {}
/**
* Add a listener `callback` for the specified event `name`.
* @param {string} name - The name of event that should cause the callback to be called.
* _Currently, only the "change" and "schema" events are supported_.
* @param {callback(Realm, string)|callback(Realm, string, Schema)} callback - Function to be called when a change event occurs.
* Each callback will only be called once per event, regardless of the number of times
* it was added.
* @throws {Error} If an invalid event `name` is supplied, or if `callback` is not a function.
*/
addListener(name, callback) {}
/**
 * Remove the listener `callback` for the specified event `name`.
* @param {string} name - The event name.
* _Currently, only the "change" and "schema" events are supported_.
* @param {callback(Realm, string)|callback(Realm, string, Schema)} callback - Function that was previously added as a
* listener for this event through the {@link Realm#addListener addListener} method.
* @throws {Error} If an invalid event `name` is supplied, or if `callback` is not a function.
*/
removeListener(name, callback) {}
/**
* Remove all event listeners (restricted to the event `name`, if provided).
* @param {string} [name] - The name of the event whose listeners should be removed.
* _Currently, only the "change" and "schema" events are supported_.
* @throws {Error} When invalid event `name` is supplied
*/
removeAllListeners(name) {}
/**
* Synchronously call the provided `callback` inside a write transaction.
* @param {function()} callback
*/
write(callback) {}
/**
* Initiate a write transaction.
* @throws {Error} When already in write transaction
*/
beginTransaction() {}
/**
* Commit a write transaction.
*/
commitTransaction() {}
/**
* Cancel a write transaction.
*/
cancelTransaction() {}
/**
* Replaces all string columns in this Realm with a string enumeration column and compacts the
* database file.
*
* Cannot be called from a write transaction.
*
* Compaction will not occur if other `Realm` instances exist.
*
* While compaction is in progress, attempts by other threads or processes to open the database will
* wait.
*
 * Be warned that resource requirements for compaction are proportional to the amount of live data in
* the database. Compaction works by writing the database contents to a temporary database file and
* then replacing the database with the temporary one.
* @returns {true} if compaction succeeds.
*/
compact() {}
/**
* Writes a compacted copy of the Realm to the given path.
*
* The destination file cannot already exist.
*
* Note that if this method is called from within a write transaction, the current data is written,
* not the data from the point when the previous write transaction was committed.
* @param {string} path path to save the Realm to
* @param {ArrayBuffer|ArrayBufferView} [encryptionKey] - Optional 64-byte encryption key to encrypt the new file with.
*/
writeCopyTo(path, encryptionKey) {}
/**
* Get the current schema version of the Realm at the given path.
* @param {string} path - The path to the file where the
* Realm database is stored.
* @param {ArrayBuffer|ArrayBufferView} [encryptionKey] - Required only when
* accessing encrypted Realms.
* @throws {Error} When passing an invalid or non-matching encryption key.
* @returns {number} version of the schema, or `-1` if no Realm exists at `path`.
*/
static schemaVersion(path, encryptionKey) {}
/**
* Delete the Realm file for the given configuration.
* @param {Realm~Configuration} config
* @throws {Error} If anything in the provided `config` is invalid.
*/
static deleteFile(config) {}
/**
* Get a list of subscriptions. THIS METHOD IS IN BETA AND MAY CHANGE IN FUTURE VERSIONS.
* @param {string} name - Optional parameter to query for either a specific name or pattern (using
 * wildcards `?` and `*`).
* @throws {Error} If `name` is not a string.
* @returns an array of objects of (`name`, `objectType`, `query`).
*/
subscriptions(name) {}
/**
* Unsubscribe a named subscription. THIS METHOD IS IN BETA AND MAY CHANGE IN FUTURE VERSIONS.
* @param {string} name - The name of the subscription.
* @throws {Error} If `name` is not a string or an empty string.
*/
unsubscribe(name) {}
}
/**
* This describes the different options used to create a {@link Realm} instance.
* @typedef Realm~Configuration
* @type {Object}
* @property {ArrayBuffer|ArrayBufferView} [encryptionKey] - The 512-bit (64-byte) encryption
* key used to encrypt and decrypt all data in the Realm.
* @property {callback(Realm, Realm)} [migration] - The function to run if a migration is needed.
* This function should provide all the logic for converting data models from previous schemas
* to the new schema.
* This function takes two arguments:
* - `oldRealm` - The Realm before migration is performed.
* - `newRealm` - The Realm that uses the latest `schema`, which should be modified as necessary.
* @property {boolean} [deleteRealmIfMigrationNeeded=false] - Specifies if this Realm should be deleted
* if a migration is needed.
* @property {callback(number, number)} [shouldCompactOnLaunch] - The function called when opening
* a Realm for the first time during the life of a process to determine if it should be compacted
* before being returned to the user. The function takes two arguments:
* - `totalSize` - The total file size (data + free space)
* - `usedSize` - The total bytes used by data in the file.
* It returns `true` to indicate that an attempt to compact the file should be made. The compaction
* will be skipped if another process is accessing it.
* @property {string} [path={@link Realm.defaultPath}] - The path to the file where the
* Realm database should be stored.
* @property {boolean} [inMemory=false] - Specifies if this Realm should be opened in-memory. This
* still requires a path (can be the default path) to identify the Realm so other processes can
* open the same Realm. The file will also be used as swap space if the Realm becomes bigger than
* what fits in memory, but it is not persistent and will be removed when the last instance
* is closed.
* @property {boolean} [readOnly=false] - Specifies if this Realm should be opened as read-only.
* @property {boolean} [disableFormatUpgrade=false] - Specifies if this Realm's file format should
* be automatically upgraded if it was created with an older version of the Realm library.
* If set to `true` and a file format upgrade is required, an error will be thrown instead.
* @property {Array<Realm~ObjectClass|Realm~ObjectSchema>} [schema] - Specifies all the
* object types in this Realm. **Required** when first creating a Realm at this `path`.
* If omitted, the schema will be read from the existing Realm file.
* @property {number} [schemaVersion] - **Required** (and must be incremented) after
* changing the `schema`.
* @property {Realm.Sync~SyncConfiguration} [sync] - Sync configuration parameters.
*/
/**
* Realm objects will inherit methods, getters, and setters from the `prototype` of this
* constructor. It is **highly recommended** that this constructor inherit from
* {@link Realm.Object}.
* @typedef Realm~ObjectClass
* @type {Class}
* @property {Realm~ObjectSchema} schema - Static property specifying object schema information.
*/
/**
* @typedef Realm~ObjectSchema
* @type {Object}
* @property {string} name - Represents the object type.
* @property {string} [primaryKey] - The name of a `"string"` or `"int"` property
* that must be unique across all objects of this type within the same Realm.
* @property {Object<string, (Realm~PropertyType|Realm~ObjectSchemaProperty)>} properties -
* An object where the keys are property names and the values represent the property type.
*
* @example
* let MyClassSchema = {
* name: 'MyClass',
* primaryKey: 'pk',
* properties: {
* pk: 'int',
* optionalFloatValue: 'float?' // or {type: 'float', optional: true}
* listOfStrings: 'string[]',
* listOfOptionalDates: 'date?[]',
* indexedInt: {type: 'int', indexed: true}
*
* linkToObject: 'MyClass',
* listOfObjects: 'MyClass[]', // or {type: 'list', objectType: 'MyClass'}
* objectsLinkingToThisObject: {type: 'linkingObjects', objectType: 'MyClass', property: 'linkToObject'}
* }
* };
*/
/**
* @typedef Realm~ObjectSchemaProperty
* @type {Object}
* @property {Realm~PropertyType} type - The type of this property.
* @property {Realm~PropertyType} [objectType] - **Required** when `type` is `"list"` or `"linkingObjects"`,
* and must match the type of an object in the same schema, or, for `"list"`
* only, any other type which may be stored as a Realm property.
* @property {string} [property] - **Required** when `type` is `"linkingObjects"`, and must match
* the name of a property on the type specified in `objectType` that links to the type this property belongs to.
* @property {any} [default] - The default value for this property on creation when not
* otherwise specified.
* @property {boolean} [optional] - Signals if this property may be assigned `null` or `undefined`.
* For `"list"` properties of non-object types, this instead signals whether the values inside the list may be assigned `null` or `undefined`.
* This is not supported for `"list"` properties of object types and `"linkingObjects"` properties.
* @property {boolean} [indexed] - Signals if this property should be indexed. Only supported for
* `"string"`, `"int"`, and `"bool"` properties.
*/
/**
* The type of an object may either be specified as a string equal to the `name` in a
* {@link Realm~ObjectSchema ObjectSchema} definition, **or** a constructor that was specified
* in the {@link Realm~Configuration configuration} `schema`.
* @typedef Realm~ObjectType
* @type {string|Realm~ObjectClass}
*/
/**
* A property type may be specified as one of the standard builtin types, or as
* an object type inside the same schema.
*
* When specifying property types in an {@linkplain Realm~ObjectSchema object schema}, you
* may append `?` to any of the property types to indicate that it is optional
* (i.e. it can be `null` in addition to the normal values) and `[]` to
* indicate that it is instead a list of that type. For example,
* `optionalIntList: 'int?[]'` would declare a property which is a list of
* nullable integers. The property types reported by {@linkplain Realm.Collection
* collections} and in a Realm's schema will never
* use these forms.
*
* @typedef Realm~PropertyType
* @type {("bool"|"int"|"float"|"double"|"string"|"date"|"data"|"list"|"linkingObjects"|"<ObjectType>")}
*
* @property {boolean} "bool" - Property value may either be `true` or `false`.
* @property {number} "int" - Property may be assigned any number, but will be stored as a
* round integer, meaning anything after the decimal will be truncated.
* @property {number} "float" - Property may be assigned any number, but will be stored as a
* `float`, which may result in a loss of precision.
* @property {number} "double" - Property may be assigned any number, and will have no loss
* of precision.
* @property {string} "string" - Property value may be any arbitrary string.
* @property {Date} "date" - Property may be assigned any `Date` instance.
* @property {ArrayBuffer} "data" - Property may either be assigned an `ArrayBuffer`
* or `ArrayBufferView` (e.g. `DataView`, `Int8Array`, `Float32Array`, etc.) instance,
* but will always be returned as an `ArrayBuffer`.
* @property {Realm.List} "list" - Property may be assigned any ordered collection
* (e.g. `Array`, {@link Realm.List}, {@link Realm.Results}) of objects all matching the
* `objectType` specified in the {@link Realm~ObjectSchemaProperty ObjectSchemaProperty}.
* @property {Realm.Results} "linkingObjects" - Property is read-only and always returns a {@link Realm.Results}
* of all the objects matching the `objectType` that are linking to the current object
* through the `property` relationship specified in {@link Realm~ObjectSchemaProperty ObjectSchemaProperty}.
* @property {Realm.Object} "<ObjectType>" - A string that matches the `name` of an object in the
* same schema (see {@link Realm~ObjectSchema ObjectSchema}) – this property may be assigned
* any object of this type from inside the same Realm, and will always be _optional_
* (meaning it may also be assigned `null` or `undefined`).
*/
| 1 | 17,364 | Maybe rewrite to `Is only implemented for React Native`? (I assume that is the case). | realm-realm-js | js |
@@ -246,8 +246,15 @@ public abstract class PageStreamingConfig {
if (pageSizeField == null) {
// TODO: Conform to design doc spec, once approved, for using non-standard paging fields
// (such as max_results for page_size)
- if (language == TargetLanguage.JAVA && transportProtocol == TransportProtocol.HTTP) {
- pageSizeField = methodModel.getInputField(pagingParams.getNameForMaxResults());
+ if (transportProtocol == TransportProtocol.HTTP) {
+ if (language == TargetLanguage.JAVA) {
+ pageSizeField = methodModel.getInputField(pagingParams.getNameForMaxResults());
+ } else if (language == TargetLanguage.PHP) {
+ FieldModel resourcesField = ProtoPageStreamingTransformer.getResourcesField(methodModel);
+ if (resourcesField != null && !resourcesField.isMap()) {
+ pageSizeField = methodModel.getInputField(pagingParams.getNameForMaxResults());
+ }
+ }
}
}
ProtoField responseTokenField = | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.config;
import com.google.api.codegen.MethodConfigProto;
import com.google.api.codegen.PageStreamingConfigProto;
import com.google.api.codegen.ResourceNameTreatment;
import com.google.api.codegen.common.TargetLanguage;
import com.google.api.codegen.configgen.ProtoPageStreamingTransformer;
import com.google.api.codegen.configgen.ProtoPagingParameters;
import com.google.api.codegen.util.ProtoParser;
import com.google.api.tools.framework.model.Diag;
import com.google.api.tools.framework.model.DiagCollector;
import com.google.api.tools.framework.model.SimpleLocation;
import com.google.auto.value.AutoValue;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
/** PageStreamingConfig represents the page streaming configuration for a method. */
@AutoValue
public abstract class PageStreamingConfig {
@AutoValue
public abstract static class PagingFields {
public abstract FieldModel getRequestTokenField();
// This can be a required field once all APIs have this configured.
@Nullable
public abstract FieldModel getPageSizeField();
public abstract FieldModel getResponseTokenField();
public static PagingFields.Builder newBuilder() {
return new AutoValue_PageStreamingConfig_PagingFields.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract PagingFields.Builder setRequestTokenField(FieldModel val);
public abstract PagingFields.Builder setPageSizeField(FieldModel val);
public abstract PagingFields.Builder setResponseTokenField(FieldModel val);
public abstract PagingFields build();
}
}
public abstract PagingFields getPagingFields();
public abstract FieldConfig getResourcesFieldConfig();
@Nullable
public FieldModel getPageSizeField() {
return getPagingFields().getPageSizeField();
}
public FieldModel getResponseTokenField() {
return getPagingFields().getResponseTokenField();
}
public FieldModel getRequestTokenField() {
return getPagingFields().getRequestTokenField();
}
/** Returns whether there is a field for page size. */
public boolean hasPageSizeField() {
return getPageSizeField() != null;
}
public FieldModel getResourcesField() {
return getResourcesFieldConfig().getField();
}
public String getResourcesFieldName() {
return getResourcesField().getSimpleName();
}
// TODO(andrealin): combine this with the protofile one, pass in resourcenametreatment as param
/**
* Creates an instance of PageStreamingConfig based on PageStreamingConfigProto, linking it up
* with the provided method. On errors, null will be returned, and diagnostics are reported to the
* diag collector.
*/
@Nullable
static PageStreamingConfig createPageStreamingFromGapicConfig(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
MethodConfigProto methodConfigProto,
MethodModel method) {
PageStreamingConfigProto pageStreaming = methodConfigProto.getPageStreaming();
String requestTokenFieldName = pageStreaming.getRequest().getTokenField();
FieldModel requestTokenField = method.getInputField(requestTokenFieldName);
if (requestTokenField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Request field missing for page streaming: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getInputFullName(),
requestTokenFieldName));
}
String pageSizeFieldName = pageStreaming.getRequest().getPageSizeField();
FieldModel pageSizeField = null;
if (!Strings.isNullOrEmpty(pageSizeFieldName)) {
pageSizeField = method.getInputField(pageSizeFieldName);
if (pageSizeField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Request field missing for page streaming: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getInputFullName(),
pageSizeFieldName));
}
}
String responseTokenFieldName = pageStreaming.getResponse().getTokenField();
FieldModel responseTokenField = method.getOutputField(responseTokenFieldName);
if (responseTokenField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Response field missing for page streaming: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getOutputFullName(),
responseTokenFieldName));
}
String resourcesFieldName = pageStreaming.getResponse().getResourcesField();
FieldModel resourcesField = method.getOutputField(resourcesFieldName);
FieldConfig resourcesFieldConfig;
if (resourcesField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Resources field missing for page streaming: method = %s, message type = %s, field = %s",
method.getFullName(),
method.getOutputFullName(),
resourcesFieldName));
resourcesFieldConfig = null;
} else {
resourcesFieldConfig =
FieldConfigFactory.createMessageFieldConfig(
messageConfigs,
resourceNameConfigs,
resourcesField,
methodConfigProto.getResourceNameTreatment());
}
if (requestTokenField == null || responseTokenField == null || resourcesFieldConfig == null) {
return null;
}
PagingFields pagingFields =
PagingFields.newBuilder()
.setPageSizeField(pageSizeField)
.setRequestTokenField(requestTokenField)
.setResponseTokenField(responseTokenField)
.build();
return new AutoValue_PageStreamingConfig(pagingFields, resourcesFieldConfig);
}
/**
* Creates an instance of PageStreamingConfig based on PageStreamingConfigProto, linking it up
* with the provided method. On errors, null will be returned, and diagnostics are reported to the
* diag collector.
*/
@Nullable
static PageStreamingConfig createPageStreamingFromProtoFile(
DiagCollector diagCollector,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
ProtoMethodModel method,
PagingFields pagingFields,
ProtoParser protoParser,
String defaultPackageName) {
FieldModel requestTokenField = pagingFields.getRequestTokenField();
FieldModel responseTokenField = pagingFields.getResponseTokenField();
FieldModel resourcesField = ProtoPageStreamingTransformer.getResourcesField(method);
FieldConfig resourcesFieldConfig;
if (resourcesField == null) {
diagCollector.addDiag(
Diag.error(
SimpleLocation.TOPLEVEL,
"Resources field missing for page streaming: method = %s, message type = %s",
method.getFullName(),
method.getOutputFullName()));
resourcesFieldConfig = null;
} else {
ResourceNameTreatment resourceNameTreatment =
GapicMethodConfig.defaultResourceNameTreatmentFromProto(
method.getProtoMethod(), protoParser, defaultPackageName);
resourcesFieldConfig =
FieldConfigFactory.createMessageFieldConfig(
messageConfigs, resourceNameConfigs, resourcesField, resourceNameTreatment);
}
if (requestTokenField == null || responseTokenField == null || resourcesFieldConfig == null) {
return null;
}
return new AutoValue_PageStreamingConfig(pagingFields, resourcesFieldConfig);
}
/** package-private for use by {@link GapicMethodConfig}. */
static PageStreamingConfig createPageStreamingConfig(
TargetLanguage language,
TransportProtocol transportProtocol,
DiagCollector diagCollector,
String defaultPackageName,
ProtoMethodModel methodModel,
MethodConfigProto methodConfigProto,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs,
ProtoParser protoParser) {
// Let the GAPIC config define a page streaming config as an override.
if (methodConfigProto.hasPageStreaming()) {
return createPageStreamingFromGapicConfig(
diagCollector, messageConfigs, resourceNameConfigs, methodConfigProto, methodModel);
}
// Toggle pagination based on presence of paging params.
// See https://cloud.google.com/apis/design/design_patterns for API pagination pattern.
ProtoPagingParameters pagingParams = new ProtoPagingParameters();
ProtoField tokenField = methodModel.getInputField(pagingParams.getNameForPageToken());
ProtoField pageSizeField = methodModel.getInputField(pagingParams.getNameForPageSize());
if (pageSizeField == null) {
// TODO: Conform to design doc spec, once approved, for using non-standard paging fields
// (such as max_results for page_size)
if (language == TargetLanguage.JAVA && transportProtocol == TransportProtocol.HTTP) {
pageSizeField = methodModel.getInputField(pagingParams.getNameForMaxResults());
}
}
ProtoField responseTokenField =
methodModel.getOutputField(pagingParams.getNameForNextPageToken());
if (tokenField != null && responseTokenField != null && pageSizeField != null) {
PagingFields pagingFields =
PagingFields.newBuilder()
.setResponseTokenField(responseTokenField)
.setRequestTokenField(tokenField)
.setPageSizeField(pageSizeField)
.build();
return PageStreamingConfig.createPageStreamingFromProtoFile(
diagCollector,
messageConfigs,
resourceNameConfigs,
methodModel,
pagingFields,
protoParser,
defaultPackageName);
}
return null;
}
/** package-private for use by {@link GapicMethodConfig}. */
static PageStreamingConfig createPageStreamingConfig(
DiagCollector diagCollector,
ProtoMethodModel methodModel,
@Nonnull MethodConfigProto methodConfigProto,
ResourceNameMessageConfigs messageConfigs,
ImmutableMap<String, ResourceNameConfig> resourceNameConfigs) {
if (!PageStreamingConfigProto.getDefaultInstance()
.equals(methodConfigProto.getPageStreaming())) {
return PageStreamingConfig.createPageStreamingFromGapicConfig(
diagCollector, messageConfigs, resourceNameConfigs, methodConfigProto, methodModel);
}
return null;
}
}
| 1 | 31,044 | Do we not need this in Java because Java *is* handling map responses? | googleapis-gapic-generator | java |
@@ -279,6 +279,14 @@ class Task(object):
except BaseException:
logger.exception("Error in event callback for %r", event)
+ @property
+ def accepted_messages(self):
+ """
+        Configures which scheduler messages can be received and returns them. When falsy, this task
+        does not accept any messages. When True, all messages are accepted.
+ """
+ return False
+
@property
def task_module(self):
''' Returns what Python module to import to get access to this class. ''' | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Task` class.
It is a central concept of Luigi and represents the state of the workflow.
See :doc:`/tasks` for an overview.
"""
try:
from itertools import imap as map
except ImportError:
pass
from contextlib import contextmanager
import logging
import traceback
import warnings
import json
import hashlib
import re
import copy
import functools
import luigi
from luigi import six
from luigi import parameter
from luigi.task_register import Register
Parameter = parameter.Parameter
logger = logging.getLogger('luigi-interface')
TASK_ID_INCLUDE_PARAMS = 3
TASK_ID_TRUNCATE_PARAMS = 16
TASK_ID_TRUNCATE_HASH = 10
TASK_ID_INVALID_CHAR_REGEX = re.compile(r'[^A-Za-z0-9_]')
_SAME_AS_PYTHON_MODULE = '_same_as_python_module'
def namespace(namespace=None, scope=''):
"""
Call to set namespace of tasks declared after the call.
It is often desired to call this function with the keyword argument
``scope=__name__``.
The ``scope`` keyword makes it so that this call is only effective for task
classes with a matching [*]_ ``__module__``. The default value for
``scope`` is the empty string, which means all classes. Multiple calls with
the same scope simply replace each other.
The namespace of a :py:class:`Task` can also be changed by specifying the property
``task_namespace``.
.. code-block:: python
class Task2(luigi.Task):
task_namespace = 'namespace2'
This explicit setting takes priority over whatever is set in the
``namespace()`` method, and it's also inherited through normal python
inheritence.
There's no equivalent way to set the ``task_family``.
*New since Luigi 2.6.0:* ``scope`` keyword argument.
.. [*] When there are multiple levels of matching module scopes like
``a.b`` vs ``a.b.c``, the more specific one (``a.b.c``) wins.
.. seealso:: The new and better scaling :py:func:`auto_namespace`
"""
Register._default_namespace_dict[scope] = namespace or ''
def auto_namespace(scope=''):
"""
Same as :py:func:`namespace`, but instead of a constant namespace, it will
be set to the ``__module__`` of the task class. This is desirable for these
reasons:
* Two tasks with the same name will not have conflicting task families
* It's more pythonic, as modules are Python's recommended way to
do namespacing.
* It's traceable. When you see the full name of a task, you can immediately
identify where it is defined.
We recommend calling this function from your package's outermost
``__init__.py`` file. The file contents could look like this:
.. code-block:: python
import luigi
luigi.auto_namespace(scope=__name__)
To reset an ``auto_namespace()`` call, you can use
``namespace(scope='my_scope'``). But this will not be
needed (and is also discouraged) if you use the ``scope`` kwarg.
*New since Luigi 2.6.0.*
"""
namespace(namespace=_SAME_AS_PYTHON_MODULE, scope=scope)
def task_id_str(task_family, params):
"""
Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params
"""
    # task_id is a concatenation of the task family, the values of the first 3 parameters
    # sorted by parameter name, and an md5 hash of the parameters serialised as canonicalised json.
param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]
for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))
param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary)
return '{}_{}_{}'.format(task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH])
class BulkCompleteNotImplementedError(NotImplementedError):
"""This is here to trick pylint.
pylint thinks anything raising NotImplementedError needs to be implemented
in any subclass. bulk_complete isn't like that. This tricks pylint into
thinking that the default implementation is a valid implementation and not
an abstract method."""
pass
@six.add_metaclass(Register)
class Task(object):
"""
This is the base class of all Luigi Tasks, the base unit of work in Luigi.
    A Luigi Task describes a unit of work.
    The key methods of a Task, which must be implemented in a subclass, are:
* :py:meth:`run` - the computation done by this task.
* :py:meth:`requires` - the list of Tasks that this Task depends on.
* :py:meth:`output` - the output :py:class:`Target` that this Task creates.
    Each :py:class:`~luigi.Parameter` of the Task should be declared as a member:
.. code:: python
class MyTask(luigi.Task):
count = luigi.IntParameter()
second_param = luigi.Parameter()
In addition to any declared properties and methods, there are a few
non-declared properties, which are created by the :py:class:`Register`
metaclass:
"""
_event_callbacks = {}
#: Priority of the task: the scheduler should favor available
#: tasks with higher priority values first.
#: See :ref:`Task.priority`
priority = 0
disabled = False
#: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the
#: task requires 1 unit of the scp resource.
resources = {}
#: Number of seconds after which to time out the run function.
#: No timeout if set to 0.
#: Defaults to 0 or worker-timeout value in config file
#: Only works when using multiple workers.
worker_timeout = None
#: Maximum number of tasks to run together as a batch. Infinite by default
max_batch_size = float('inf')
@property
def batchable(self):
"""
True if this instance can be run as part of a batch. By default, True
if it has any batched parameters
"""
return bool(self.batch_param_names())
@property
def retry_count(self):
"""
        Override this with a positive integer to use a different ``retry_count`` at the task level.
Check :ref:`scheduler-config`
"""
return None
@property
def disable_hard_timeout(self):
"""
        Override this with a positive integer to use a different ``disable_hard_timeout`` at the task level.
Check :ref:`scheduler-config`
"""
return None
@property
def disable_window_seconds(self):
"""
        Override this with a positive integer to use a different ``disable_window_seconds`` at the task level.
Check :ref:`scheduler-config`
"""
return None
@property
def owner_email(self):
'''
Override this to send out additional error emails to task owner, in addition to the one
defined in the global configuration. This should return a string or a list of strings. e.g.
'[email protected]' or ['[email protected]', '[email protected]']
'''
return None
def _owner_list(self):
"""
Turns the owner_email property into a list. This should not be overridden.
"""
owner_email = self.owner_email
if owner_email is None:
return []
elif isinstance(owner_email, six.string_types):
return owner_email.split(',')
else:
return owner_email
@property
def use_cmdline_section(self):
''' Property used by core config such as `--workers` etc.
These will be exposed without the class as prefix.'''
return True
@classmethod
def event_handler(cls, event):
"""
Decorator for adding event handlers.
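        A minimal usage sketch (``MyTask`` and the handler name are illustrative;
        ``luigi.Event.SUCCESS`` is one of the standard events):
        .. code-block:: python
            @MyTask.event_handler(luigi.Event.SUCCESS)
            def celebrate_success(task):
                print('Finished:', task)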
"""
def wrapped(callback):
cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback)
return callback
return wrapped
def trigger_event(self, event, *args, **kwargs):
"""
        Trigger an event, calling all of the callbacks registered for that event on this class.
"""
for event_class, event_callbacks in six.iteritems(self._event_callbacks):
if not isinstance(self, event_class):
continue
for callback in event_callbacks.get(event, []):
try:
# callbacks are protected
callback(*args, **kwargs)
except KeyboardInterrupt:
return
except BaseException:
logger.exception("Error in event callback for %r", event)
@property
def task_module(self):
''' Returns what Python module to import to get access to this class. '''
# TODO(erikbern): we should think about a language-agnostic mechanism
return self.__class__.__module__
_visible_in_registry = True # TODO: Consider using in luigi.util as well
__not_user_specified = '__not_user_specified'
# This is here just to help pylint, the Register metaclass will always set
# this value anyway.
_namespace_at_class_time = None
task_namespace = __not_user_specified
"""
    This value can be overridden to set the namespace that will be used.
(See :ref:`Task.namespaces_famlies_and_ids`)
If it's not specified and you try to read this value anyway, it will return
garbage. Please use :py:meth:`get_task_namespace` to read the namespace.
Note that setting this value with ``@property`` will not work, because this
is a class level value.
"""
@classmethod
def get_task_namespace(cls):
"""
        The task namespace for the given class.
Note: You normally don't want to override this.
"""
if cls.task_namespace != cls.__not_user_specified:
return cls.task_namespace
elif cls._namespace_at_class_time == _SAME_AS_PYTHON_MODULE:
return cls.__module__
return cls._namespace_at_class_time
@property
def task_family(self):
"""
DEPRECATED since after 2.4.0. See :py:meth:`get_task_family` instead.
Hopefully there will be less meta magic in Luigi.
Convenience method since a property on the metaclass isn't directly
accessible through the class instances.
"""
return self.__class__.task_family
@classmethod
def get_task_family(cls):
"""
The task family for the given class.
If ``task_namespace`` is not set, then it's simply the name of the
class. Otherwise, ``<task_namespace>.`` is prefixed to the class name.
Note: You normally don't want to override this.
"""
if not cls.get_task_namespace():
return cls.__name__
else:
return "{}.{}".format(cls.get_task_namespace(), cls.__name__)
@classmethod
def get_params(cls):
"""
Returns all of the Parameters for this Task.
"""
# We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically
params = []
for param_name in dir(cls):
param_obj = getattr(cls, param_name)
if not isinstance(param_obj, Parameter):
continue
params.append((param_name, param_obj))
# The order the parameters are created matters. See Parameter class
params.sort(key=lambda t: t[1]._counter)
return params
@classmethod
def batch_param_names(cls):
return [name for name, p in cls.get_params() if p._is_batchable()]
@classmethod
def get_param_names(cls, include_significant=False):
return [name for name, p in cls.get_params() if include_significant or p.significant]
@classmethod
def get_param_values(cls, params, args, kwargs):
"""
Get the values of the parameters from the args and kwargs.
:param params: list of (param_name, Parameter).
:param args: positional arguments
:param kwargs: keyword arguments.
:returns: list of `(name, value)` tuples, one for each parameter.
"""
result = {}
params_dict = dict(params)
task_family = cls.get_task_family()
# In case any exceptions are thrown, create a helpful description of how the Task was invoked
# TODO: should we detect non-reprable arguments? These will lead to mysterious errors
exc_desc = '%s[args=%s, kwargs=%s]' % (task_family, args, kwargs)
# Fill in the positional arguments
positional_params = [(n, p) for n, p in params if p.positional]
for i, arg in enumerate(args):
if i >= len(positional_params):
raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args)))
param_name, param_obj = positional_params[i]
result[param_name] = param_obj.normalize(arg)
# Then the keyword arguments
for param_name, arg in six.iteritems(kwargs):
if param_name in result:
raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name))
if param_name not in params_dict:
raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name))
result[param_name] = params_dict[param_name].normalize(arg)
# Then use the defaults for anything not filled in
for param_name, param_obj in params:
if param_name not in result:
if not param_obj.has_task_value(task_family, param_name):
raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name))
result[param_name] = param_obj.task_value(task_family, param_name)
def list_to_tuple(x):
""" Make tuples out of lists and sets to allow hashing """
if isinstance(x, list) or isinstance(x, set):
return tuple(x)
else:
return x
# Sort it by the correct order and make a list
return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params]
def __init__(self, *args, **kwargs):
params = self.get_params()
param_values = self.get_param_values(params, args, kwargs)
# Set all values on class instance
for key, value in param_values:
setattr(self, key, value)
# Register kwargs as an attribute on the class. Might be useful
self.param_kwargs = dict(param_values)
self._warn_on_wrong_param_types()
self.task_id = task_id_str(self.get_task_family(), self.to_str_params(only_significant=True))
self.__hash = hash(self.task_id)
self.set_tracking_url = None
self.set_status_message = None
self.set_progress_percentage = None
@property
def param_args(self):
warnings.warn("Use of param_args has been deprecated.", DeprecationWarning)
return tuple(self.param_kwargs[k] for k, v in self.get_params())
def initialized(self):
"""
Returns ``True`` if the Task is initialized and ``False`` otherwise.
"""
return hasattr(self, 'task_id')
def _warn_on_wrong_param_types(self):
params = dict(self.get_params())
for param_name, param_value in six.iteritems(self.param_kwargs):
params[param_name]._warn_on_wrong_param_type(param_name, param_value)
@classmethod
def from_str_params(cls, params_str):
"""
Creates an instance from a str->str hash.
:param params_str: dict of param name -> value as string.
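        A minimal sketch, assuming ``MyTask`` declares ``count = luigi.IntParameter()``:
        .. code-block:: python
            task = MyTask.from_str_params({'count': '5'})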
"""
kwargs = {}
for param_name, param in cls.get_params():
if param_name in params_str:
param_str = params_str[param_name]
if isinstance(param_str, list):
kwargs[param_name] = param._parse_list(param_str)
else:
kwargs[param_name] = param.parse(param_str)
return cls(**kwargs)
def to_str_params(self, only_significant=False):
"""
Convert all parameters to a str->str hash.
"""
params_str = {}
params = dict(self.get_params())
for param_name, param_value in six.iteritems(self.param_kwargs):
if (not only_significant) or params[param_name].significant:
params_str[param_name] = params[param_name].serialize(param_value)
return params_str
def clone(self, cls=None, **kwargs):
"""
Creates a new instance from an existing instance where some of the args have changed.
        There are at least two scenarios where this is useful (see test/clone_test.py):
        * remove a lot of boilerplate when you have recursive dependencies and lots of args
* there's task inheritance and some logic is on the base class
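        A minimal sketch (``my_task`` and ``param1`` are illustrative names):
        .. code-block:: python
            other = my_task.clone(param1='new value')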
:param cls:
:param kwargs:
:return:
"""
if cls is None:
cls = self.__class__
new_k = {}
for param_name, param_class in cls.get_params():
if param_name in kwargs:
new_k[param_name] = kwargs[param_name]
elif hasattr(self, param_name):
new_k[param_name] = getattr(self, param_name)
return cls(**new_k)
def __hash__(self):
return self.__hash
def __repr__(self):
"""
Build a task representation like `MyTask(param1=1.5, param2='5')`
"""
params = self.get_params()
param_values = self.get_param_values(params, [], self.param_kwargs)
# Build up task id
repr_parts = []
param_objs = dict(params)
for param_name, param_value in param_values:
if param_objs[param_name].significant:
repr_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))
task_str = '{}({})'.format(self.get_task_family(), ', '.join(repr_parts))
return task_str
def __eq__(self, other):
return self.__class__ == other.__class__ and self.param_kwargs == other.param_kwargs
def complete(self):
"""
If the task has any outputs, return ``True`` if all outputs exist.
Otherwise, return ``False``.
However, you may freely override this method with custom logic.
"""
outputs = flatten(self.output())
if len(outputs) == 0:
warnings.warn(
"Task %r without outputs has no custom complete() method" % self,
stacklevel=2
)
return False
return all(map(lambda output: output.exists(), outputs))
@classmethod
def bulk_complete(cls, parameter_tuples):
"""
Returns those of parameter_tuples for which this Task is complete.
Override (with an efficient implementation) for efficient scheduling
with range tools. Keep the logic consistent with that of complete().
"""
raise BulkCompleteNotImplementedError()
def output(self):
"""
The output that this Task produces.
The output of the Task determines if the Task needs to be run--the task
is considered finished iff the outputs all exist. Subclasses should
override this method to return a single :py:class:`Target` or a list of
:py:class:`Target` instances.
        Implementation note:
If running multiple workers, the output must be a resource that is accessible
by all workers, such as a DFS or database. Otherwise, workers might compute
the same output since they don't see the work done by other workers.
See :ref:`Task.output`
"""
return [] # default impl
def requires(self):
"""
The Tasks that this Task depends on.
A Task will only run if all of the Tasks that it requires are completed.
If your Task does not require any other Tasks, then you don't need to
        override this method. Otherwise, a subclass can override this method
to return a single Task, a list of Task instances, or a dict whose
values are Task instances.
See :ref:`Task.requires`
"""
return [] # default impl
def _requires(self):
"""
Override in "template" tasks which themselves are supposed to be
subclassed and thus have their requires() overridden (name preserved to
provide consistent end-user experience), yet need to introduce
(non-input) dependencies.
Must return an iterable which among others contains the _requires() of
the superclass.
"""
return flatten(self.requires()) # base impl
def process_resources(self):
"""
Override in "template" tasks which provide common resource functionality
but allow subclasses to specify additional resources while preserving
the name for consistent end-user experience.
"""
return self.resources # default impl
def input(self):
"""
Returns the outputs of the Tasks returned by :py:meth:`requires`
See :ref:`Task.input`
:return: a list of :py:class:`Target` objects which are specified as
outputs of all required Tasks.
"""
return getpaths(self.requires())
def deps(self):
"""
Internal method used by the scheduler.
Returns the flattened list of requires.
"""
# used by scheduler
return flatten(self._requires())
def run(self):
"""
The task run method, to be overridden in a subclass.
See :ref:`Task.run`
"""
pass # default impl
def on_failure(self, exception):
"""
Override for custom error handling.
This method gets called if an exception is raised in :py:meth:`run`.
The returned value of this method is json encoded and sent to the scheduler
as the `expl` argument. Its string representation will be used as the
body of the error email sent out if any.
Default behavior is to return a string representation of the stack trace.
"""
traceback_string = traceback.format_exc()
return "Runtime error:\n%s" % traceback_string
def on_success(self):
"""
Override for doing custom completion handling for a larger class of tasks
This method gets called when :py:meth:`run` completes without raising any exceptions.
The returned value is json encoded and sent to the scheduler as the `expl` argument.
        Default behavior is to send a None value."""
pass
@contextmanager
def no_unpicklable_properties(self):
"""
        Remove unpicklable properties before dumping the task and restore them afterwards.
        This method can be called in a subclass's dump method, to ensure that unpicklable
        properties won't break the dump.
This method is a context-manager which can be called as below:
        .. code-block:: python
            class DummyTask(luigi.Task):
def _dump(self):
with self.no_unpicklable_properties():
pickle.dumps(self)
"""
unpicklable_properties = tuple(luigi.worker.TaskProcess.forward_reporter_callbacks.values())
reserved_properties = {}
for property_name in unpicklable_properties:
if hasattr(self, property_name):
reserved_properties[property_name] = getattr(self, property_name)
setattr(self, property_name, 'placeholder_during_pickling')
yield
for property_name, value in six.iteritems(reserved_properties):
setattr(self, property_name, value)
class MixinNaiveBulkComplete(object):
"""
Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop.
Applicable to tasks whose completeness checking is cheap.
    This doesn't exploit output-location-specific APIs for a speed advantage, but it nevertheless removes redundant scheduler roundtrips.
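    A hypothetical usage sketch:
    .. code-block:: python
        class MyCheapTask(MixinNaiveBulkComplete, luigi.Task):
            date = luigi.DateParameter()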
"""
@classmethod
def bulk_complete(cls, parameter_tuples):
generated_tuples = []
for parameter_tuple in parameter_tuples:
if isinstance(parameter_tuple, (list, tuple)):
if cls(*parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
elif isinstance(parameter_tuple, dict):
if cls(**parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
else:
if cls(parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
return generated_tuples
class ExternalTask(Task):
"""
Subclass for references to external dependencies.
    An ExternalTask does not have a `run` implementation, which signifies to
the framework that this Task's :py:meth:`output` is generated outside of
Luigi.
"""
run = None
def externalize(taskclass_or_taskobject):
"""
    Returns an externalized version of a Task. You may pass either an
    instantiated task object or a task class. Some examples:
.. code-block:: python
class RequiringTask(luigi.Task):
def requires(self):
task_object = self.clone(MyTask)
return externalize(task_object)
...
Here's mostly equivalent code, but ``externalize`` is applied to a task
class instead.
.. code-block:: python
@luigi.util.requires(externalize(MyTask))
class RequiringTask(luigi.Task):
pass
...
Of course, it may also be used directly on classes and objects (for example
for reexporting or other usage).
.. code-block:: python
MyTask = externalize(MyTask)
my_task_2 = externalize(MyTask2(param='foo'))
If you however want a task class to be external from the beginning, you're
better off inheriting :py:class:`ExternalTask` rather than :py:class:`Task`.
This function tries to be side-effect free by creating a copy of the class
    or the object passed in and then modifying that copy. In particular, the
    following code shouldn't do anything.
.. code-block:: python
externalize(MyTask) # BAD: This does nothing (as after luigi 2.4.0)
"""
# Seems like with python < 3.3 copy.copy can't copy classes
# and objects with specified metaclass http://bugs.python.org/issue11480
compatible_copy = copy.copy if six.PY3 else copy.deepcopy
copied_value = compatible_copy(taskclass_or_taskobject)
if copied_value is taskclass_or_taskobject:
# Assume it's a class
clazz = taskclass_or_taskobject
@_task_wraps(clazz)
class _CopyOfClass(clazz):
# How to copy a class: http://stackoverflow.com/a/9541120/621449
_visible_in_registry = False
_CopyOfClass.run = None
return _CopyOfClass
else:
# We assume it's an object
copied_value.run = None
return copied_value
class WrapperTask(Task):
"""
Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist.
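    A hypothetical example wrapping two other (illustrative) tasks:
    .. code-block:: python
        class AllReports(luigi.WrapperTask):
            def requires(self):
                return [ReportA(), ReportB()]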
"""
def complete(self):
return all(r.complete() for r in flatten(self.requires()))
class Config(Task):
"""
Class for configuration. See :ref:`ConfigClasses`.
"""
# TODO: let's refactor Task & Config so that it inherits from a common
# ParamContainer base class
pass
def getpaths(struct):
"""
Maps all Tasks in a structured data object to their .output().
"""
if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
return struct.__class__((k, getpaths(v)) for k, v in six.iteritems(struct))
elif isinstance(struct, (list, tuple)):
return struct.__class__(getpaths(r) for r in struct)
else:
# Remaining case: assume struct is iterable...
try:
return [getpaths(r) for r in struct]
except TypeError:
raise Exception('Cannot map %s to Task/dict/list' % str(struct))
def flatten(struct):
"""
    Creates a flat list of all items in structured output (dicts, lists, items):
.. code-block:: python
>>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
['bar', 'foo']
>>> sorted(flatten(['foo', ['bar', 'troll']]))
['bar', 'foo', 'troll']
>>> flatten('foo')
['foo']
>>> flatten(42)
[42]
"""
if struct is None:
return []
flat = []
if isinstance(struct, dict):
for _, result in six.iteritems(struct):
flat += flatten(result)
return flat
if isinstance(struct, six.string_types):
return [struct]
try:
# if iterable
iterator = iter(struct)
except TypeError:
return [struct]
for result in iterator:
flat += flatten(result)
return flat
def flatten_output(task):
"""
Lists all output targets by recursively walking output-less (wrapper) tasks.
FIXME order consistently.
"""
r = flatten(task.output())
if not r:
for dep in flatten(task.requires()):
r += flatten_output(dep)
return r
def _task_wraps(task_class):
# In order to make the behavior of a wrapper class nicer, we set the name of the
# new class to the wrapped class, and copy over the docstring and module as well.
# This makes it possible to pickle the wrapped class etc.
# Btw, this is a slight abuse of functools.wraps. It's meant to be used only for
# functions, but it works for classes too, if you pass updated=[]
assigned = functools.WRAPPER_ASSIGNMENTS + ('_namespace_at_class_time',)
return functools.wraps(task_class, assigned=assigned, updated=[])
| 1 | 17,954 | Maybe "For configuring which scheduler messages can be received."? | spotify-luigi | py |
@@ -141,7 +141,7 @@ class SyncThumbsCommand extends BaseCommand
}
//clear entity manager for saving memory
- $this->getMediaManager()->getEntityManager()->clear();
+ $this->getMediaManager()->getObjectManager()->clear();
if ($batchesLimit > 0 && $batchCounter == $batchesLimit) {
break; | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Command;
use Sonata\MediaBundle\Model\MediaInterface;
use Sonata\MediaBundle\Provider\MediaProviderInterface;
use Symfony\Component\Console\Input\InputArgument;
use Symfony\Component\Console\Input\InputInterface;
use Symfony\Component\Console\Input\InputOption;
use Symfony\Component\Console\Output\OutputInterface;
use Symfony\Component\Console\Question\ChoiceQuestion;
/**
* This command can be used to re-generate the thumbnails for all uploaded medias.
*
* Useful if you have existing media content and added new formats.
*/
class SyncThumbsCommand extends BaseCommand
{
/**
* @var bool
*/
protected $quiet = false;
/**
* @var OutputInterface
*/
protected $output;
/**
* {@inheritdoc}
*/
public function configure()
{
$this->setName('sonata:media:sync-thumbnails')
->setDescription('Sync uploaded image thumbs with new media formats')
->setDefinition([
new InputArgument('providerName', InputArgument::OPTIONAL, 'The provider'),
new InputArgument('context', InputArgument::OPTIONAL, 'The context'),
new InputOption('batchSize', null, InputOption::VALUE_REQUIRED, 'Media batch size (100 by default)', 100),
new InputOption('batchesLimit', null, InputOption::VALUE_REQUIRED, 'Media batches limit (0 by default)', 0),
new InputOption('startOffset', null, InputOption::VALUE_REQUIRED, 'Medias start offset (0 by default)', 0),
]
);
}
/**
* {@inheritdoc}
*/
public function execute(InputInterface $input, OutputInterface $output)
{
$helper = $this->getHelper('question');
$providerName = $input->getArgument('providerName');
if (null === $providerName) {
$providers = array_keys($this->getMediaPool()->getProviders());
$question = new ChoiceQuestion('Please select the provider', $providers, 0);
$question->setErrorMessage('Provider %s is invalid.');
$providerName = $helper->ask($input, $output, $question);
}
$context = $input->getArgument('context');
if (null === $context) {
$contexts = array_keys($this->getMediaPool()->getContexts());
$question = new ChoiceQuestion('Please select the context', $contexts, 0);
$question->setErrorMessage('Context %s is invalid.');
$context = $helper->ask($input, $output, $question);
}
$this->quiet = $input->getOption('quiet');
$this->output = $output;
$provider = $this->getMediaPool()->getProvider($providerName);
$filesystem = $provider->getFilesystem();
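        // The provider's filesystem keeps an internal register of file objects; grab that
        // property via reflection so it can be cleared after each media to keep memory bounded.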
$fsReflection = new \ReflectionClass($filesystem);
$fsRegister = $fsReflection->getProperty('fileRegister');
$fsRegister->setAccessible(true);
$batchCounter = 0;
$batchSize = (int) $input->getOption('batchSize');
$batchesLimit = (int) $input->getOption('batchesLimit');
$startOffset = (int) $input->getOption('startOffset');
$totalMediasCount = 0;
do {
++$batchCounter;
try {
$batchOffset = $startOffset + ($batchCounter - 1) * $batchSize;
$medias = $this->getMediaManager()->findBy(
[
'providerName' => $providerName,
'context' => $context,
],
[
'id' => 'ASC',
],
$batchSize,
$batchOffset
);
} catch (\Exception $e) {
$this->log('Error: '.$e->getMessage());
break;
}
$batchMediasCount = count($medias);
if (0 === $batchMediasCount) {
break;
}
$totalMediasCount += $batchMediasCount;
$this->log(
sprintf(
'Loaded %s medias (batch #%d, offset %d) for generating thumbs (provider: %s, context: %s)',
$batchMediasCount,
$batchCounter,
$batchOffset,
$providerName,
$context
)
);
foreach ($medias as $media) {
if (!$this->processMedia($media, $provider)) {
continue;
}
//clean filesystem registry for saving memory
$fsRegister->setValue($filesystem, []);
}
//clear entity manager for saving memory
$this->getMediaManager()->getEntityManager()->clear();
if ($batchesLimit > 0 && $batchCounter == $batchesLimit) {
break;
}
} while (true);
$this->log("Done (total medias processed: {$totalMediasCount}).");
}
/**
* @param MediaInterface $media
* @param MediaProviderInterface $provider
*
* @return bool
*/
protected function processMedia($media, $provider)
{
$this->log('Generating thumbs for '.$media->getName().' - '.$media->getId());
try {
$provider->removeThumbnails($media);
} catch (\Exception $e) {
$this->log(sprintf('<error>Unable to remove old thumbnails, media: %s - %s </error>',
$media->getId(), $e->getMessage()));
return false;
}
try {
$provider->generateThumbnails($media);
} catch (\Exception $e) {
$this->log(sprintf('<error>Unable to generate new thumbnails, media: %s - %s </error>',
$media->getId(), $e->getMessage()));
return false;
}
return true;
}
/**
* Write a message to the output.
*
* @param string $message
*/
protected function log($message)
{
if (false === $this->quiet) {
$this->output->writeln($message);
}
}
}
| 1 | 9,979 | It will stop working with orm, so this is not a good fix, Try using some method in a common interface | sonata-project-SonataMediaBundle | php |
@@ -40,7 +40,11 @@ namespace OpenTelemetry.Collector.Dependencies
{
this.diagnosticSourceSubscriber = new DiagnosticSourceSubscriber(
new Dictionary<string, Func<ITracer, Func<HttpRequestMessage, ISampler>, ListenerHandler>>()
- { { "HttpHandlerDiagnosticListener", (t, s) => new HttpHandlerDiagnosticListener(t, s) } },
+ {
+ { "HttpHandlerDiagnosticListener", (t, s) => new HttpHandlerDiagnosticListener(t, s) },
+ { "Azure.Clients", (t, s) => new AzureSdkDiagnosticListener("Azure.Clients", t, sampler) },
+ { "Azure.Pipeline", (t, s) => new AzureSdkDiagnosticListener("Azure.Pipeline", t, sampler) },
+ },
tracer,
x =>
{ | 1 | // <copyright file="DependenciesCollector.cs" company="OpenTelemetry Authors">
// Copyright 2018, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
namespace OpenTelemetry.Collector.Dependencies
{
using System;
using System.Collections.Generic;
using System.Net.Http;
using OpenTelemetry.Collector.Dependencies.Common;
using OpenTelemetry.Collector.Dependencies.Implementation;
using OpenTelemetry.Trace;
/// <summary>
/// Dependencies collector.
/// </summary>
public class DependenciesCollector : IDisposable
{
private readonly DiagnosticSourceSubscriber diagnosticSourceSubscriber;
/// <summary>
/// Initializes a new instance of the <see cref="DependenciesCollector"/> class.
/// </summary>
/// <param name="options">Configuration options for dependencies collector.</param>
        /// <param name="tracer">Tracer to record traces with.</param>
        /// <param name="sampler">Sampler to use to sample dependency calls.</param>
public DependenciesCollector(DependenciesCollectorOptions options, ITracer tracer, ISampler sampler)
{
this.diagnosticSourceSubscriber = new DiagnosticSourceSubscriber(
new Dictionary<string, Func<ITracer, Func<HttpRequestMessage, ISampler>, ListenerHandler>>()
{ { "HttpHandlerDiagnosticListener", (t, s) => new HttpHandlerDiagnosticListener(t, s) } },
tracer,
x =>
{
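                    // Resolve a sampler per request: prefer the user-supplied CustomSampler,
                    // falling back to the default sampler if it throws or returns null.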
ISampler s = null;
try
{
s = options.CustomSampler(x);
}
catch (Exception e)
{
s = null;
DependenciesCollectorEventSource.Log.ExceptionInCustomSampler(e);
}
return s ?? sampler;
});
this.diagnosticSourceSubscriber.Subscribe();
}
public void Dispose()
{
this.diagnosticSourceSubscriber.Dispose();
}
}
}
| 1 | 12,056 | `s` is not necessarily the same as `sampler` - sampler could be null and this crazy lambda underneath falls back to something. So please use `s` | open-telemetry-opentelemetry-dotnet | .cs |
@@ -83,9 +83,14 @@ public class PojoOperationGenerator extends AbstractOperationGenerator {
bodyModel = new ModelImpl();
bodyModel.setType(ModelImpl.OBJECT);
for (ParameterGenerator parameterGenerator : bodyFields) {
- SwaggerUtils.addDefinitions(swagger, parameterGenerator.getGenericType());
+ parameterGenerator.setHttpParameterType(HttpParameterType.BODY);
+ scanMethodParameter(parameterGenerator);
+
Property property = ModelConverters.getInstance().readAsProperty(parameterGenerator.getGenericType());
+ property.setDescription(parameterGenerator.getGeneratedParameter().getDescription());
bodyModel.addProperty(parameterGenerator.getParameterName(), property);
+
+ parameterGenerator.setHttpParameterType(null);
}
swagger.addDefinition(simpleRef, bodyModel);
| 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.servicecomb.swagger.generator.pojo;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import java.lang.reflect.Type;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import javax.ws.rs.HttpMethod;
import org.apache.commons.lang3.StringUtils;
import org.apache.servicecomb.swagger.SwaggerUtils;
import org.apache.servicecomb.swagger.generator.ParameterGenerator;
import org.apache.servicecomb.swagger.generator.SwaggerConst;
import org.apache.servicecomb.swagger.generator.SwaggerGeneratorFeature;
import org.apache.servicecomb.swagger.generator.core.AbstractOperationGenerator;
import org.apache.servicecomb.swagger.generator.core.AbstractSwaggerGenerator;
import org.apache.servicecomb.swagger.generator.core.model.HttpParameterType;
import io.swagger.converter.ModelConverters;
import io.swagger.models.ModelImpl;
import io.swagger.models.RefModel;
import io.swagger.models.Swagger;
import io.swagger.models.parameters.BodyParameter;
import io.swagger.models.parameters.Parameter;
import io.swagger.models.properties.Property;
public class PojoOperationGenerator extends AbstractOperationGenerator {
protected ModelImpl bodyModel;
protected BodyParameter bodyParameter;
public PojoOperationGenerator(AbstractSwaggerGenerator swaggerGenerator, Method method) {
super(swaggerGenerator, method);
}
@Override
protected void initMethodParameterGenerators(Map<String, List<Annotation>> methodAnnotationMap) {
super.initMethodParameterGenerators(methodAnnotationMap);
tryWrapParametersToBody();
}
private void tryWrapParametersToBody() {
List<ParameterGenerator> bodyFields = parameterGenerators.stream().filter(pg -> pg.getHttpParameterType() == null)
.collect(Collectors.toList());
if (bodyFields.isEmpty()) {
return;
}
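    // If exactly one unclassified parameter remains and it is a bean, use it directly as the body;
    // otherwise wrap all remaining parameters into a generated body model.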
if (bodyFields.size() == 1 && SwaggerUtils.isBean(bodyFields.get(0).getGenericType())) {
ParameterGenerator parameterGenerator = bodyFields.get(0);
parameterGenerator.setHttpParameterType(HttpParameterType.BODY);
return;
}
// wrap parameters to body
wrapParametersToBody(bodyFields);
}
private void wrapParametersToBody(List<ParameterGenerator> bodyFields) {
String simpleRef = method.getName() + "Body";
bodyModel = new ModelImpl();
bodyModel.setType(ModelImpl.OBJECT);
for (ParameterGenerator parameterGenerator : bodyFields) {
SwaggerUtils.addDefinitions(swagger, parameterGenerator.getGenericType());
Property property = ModelConverters.getInstance().readAsProperty(parameterGenerator.getGenericType());
bodyModel.addProperty(parameterGenerator.getParameterName(), property);
}
swagger.addDefinition(simpleRef, bodyModel);
SwaggerGeneratorFeature feature = swaggerGenerator.getSwaggerGeneratorFeature();
    // The bodyFields.size() > 1 condition has no real rationale; it is kept only because the old version behaved this way.
    // If that compatibility is not needed, all logic about EXT_JAVA_CLASS/EXT_JAVA_INTF can simply be deleted.
if (feature.isExtJavaClassInVendor()
&& bodyFields.size() > 1
&& StringUtils.isNotEmpty(feature.getPackageName())) {
bodyModel.getVendorExtensions().put(SwaggerConst.EXT_JAVA_CLASS, feature.getPackageName() + "." + simpleRef);
}
RefModel refModel = new RefModel();
refModel.setReference("#/definitions/" + simpleRef);
bodyParameter = new BodyParameter();
bodyParameter.name(simpleRef);
bodyParameter.setSchema(refModel);
bodyParameter.setName(parameterGenerators.size() == 1 ? parameterGenerators.get(0).getParameterName() : simpleRef);
List<ParameterGenerator> newParameterGenerators = new ArrayList<>();
newParameterGenerators.add(new ParameterGenerator(
bodyParameter.getName(),
Collections.emptyList(),
null,
HttpParameterType.BODY,
bodyParameter));
parameterGenerators.stream().filter(p -> p.getHttpParameterType() != null)
.forEach(p -> newParameterGenerators.add(p));
parameterGenerators = newParameterGenerators;
}
private boolean isWrapBody(Object parameter) {
return parameter != null && parameter == bodyParameter;
}
@Override
protected void fillParameter(Swagger swagger, Parameter parameter, String parameterName, Type type,
List<Annotation> annotations) {
if (isWrapBody(parameter)) {
return;
}
super.fillParameter(swagger, parameter, parameterName, type, annotations);
}
@Override
protected Parameter createParameter(ParameterGenerator parameterGenerator) {
if (isWrapBody(parameterGenerator.getGeneratedParameter())) {
return bodyParameter;
}
return super.createParameter(parameterGenerator);
}
@Override
public void correctOperation() {
correctPath();
correctHttpMethod();
super.correctOperation();
}
protected void correctPath() {
if (StringUtils.isEmpty(path)) {
path = "/" + getOperationId();
}
}
protected void correctHttpMethod() {
if (StringUtils.isEmpty(httpMethod)) {
setHttpMethod(HttpMethod.POST);
}
}
}
| 1 | 11,231 | This code is a bit confusing. Should HttpParameterType set to the original one or it is always BODY? | apache-servicecomb-java-chassis | java |
@@ -741,7 +741,7 @@ func (c *Client) reallyExecute(tid int, target *core.BuildTarget, command *pb.Co
return nil, nil, err
}
log.Debug("Completed remote build action for %s", target)
- if err := c.verifyActionResult(target, command, digest, response.Result, false, isTest); err != nil {
+ if err := c.verifyActionResult(target, command, digest, response.Result, c.state.Config.Remote.VerifyOutputs && !isTest, isTest); err != nil {
return metadata, response.Result, err
}
c.locallyCacheResults(target, digest, metadata, response.Result) | 1 | // Package remote provides our interface to the Google remote execution APIs
// (https://github.com/bazelbuild/remote-apis) which Please can use to distribute
// work to remote servers.
package remote
import (
"context"
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/client"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/digest"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/filemetadata"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/retry"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/uploadinfo"
fpb "github.com/bazelbuild/remote-apis/build/bazel/remote/asset/v1"
pb "github.com/bazelbuild/remote-apis/build/bazel/remote/execution/v2"
"github.com/bazelbuild/remote-apis/build/bazel/semver"
"github.com/golang/protobuf/ptypes"
"github.com/grpc-ecosystem/go-grpc-middleware/retry"
"golang.org/x/sync/errgroup"
"google.golang.org/genproto/googleapis/longrunning"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/status"
"gopkg.in/op/go-logging.v1"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/fs"
)
var log = logging.MustGetLogger("remote")
// The API version we support.
var apiVersion = semver.SemVer{Major: 2}
// A Client is the interface to the remote API.
//
// It provides a higher-level interface over the specific RPCs available.
type Client struct {
client *client.Client
fetchClient fpb.FetchClient
initOnce sync.Once
state *core.BuildState
err error // for initialisation
instance string
// Stored output directories from previously executed targets.
// This isn't just a cache - it is needed for cases where we don't actually
// have the files physically on disk.
outputs map[core.BuildLabel]*pb.Directory
outputMutex sync.RWMutex
// The unstamped build action digests. Stamped and test digests are not stored.
// This isn't just a cache - it is needed because building a target can modify the target and things like plz hash
// --detailed and --shell will fail to get the right action digest.
unstampedBuildActionDigests actionDigestMap
// Used to control downloading targets (we must make sure we don't re-fetch them
// while another target is trying to use them).
//
// This map is of effective type `map[*core.BuildTarget]*pendingDownload`
downloads sync.Map
// Server-sent cache properties
maxBlobBatchSize int64
// Platform properties that we will request from the remote.
// TODO(peterebden): this will need some modification for cross-compiling support.
platform *pb.Platform
// Path to the shell to use to execute actions in.
shellPath string
// Stats used to report RPC data rates
byteRateIn, byteRateOut, totalBytesIn, totalBytesOut int
stats *statsHandler
// Used to store and retrieve action results to reduce RPC calls when re-building targets
mdStore buildMetadataStore
// Passed to various SDK functions.
fileMetadataCache filemetadata.Cache
// existingBlobs is used to track the set of existing blobs remotely.
existingBlobs map[string]struct{}
existingBlobMutex sync.Mutex
}
type actionDigestMap struct {
m sync.Map
}
func (m *actionDigestMap) Get(label core.BuildLabel) *pb.Digest {
d, ok := m.m.Load(label)
if !ok {
panic(fmt.Sprintf("could not find action digest for label: %s", label.String()))
}
return d.(*pb.Digest)
}
func (m *actionDigestMap) Put(label core.BuildLabel, actionDigest *pb.Digest) {
m.m.Store(label, actionDigest)
}
// A pendingDownload represents a pending download of a build target. It is used to
// ensure we only download each target exactly once.
type pendingDownload struct {
once sync.Once
err error // Any error if the download failed.
}
// New returns a new Client instance.
// It begins the process of contacting the remote server but does not wait for it.
func New(state *core.BuildState) *Client {
c := &Client{
state: state,
instance: state.Config.Remote.Instance,
outputs: map[core.BuildLabel]*pb.Directory{},
mdStore: newDirMDStore(time.Duration(state.Config.Remote.CacheDuration)),
existingBlobs: map[string]struct{}{
digest.Empty.Hash: {},
},
fileMetadataCache: filemetadata.NewNoopCache(),
shellPath: state.Config.Remote.Shell,
}
c.stats = newStatsHandler(c)
go c.CheckInitialised() // Kick off init now, but we don't have to wait for it.
return c
}
// CheckInitialised checks that the client has connected to the server correctly.
func (c *Client) CheckInitialised() error {
c.initOnce.Do(c.init)
return c.err
}
// init is passed to the sync.Once to do the actual initialisation.
func (c *Client) init() {
// Change grpc to log using our implementation
grpclog.SetLoggerV2(&grpcLogMabob{})
var g errgroup.Group
g.Go(c.initExec)
if c.state.Config.Remote.AssetURL != "" {
g.Go(c.initFetch)
}
c.err = g.Wait()
if c.err != nil {
log.Error("Error setting up remote execution client: %s", c.err)
}
}
// initExec initialiases the remote execution client.
func (c *Client) initExec() error {
	// Set up the dial options and create the remote execution client.
dialOpts, err := c.dialOpts()
if err != nil {
return err
}
client, err := client.NewClient(context.Background(), c.instance, client.DialParams{
Service: c.state.Config.Remote.URL,
CASService: c.state.Config.Remote.CASURL,
NoSecurity: !c.state.Config.Remote.Secure,
TransportCredsOnly: c.state.Config.Remote.Secure,
DialOpts: dialOpts,
}, client.UseBatchOps(true), &client.TreeSymlinkOpts{Preserved: true}, client.RetryTransient(), client.RPCTimeouts(map[string]time.Duration{
"default": time.Duration(c.state.Config.Remote.Timeout),
"GetCapabilities": 5 * time.Second,
"BatchUpdateBlobs": time.Minute,
"BatchReadBlobs": time.Minute,
"GetTree": time.Minute,
"Execute": 0,
"WaitExecution": 0,
}))
if err != nil {
return err
}
c.client = client
// Extend timeouts a bit, RetryTransient only gives about 1.5 seconds total which isn't
// necessarily very much if the other end needs to sort its life out.
c.client.Retrier.Backoff = retry.ExponentialBackoff(500*time.Millisecond, 5*time.Second, retry.Attempts(8))
// Query the server for its capabilities. This tells us whether it is capable of
// execution, caching or both.
resp, err := c.client.GetCapabilities(context.Background())
if err != nil {
return err
}
if lessThan(&apiVersion, resp.LowApiVersion) || lessThan(resp.HighApiVersion, &apiVersion) {
return fmt.Errorf("Unsupported API version; we require %s but server only supports %s - %s", printVer(&apiVersion), printVer(resp.LowApiVersion), printVer(resp.HighApiVersion))
}
caps := resp.CacheCapabilities
if caps == nil {
return fmt.Errorf("Cache capabilities not supported by server (we do not support execution-only servers)")
}
if err := c.chooseDigest(caps.DigestFunction); err != nil {
return err
}
c.maxBlobBatchSize = caps.MaxBatchTotalSizeBytes
if c.maxBlobBatchSize == 0 {
// No limit was set by the server, assume we are implicitly limited to 4MB (that's
// gRPC's limit which most implementations do not seem to override). Round it down a
// bit to allow a bit of serialisation overhead etc.
c.maxBlobBatchSize = 4000000
}
if c.shellPath == "" {
// We have to run everything through a shell since our commands are arbitrary.
// Unfortunately we can't just say "bash", we need an absolute path which is
// a bit weird since it assumes that our absolute path is the same as the
// remote one (which is probably OK on the same OS, but not between say Linux and
// FreeBSD where bash is not idiomatically in the same place).
bash, err := core.LookBuildPath("bash", c.state.Config)
if err != nil {
return fmt.Errorf("Failed to set path for bash: %w", err)
}
c.shellPath = bash
}
log.Debug("Remote execution client initialised for storage")
// Now check if it can do remote execution
if resp.ExecutionCapabilities == nil {
return fmt.Errorf("Remote execution is configured but the build server doesn't support it")
}
if err := c.chooseDigest([]pb.DigestFunction_Value{resp.ExecutionCapabilities.DigestFunction}); err != nil {
return err
} else if !resp.ExecutionCapabilities.ExecEnabled {
return fmt.Errorf("Remote execution not enabled for this server")
}
c.platform = convertPlatform(c.state.Config)
log.Debug("Remote execution client initialised for execution")
if c.state.Config.Remote.AssetURL == "" {
c.fetchClient = fpb.NewFetchClient(client.Connection)
}
return nil
}
// initFetch initialises the remote fetch server.
func (c *Client) initFetch() error {
dialOpts, err := c.dialOpts()
if err != nil {
return err
}
if c.state.Config.Remote.Secure {
dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
} else {
dialOpts = append(dialOpts, grpc.WithInsecure())
}
conn, err := grpc.Dial(c.state.Config.Remote.AssetURL, append(dialOpts, grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor()))...)
if err != nil {
return fmt.Errorf("Failed to connect to the remote fetch server: %s", err)
}
c.fetchClient = fpb.NewFetchClient(conn)
return nil
}
// chooseDigest selects a digest function that we will use.
func (c *Client) chooseDigest(fns []pb.DigestFunction_Value) error {
systemFn := c.digestEnum(c.state.Config.Build.HashFunction)
for _, fn := range fns {
if fn == systemFn {
return nil
}
}
return fmt.Errorf("No acceptable hash function available; server supports %s but we require %s. Hint: you may need to set the hash function appropriately in the [build] section of your config", fns, systemFn)
}
// digestEnum returns a proto enum for the digest function of given name (as we name them in config)
func (c *Client) digestEnum(name string) pb.DigestFunction_Value {
switch c.state.Config.Build.HashFunction {
case "sha256":
return pb.DigestFunction_SHA256
case "sha1":
return pb.DigestFunction_SHA1
default:
return pb.DigestFunction_UNKNOWN // Shouldn't get here
}
}
// Build executes a remote build of the given target.
func (c *Client) Build(tid int, target *core.BuildTarget) (*core.BuildMetadata, error) {
if err := c.CheckInitialised(); err != nil {
return nil, err
}
metadata, ar, digest, err := c.build(tid, target)
if err != nil {
return metadata, err
}
if c.state.TargetHasher != nil {
hash, _ := hex.DecodeString(c.outputHash(ar))
c.state.TargetHasher.SetHash(target, hash)
}
if err := c.setOutputs(target, ar); err != nil {
return metadata, c.wrapActionErr(err, digest)
}
if c.state.ShouldDownload(target) {
if !c.outputsExist(target, digest) {
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Downloading")
if err := c.download(target, func() error {
return c.reallyDownload(target, digest, ar)
}); err != nil {
return metadata, err
}
} else {
log.Debug("Not downloading outputs for %s, they are already up-to-date", target)
// Ensure this is marked as already downloaded.
v, _ := c.downloads.LoadOrStore(target, &pendingDownload{})
v.(*pendingDownload).once.Do(func() {})
}
if err := c.downloadData(target); err != nil {
return metadata, err
}
}
return metadata, nil
}
// downloadData downloads all the runtime data for a target, recursively.
func (c *Client) downloadData(target *core.BuildTarget) error {
var g errgroup.Group
for _, datum := range target.AllData() {
if l := datum.Label(); l != nil {
t := c.state.Graph.TargetOrDie(*l)
g.Go(func() error {
if err := c.Download(t); err != nil {
return err
}
return c.downloadData(t)
})
}
}
return g.Wait()
}
// Run runs a target on the remote executors.
func (c *Client) Run(target *core.BuildTarget) error {
if err := c.CheckInitialised(); err != nil {
return err
}
cmd, digest, err := c.uploadAction(target, false, true)
if err != nil {
return err
}
// 24 hours is kind of an arbitrarily long timeout. Basically we just don't want to limit it here.
_, _, err = c.execute(0, target, cmd, digest, false, false)
return err
}
// build implements the actual build of a target.
func (c *Client) build(tid int, target *core.BuildTarget) (*core.BuildMetadata, *pb.ActionResult, *pb.Digest, error) {
needStdout := target.PostBuildFunction != nil
// If we're gonna stamp the target, first check the unstamped equivalent that we store results under.
// This implements the rules of stamp whereby we don't force rebuilds every time e.g. the SCM revision changes.
var unstampedDigest *pb.Digest
if target.Stamp {
command, digest, err := c.buildAction(target, false, false)
if err != nil {
return nil, nil, nil, err
} else if metadata, ar := c.maybeRetrieveResults(tid, target, command, digest, false, needStdout); metadata != nil {
return metadata, ar, digest, nil
}
unstampedDigest = digest
}
command, stampedDigest, err := c.buildAction(target, false, true)
if err != nil {
return nil, nil, nil, err
}
metadata, ar, err := c.execute(tid, target, command, stampedDigest, false, needStdout)
if target.Stamp && err == nil {
// Store results under unstamped digest too.
c.locallyCacheResults(target, unstampedDigest, metadata, ar)
c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{
InstanceName: c.instance,
ActionDigest: unstampedDigest,
ActionResult: ar,
})
c.unstampedBuildActionDigests.Put(target.Label, unstampedDigest)
} else {
c.unstampedBuildActionDigests.Put(target.Label, stampedDigest)
}
return metadata, ar, stampedDigest, err
}
// Download downloads outputs for the given target.
func (c *Client) Download(target *core.BuildTarget) error {
if target.Local {
return nil // No download needed since this target was built locally
}
return c.download(target, func() error {
buildAction := c.unstampedBuildActionDigests.Get(target.Label)
if c.outputsExist(target, buildAction) {
return nil
}
_, ar := c.retrieveResults(target, nil, buildAction, false, false)
if ar == nil {
return fmt.Errorf("Failed to retrieve action result for %s", target)
}
return c.reallyDownload(target, buildAction, ar)
})
}
func (c *Client) download(target *core.BuildTarget, f func() error) error {
v, _ := c.downloads.LoadOrStore(target, &pendingDownload{})
d := v.(*pendingDownload)
d.once.Do(func() {
d.err = f()
})
return d.err
}
func (c *Client) reallyDownload(target *core.BuildTarget, digest *pb.Digest, ar *pb.ActionResult) error {
log.Debug("Downloading outputs for %s", target)
if err := removeOutputs(target); err != nil {
return err
}
if err := c.downloadActionOutputs(context.Background(), ar, target); err != nil {
return c.wrapActionErr(err, digest)
}
c.recordAttrs(target, digest)
log.Debug("Downloaded outputs for %s", target)
return nil
}
func (c *Client) downloadActionOutputs(ctx context.Context, ar *pb.ActionResult, target *core.BuildTarget) error {
// We can download straight into the out dir if there are no outdirs to worry about
if len(target.OutputDirectories) == 0 {
_, err := c.client.DownloadActionOutputs(ctx, ar, target.OutDir(), c.fileMetadataCache)
return err
}
defer os.RemoveAll(target.TmpDir())
if _, err := c.client.DownloadActionOutputs(ctx, ar, target.TmpDir(), c.fileMetadataCache); err != nil {
return err
}
if err := moveOutDirsToTmpRoot(target); err != nil {
return fmt.Errorf("failed to move out directories to correct place in tmp folder: %w", err)
}
if err := moveTmpFilesToOutDir(target); err != nil {
return fmt.Errorf("failed to move downloaded action output from target tmp dir to out dir: %w", err)
}
return nil
}
// moveTmpFilesToOutDir moves files from the target tmp dir to the out dir
func moveTmpFilesToOutDir(target *core.BuildTarget) error {
files, err := ioutil.ReadDir(target.TmpDir())
if err != nil {
return err
}
for _, f := range files {
oldPath := filepath.Join(target.TmpDir(), f.Name())
newPath := filepath.Join(target.OutDir(), f.Name())
if err := fs.RecursiveCopy(oldPath, newPath, target.OutMode()); err != nil {
return err
}
}
return nil
}
// moveOutDirsToTmpRoot moves all the files from the output dirs into the root of the build temp dir and deletes the
// now empty directory
func moveOutDirsToTmpRoot(target *core.BuildTarget) error {
for _, dir := range target.OutputDirectories {
if err := moveOutDirFilesToTmpRoot(target, dir.Dir()); err != nil {
return fmt.Errorf("failed to move output dir (%s) contents to rule root: %w", dir, err)
}
if err := os.Remove(filepath.Join(target.TmpDir(), dir.Dir())); err != nil {
return err
}
}
return nil
}
func moveOutDirFilesToTmpRoot(target *core.BuildTarget, dir string) error {
fullDir := filepath.Join(target.TmpDir(), dir)
files, err := ioutil.ReadDir(fullDir)
if err != nil {
return err
}
for _, f := range files {
from := filepath.Join(fullDir, f.Name())
to := filepath.Join(target.TmpDir(), f.Name())
if err := os.Rename(from, to); err != nil {
return err
}
}
return nil
}
// Test executes a remote test of the given target.
// It returns the results (and coverage if appropriate) as bytes to be parsed elsewhere.
func (c *Client) Test(tid int, target *core.BuildTarget, run int) (metadata *core.BuildMetadata, err error) {
if err := c.CheckInitialised(); err != nil {
return nil, err
}
command, digest, err := c.buildAction(target, true, false)
if err != nil {
return nil, err
}
metadata, ar, err := c.execute(tid, target, command, digest, true, false)
if ar != nil {
_, dlErr := c.client.DownloadActionOutputs(context.Background(), ar, target.TestDir(run), c.fileMetadataCache)
if dlErr != nil {
log.Warningf("%v: failed to download test outputs: %v", target.Label, dlErr)
}
}
return metadata, err
}
// retrieveResults retrieves target results from where it can (either from the local cache or from remote).
// It returns nil if it cannot be retrieved.
func (c *Client) retrieveResults(target *core.BuildTarget, command *pb.Command, digest *pb.Digest, needStdout, isTest bool) (*core.BuildMetadata, *pb.ActionResult) {
// First see if this execution is cached locally
if metadata, ar := c.retrieveLocalResults(target, digest); metadata != nil {
log.Debug("Got locally cached results for %s %s", target.Label, c.actionURL(digest, true))
metadata.Cached = true
return metadata, ar
}
// Now see if it is cached on the remote server
if ar, err := c.client.GetActionResult(context.Background(), &pb.GetActionResultRequest{
InstanceName: c.instance,
ActionDigest: digest,
InlineStdout: needStdout,
}); err == nil {
// This action already exists and has been cached.
if metadata, err := c.buildMetadata(ar, needStdout, false); err == nil {
log.Debug("Got remotely cached results for %s %s", target.Label, c.actionURL(digest, true))
if command != nil {
err = c.verifyActionResult(target, command, digest, ar, c.state.Config.Remote.VerifyOutputs, isTest)
}
if err == nil {
c.locallyCacheResults(target, digest, metadata, ar)
metadata.Cached = true
return metadata, ar
}
log.Debug("Remotely cached results for %s were missing some outputs, forcing a rebuild: %s", target.Label, err)
}
}
return nil, nil
}
// maybeRetrieveResults is like retrieveResults but only retrieves if we aren't forcing a rebuild of the target
// (i.e. not if we're doing plz build --rebuild or plz test --rerun).
func (c *Client) maybeRetrieveResults(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, isTest, needStdout bool) (*core.BuildMetadata, *pb.ActionResult) {
if !c.state.ShouldRebuild(target) && !(c.state.NeedTests && isTest && c.state.ForceRerun) {
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Checking remote...")
if metadata, ar := c.retrieveResults(target, command, digest, needStdout, isTest); metadata != nil {
return metadata, ar
}
}
return nil, nil
}
// execute submits an action to the remote executor and monitors its progress.
// The returned ActionResult may be nil on failure.
func (c *Client) execute(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, isTest, needStdout bool) (*core.BuildMetadata, *pb.ActionResult, error) {
if !isTest || !c.state.ForceRerun || c.state.NumTestRuns == 1 {
if metadata, ar := c.maybeRetrieveResults(tid, target, command, digest, isTest, needStdout); metadata != nil {
return metadata, ar, nil
}
}
// We didn't actually upload the inputs before, so we must do so now.
command, digest, err := c.uploadAction(target, isTest, false)
if err != nil {
return nil, nil, fmt.Errorf("Failed to upload build action: %s", err)
}
// Remote actions & filegroups get special treatment at this point.
if target.IsFilegroup {
// Filegroups get special-cased since they are just a movement of files.
return c.buildFilegroup(target, command, digest)
} else if target.IsRemoteFile {
return c.fetchRemoteFile(tid, target, digest)
} else if target.IsTextFile {
return c.buildTextFile(target, command, digest)
}
// We should skip the cache lookup (and override any existing action result) if we --rebuild, or --rerun and this is
	// one of the targets we're testing or building.
skipCacheLookup := (isTest && c.state.ForceRerun) || (!isTest && c.state.ForceRebuild)
skipCacheLookup = skipCacheLookup && c.state.IsOriginalTarget(target)
return c.reallyExecute(tid, target, command, digest, needStdout, isTest, skipCacheLookup)
}
// reallyExecute is like execute but after the initial cache check etc.
// The action & sources must have already been uploaded.
func (c *Client) reallyExecute(tid int, target *core.BuildTarget, command *pb.Command, digest *pb.Digest, needStdout, isTest, skipCacheLookup bool) (*core.BuildMetadata, *pb.ActionResult, error) {
executing := false
updateProgress := func(metadata *pb.ExecuteOperationMetadata) {
if c.state.Config.Remote.DisplayURL != "" {
log.Debug("Remote progress for %s: %s%s", target.Label, metadata.Stage, c.actionURL(metadata.ActionDigest, true))
}
if target.State() <= core.Built {
switch metadata.Stage {
case pb.ExecutionStage_CACHE_CHECK:
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Checking cache...")
case pb.ExecutionStage_QUEUED:
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Queued")
case pb.ExecutionStage_EXECUTING:
executing = true
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Building...")
case pb.ExecutionStage_COMPLETED:
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Completed")
}
} else {
switch metadata.Stage {
case pb.ExecutionStage_CACHE_CHECK:
c.state.LogBuildResult(tid, target.Label, core.TargetTesting, "Checking cache...")
case pb.ExecutionStage_QUEUED:
c.state.LogBuildResult(tid, target.Label, core.TargetTesting, "Queued")
case pb.ExecutionStage_EXECUTING:
executing = true
c.state.LogBuildResult(tid, target.Label, core.TargetTesting, "Testing...")
case pb.ExecutionStage_COMPLETED:
c.state.LogBuildResult(tid, target.Label, core.TargetTesting, "Completed")
}
}
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go func() {
for i := 1; i < 1000000; i++ {
select {
case <-ctx.Done():
return
case <-time.After(1 * time.Minute):
description := "queued"
if executing {
description = "executing"
}
if i == 1 {
log.Notice("%s still %s after 1 minute", target, description)
} else {
log.Notice("%s still %s after %d minutes", target, description, i)
}
}
}
}()
resp, err := c.client.ExecuteAndWaitProgress(c.contextWithMetadata(target), &pb.ExecuteRequest{
InstanceName: c.instance,
ActionDigest: digest,
SkipCacheLookup: skipCacheLookup,
}, updateProgress)
if err != nil {
// Handle timing issues if we try to resume an execution as it fails. If we get a
// "not found" we might find that it's already been completed and we can't resume.
if status.Code(err) == codes.NotFound {
if metadata, ar := c.retrieveResults(target, command, digest, needStdout, isTest); metadata != nil {
return metadata, ar, nil
}
}
return nil, nil, c.wrapActionErr(fmt.Errorf("Failed to execute %s: %s", target, err), digest)
}
switch result := resp.Result.(type) {
case *longrunning.Operation_Error:
// We shouldn't really get here - the rex API requires servers to always
// use the response field instead of error.
return nil, nil, convertError(result.Error)
case *longrunning.Operation_Response:
response := &pb.ExecuteResponse{}
if err := ptypes.UnmarshalAny(result.Response, response); err != nil {
log.Error("Failed to deserialise execution response: %s", err)
return nil, nil, err
}
if response.CachedResult {
c.state.LogBuildResult(tid, target.Label, core.TargetCached, "Cached")
}
for k, v := range response.ServerLogs {
log.Debug("Server log available: %s: hash key %s", k, v.Digest.Hash)
}
var respErr error
if response.Status != nil {
respErr = convertError(response.Status)
if respErr != nil {
if !strings.Contains(respErr.Error(), c.state.Config.Remote.DisplayURL) {
if url := c.actionURL(digest, false); url != "" {
respErr = fmt.Errorf("%s\nAction URL: %s", respErr, url)
}
}
}
}
if resp.Result == nil { // This is optional on failure.
return nil, nil, respErr
}
if response.Result == nil { // This seems to happen when things go wrong on the build server end.
if response.Status != nil {
return nil, nil, fmt.Errorf("Build server returned invalid result: %s", convertError(response.Status))
}
log.Debug("Bad result from build server: %+v", response)
return nil, nil, fmt.Errorf("Build server did not return valid result")
}
if response.Message != "" {
// Informational messages can be emitted on successful actions.
log.Debug("Message from build server:\n %s", response.Message)
}
failed := respErr != nil || response.Result.ExitCode != 0
metadata, err := c.buildMetadata(response.Result, needStdout || failed, failed)
logResponseTimings(target, response.Result)
// The original error is higher priority than us trying to retrieve the
// output of the thing that failed.
if respErr != nil {
return metadata, response.Result, respErr
} else if response.Result.ExitCode != 0 {
err := fmt.Errorf("Remotely executed command exited with %d", response.Result.ExitCode)
if response.Message != "" {
err = fmt.Errorf("%s\n %s", err, response.Message)
}
if len(metadata.Stdout) != 0 {
err = fmt.Errorf("%s\nStdout:\n%s", err, metadata.Stdout)
}
if len(metadata.Stderr) != 0 {
err = fmt.Errorf("%s\nStderr:\n%s", err, metadata.Stderr)
}
// Add a link to the action URL, but only if the server didn't do it (they
// might add one to the failed action if they're using the Buildbarn extension
// for it, which we can't replicate here).
if !strings.Contains(response.Message, c.state.Config.Remote.DisplayURL) {
if url := c.actionURL(digest, true); url != "" {
err = fmt.Errorf("%s\n%s", err, url)
}
}
return metadata, response.Result, err
} else if err != nil {
return nil, nil, err
}
log.Debug("Completed remote build action for %s", target)
if err := c.verifyActionResult(target, command, digest, response.Result, false, isTest); err != nil {
return metadata, response.Result, err
}
c.locallyCacheResults(target, digest, metadata, response.Result)
return metadata, response.Result, nil
default:
if !resp.Done {
log.Error("Received an incomplete response for %s: %#v", target, resp)
return nil, nil, fmt.Errorf("Received an incomplete response for %s", target)
}
return nil, nil, fmt.Errorf("Unknown response type (was a %T): %#v", resp.Result, resp) // Shouldn't get here
}
}
func logResponseTimings(target *core.BuildTarget, ar *pb.ActionResult) {
if ar != nil && ar.ExecutionMetadata != nil {
startTime := toTime(ar.ExecutionMetadata.ExecutionStartTimestamp)
endTime := toTime(ar.ExecutionMetadata.ExecutionCompletedTimestamp)
inputFetchStartTime := toTime(ar.ExecutionMetadata.InputFetchStartTimestamp)
inputFetchEndTime := toTime(ar.ExecutionMetadata.InputFetchCompletedTimestamp)
log.Debug("Completed remote build action for %s; input fetch %s, build time %s", target, inputFetchEndTime.Sub(inputFetchStartTime), endTime.Sub(startTime))
}
}
// PrintHashes prints the action hashes for a target.
func (c *Client) PrintHashes(target *core.BuildTarget, isTest bool) {
actionDigest := c.unstampedBuildActionDigests.Get(target.Label)
fmt.Printf(" Action: %7d bytes: %s\n", actionDigest.SizeBytes, actionDigest.Hash)
if c.state.Config.Remote.DisplayURL != "" {
fmt.Printf(" URL: %s\n", c.actionURL(actionDigest, false))
}
}
// DataRate returns an estimate of the current in/out RPC data rates in bytes per second.
func (c *Client) DataRate() (int, int, int, int) {
return c.byteRateIn, c.byteRateOut, c.totalBytesIn, c.totalBytesOut
}
// fetchRemoteFile sends a request to fetch a file using the remote asset API.
func (c *Client) fetchRemoteFile(tid int, target *core.BuildTarget, actionDigest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult, error) {
c.state.LogBuildResult(tid, target.Label, core.TargetBuilding, "Downloading...")
urls := target.AllURLs(c.state)
req := &fpb.FetchBlobRequest{
InstanceName: c.instance,
Timeout: ptypes.DurationProto(target.BuildTimeout),
Uris: urls,
}
if !c.state.NeedHashesOnly || !c.state.IsOriginalTargetOrParent(target) {
if sri := subresourceIntegrity(target); sri != "" {
req.Qualifiers = []*fpb.Qualifier{{
Name: "checksum.sri",
Value: sri,
}}
}
}
ctx, cancel := context.WithTimeout(context.Background(), target.BuildTimeout)
defer cancel()
resp, err := c.fetchClient.FetchBlob(ctx, req)
if err != nil {
return nil, nil, fmt.Errorf("Failed to download file: %s", err)
}
c.state.LogBuildResult(tid, target.Label, core.TargetBuilt, "Downloaded.")
// If we get here, the blob exists in the CAS. Create an ActionResult corresponding to it.
outs := target.Outputs()
ar := &pb.ActionResult{
OutputFiles: []*pb.OutputFile{{
Path: outs[0],
Digest: resp.BlobDigest,
IsExecutable: target.IsBinary,
}},
}
if _, err := c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{
InstanceName: c.instance,
ActionDigest: actionDigest,
ActionResult: ar,
}); err != nil {
return nil, nil, fmt.Errorf("Error updating action result: %s", err)
}
return &core.BuildMetadata{}, ar, nil
}
// buildFilegroup "builds" a single filegroup target.
func (c *Client) buildFilegroup(target *core.BuildTarget, command *pb.Command, actionDigest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult, error) {
inputDir, err := c.uploadInputDir(nil, target, false) // We don't need to actually upload the inputs here, that is already done.
if err != nil {
return nil, nil, err
}
ar := &pb.ActionResult{}
if err := c.uploadBlobs(func(ch chan<- *uploadinfo.Entry) error {
defer close(ch)
inputDir.Build(ch)
for _, out := range command.OutputPaths {
if d, f := inputDir.Node(path.Join(target.Label.PackageName, out)); d != nil {
entry, digest := c.protoEntry(inputDir.Tree(path.Join(target.Label.PackageName, out)))
ch <- entry
ar.OutputDirectories = append(ar.OutputDirectories, &pb.OutputDirectory{
Path: out,
TreeDigest: digest,
})
} else if f != nil {
ar.OutputFiles = append(ar.OutputFiles, &pb.OutputFile{
Path: out,
Digest: f.Digest,
IsExecutable: f.IsExecutable,
})
} else {
// Of course, we should not get here (classic developer things...)
return fmt.Errorf("Missing output from filegroup: %s", out)
}
}
return nil
}); err != nil {
return nil, nil, err
}
if _, err := c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{
InstanceName: c.instance,
ActionDigest: actionDigest,
ActionResult: ar,
}); err != nil {
return nil, nil, fmt.Errorf("Error updating action result: %s", err)
}
return &core.BuildMetadata{}, ar, nil
}
// buildTextFile "builds" a text file by uploading its content to the CAS.
func (c *Client) buildTextFile(target *core.BuildTarget, command *pb.Command, actionDigest *pb.Digest) (*core.BuildMetadata, *pb.ActionResult, error) {
ar := &pb.ActionResult{}
if err := c.uploadBlobs(func(ch chan<- *uploadinfo.Entry) error {
defer close(ch)
if len(command.OutputPaths) != 1 {
return fmt.Errorf("text_file %s should have a single output, has %d", target.Label, len(command.OutputPaths))
}
entry := uploadinfo.EntryFromBlob([]byte(target.FileContent))
ch <- entry
ar.OutputFiles = append(ar.OutputFiles, &pb.OutputFile{
Path: command.OutputPaths[0],
Digest: entry.Digest.ToProto(),
})
return nil
}); err != nil {
return nil, nil, err
}
if _, err := c.client.UpdateActionResult(context.Background(), &pb.UpdateActionResultRequest{
InstanceName: c.instance,
ActionDigest: actionDigest,
ActionResult: ar,
}); err != nil {
return nil, nil, fmt.Errorf("Error updating action result: %s", err)
}
return &core.BuildMetadata{}, ar, nil
}
// A grpcLogMabob is an implementation of grpc's logging interface using our backend.
type grpcLogMabob struct{}
func (g *grpcLogMabob) Info(args ...interface{}) { log.Info("%s", args) }
func (g *grpcLogMabob) Infof(format string, args ...interface{}) { log.Info(format, args...) }
func (g *grpcLogMabob) Infoln(args ...interface{}) { log.Info("%s", args) }
func (g *grpcLogMabob) Warning(args ...interface{}) { log.Warning("%s", args) }
func (g *grpcLogMabob) Warningf(format string, args ...interface{}) { log.Warning(format, args...) }
func (g *grpcLogMabob) Warningln(args ...interface{}) { log.Warning("%s", args) }
func (g *grpcLogMabob) Error(args ...interface{}) { log.Error("", args...) }
func (g *grpcLogMabob) Errorf(format string, args ...interface{}) { log.Errorf(format, args...) }
func (g *grpcLogMabob) Errorln(args ...interface{}) { log.Error("", args...) }
func (g *grpcLogMabob) Fatal(args ...interface{}) { log.Fatal(args...) }
func (g *grpcLogMabob) Fatalf(format string, args ...interface{}) { log.Fatalf(format, args...) }
func (g *grpcLogMabob) Fatalln(args ...interface{}) { log.Fatal(args...) }
func (g *grpcLogMabob) V(l int) bool { return log.IsEnabledFor(logging.Level(l)) }
| 1 | 9,779 | Why did this change? | thought-machine-please | go |
@@ -55,11 +55,14 @@ func (c *client) DescribeTable(ctx context.Context, region string, tableName str
globalSecondaryIndexes := getGlobalSecondaryIndexes(result.Table.GlobalSecondaryIndexes)
+ status := newProtoForTableStatus(result.Table.TableStatus)
+
ret := &dynamodbv1.Table{
Name: aws.ToString(result.Table.TableName),
Region: region,
GlobalSecondaryIndexes: globalSecondaryIndexes,
ProvisionedThroughput: currentCapacity,
+ Status: status,
}
return ret, nil
} | 1 | package aws
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/dynamodb"
"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
dynamodbv1 "github.com/lyft/clutch/backend/api/aws/dynamodb/v1"
awsv1 "github.com/lyft/clutch/backend/api/config/service/aws/v1"
)
// defaults for the dynamodb settings config
const (
AwsMaxRCU = 40000
AwsMaxWCU = 40000
SafeScaleFactor = 2.0
)
// get or set defaults for dynamodb scaling
func getScalingLimits(cfg *awsv1.Config) *awsv1.ScalingLimits {
if cfg.GetDynamodbConfig() == nil && cfg.DynamodbConfig.GetScalingLimits() == nil {
ds := &awsv1.ScalingLimits{
MaxReadCapacityUnits: AwsMaxRCU,
MaxWriteCapacityUnits: AwsMaxWCU,
MaxScaleFactor: SafeScaleFactor,
EnableOverride: false,
}
return ds
}
return cfg.DynamodbConfig.ScalingLimits
}
func (c *client) DescribeTable(ctx context.Context, region string, tableName string) (*dynamodbv1.Table, error) {
cl, err := c.getRegionalClient(region)
if err != nil {
c.log.Error("unable to get regional client", zap.Error(err))
return nil, err
}
result, err := getTable(ctx, cl, tableName)
if err != nil {
c.log.Error("unable to find table", zap.Error(err))
return nil, err
}
currentCapacity := &dynamodbv1.ProvisionedThroughput{
WriteCapacityUnits: aws.ToInt64(result.Table.ProvisionedThroughput.WriteCapacityUnits),
ReadCapacityUnits: aws.ToInt64(result.Table.ProvisionedThroughput.ReadCapacityUnits),
}
globalSecondaryIndexes := getGlobalSecondaryIndexes(result.Table.GlobalSecondaryIndexes)
ret := &dynamodbv1.Table{
Name: aws.ToString(result.Table.TableName),
Region: region,
GlobalSecondaryIndexes: globalSecondaryIndexes,
ProvisionedThroughput: currentCapacity,
}
return ret, nil
}
func getTable(ctx context.Context, client *regionalClient, tableName string) (*dynamodb.DescribeTableOutput, error) {
input := &dynamodb.DescribeTableInput{TableName: aws.String(tableName)}
return client.dynamodb.DescribeTable(ctx, input)
}
func getGlobalSecondaryIndexes(indexes []types.GlobalSecondaryIndexDescription) []*dynamodbv1.GlobalSecondaryIndex {
gsis := make([]*dynamodbv1.GlobalSecondaryIndex, len(indexes))
for idx, i := range indexes {
gsis[idx] = newProtoForGlobalSecondaryIndex(i)
}
return gsis
}
func newProtoForGlobalSecondaryIndex(index types.GlobalSecondaryIndexDescription) *dynamodbv1.GlobalSecondaryIndex {
currentCapacity := &dynamodbv1.ProvisionedThroughput{
ReadCapacityUnits: aws.ToInt64(index.ProvisionedThroughput.ReadCapacityUnits),
WriteCapacityUnits: aws.ToInt64(index.ProvisionedThroughput.WriteCapacityUnits),
}
return &dynamodbv1.GlobalSecondaryIndex{
Name: aws.ToString(index.IndexName),
ProvisionedThroughput: currentCapacity,
}
}
func isValidIncrease(client *regionalClient, current *types.ProvisionedThroughputDescription, target types.ProvisionedThroughput) error {
// check for targets that are lower than current (can't scale down)
if *current.ReadCapacityUnits > *target.ReadCapacityUnits {
return status.Errorf(codes.FailedPrecondition, "Target read capacity [%d] is lower than current capacity [%d]", *target.ReadCapacityUnits, *current.ReadCapacityUnits)
}
if *current.WriteCapacityUnits > *target.WriteCapacityUnits {
return status.Errorf(codes.FailedPrecondition, "Target write capacity [%d] is lower than current capacity [%d]", *target.WriteCapacityUnits, *current.WriteCapacityUnits)
}
// check for targets that exceed max limits
if *target.ReadCapacityUnits > client.dynamodbCfg.ScalingLimits.MaxReadCapacityUnits {
return status.Errorf(codes.FailedPrecondition, "Target read capacity exceeds maximum allowed limits [%d]", client.dynamodbCfg.ScalingLimits.MaxReadCapacityUnits)
}
if *target.WriteCapacityUnits > client.dynamodbCfg.ScalingLimits.MaxWriteCapacityUnits {
return status.Errorf(codes.FailedPrecondition, "Target write capacity exceeds maximum allowed limits [%d]", client.dynamodbCfg.ScalingLimits.MaxWriteCapacityUnits)
}
// check for increases that exceed max increase scale
if (float32(*target.ReadCapacityUnits / *current.ReadCapacityUnits)) > client.dynamodbCfg.ScalingLimits.MaxScaleFactor {
return status.Errorf(codes.FailedPrecondition, "Target read capacity exceeds the scale limit of [%.1f]x current capacity", client.dynamodbCfg.ScalingLimits.MaxScaleFactor)
}
if (float32(*target.WriteCapacityUnits / *current.WriteCapacityUnits)) > client.dynamodbCfg.ScalingLimits.MaxScaleFactor {
return status.Errorf(codes.FailedPrecondition, "Target write capacity exceeds the scale limit of [%.1f]x current capacity", client.dynamodbCfg.ScalingLimits.MaxScaleFactor)
}
return nil
}
func (c *client) UpdateTableCapacity(ctx context.Context, region string, tableName string, targetTableRcu int64, targetTableWcu int64) error {
cl, err := c.getRegionalClient(region)
if err != nil {
c.log.Error("unable to get regional client", zap.Error(err))
return err
}
currentTable, err := getTable(ctx, cl, tableName)
if err != nil {
c.log.Error("unable to find table", zap.Error(err))
return err
}
targetCapacity := types.ProvisionedThroughput{
ReadCapacityUnits: aws.Int64(targetTableRcu),
WriteCapacityUnits: aws.Int64(targetTableWcu),
}
err = isValidIncrease(cl, currentTable.Table.ProvisionedThroughput, targetCapacity)
if err != nil {
c.log.Error("invalid requested amount for capacity increase", zap.Error(err))
return err
}
input := &dynamodb.UpdateTableInput{
TableName: aws.String(tableName),
ProvisionedThroughput: &targetCapacity,
}
_, err = cl.dynamodb.UpdateTable(ctx, input)
return err
}
| 1 | 11,692 | nit: `status` collides with imported package named `status` | lyft-clutch | go |
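The nit above refers to the patched DescribeTable assigning to a local variable named `status` while the file imports `google.golang.org/grpc/status` under the same name, shadowing the package within that function. A minimal sketch of one way to address it — the name `tableStatus` is illustrative only, not taken from the repository:

	// Using a different local name keeps the imported status package visible,
	// so calls like status.Errorf elsewhere in the file are unaffected.
	tableStatus := newProtoForTableStatus(result.Table.TableStatus)

	ret := &dynamodbv1.Table{
		Name:                   aws.ToString(result.Table.TableName),
		Region:                 region,
		GlobalSecondaryIndexes: globalSecondaryIndexes,
		ProvisionedThroughput:  currentCapacity,
		Status:                 tableStatus,
	}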
@@ -584,6 +584,14 @@ func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger led
return eval(context.Background(), accUpdatesLedger, blk, false, nil, nil)
}
+// IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service so
+// that the memory pressure could be decreased until the catchpoint file writing is complete.
+func (l *Ledger) IsWritingCatchpointFile() bool {
+ l.trackerMu.RLock()
+ defer l.trackerMu.RUnlock()
+ return l.accts.IsWritingCatchpointFile()
+}
+
// A txlease is a transaction (sender, lease) pair which uniquely specifies a
// transaction lease.
type txlease struct { | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package ledger
import (
"context"
"database/sql"
"fmt"
"io"
"os"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/logging"
)
// Ledger is a database storing the contents of the ledger.
type Ledger struct {
// Database connections to the DBs storing blocks and tracker state.
// We use potentially different databases to avoid SQLite contention
// during catchup.
trackerDBs dbPair
blockDBs dbPair
// blockQ is the buffer of added blocks that will be flushed to
// persistent storage
blockQ *blockQueue
log logging.Logger
// archival determines whether the ledger keeps all blocks forever
// (archival mode) or trims older blocks to save space (non-archival).
archival bool
// genesisHash stores the genesis hash for this ledger.
genesisHash crypto.Digest
genesisAccounts map[basics.Address]basics.AccountData
genesisProto config.ConsensusParams
// State-machine trackers
accts accountUpdates
txTail txTail
bulletin bulletin
notifier blockNotifier
time timeTracker
metrics metricsTracker
trackers trackerRegistry
trackerMu deadlock.RWMutex
headerCache heapLRUCache
}
// InitState structure defines blockchain init params
type InitState struct {
Block bookkeeping.Block
Accounts map[basics.Address]basics.AccountData
GenesisHash crypto.Digest
}
// OpenLedger creates a Ledger object, using SQLite database filenames
// based on dbPathPrefix (in-memory if dbMem is true). genesisInitState.Blocks and
// genesisInitState.Accounts specify the initial blocks and accounts to use if the
// database wasn't initialized before.
func OpenLedger(
log logging.Logger, dbPathPrefix string, dbMem bool, genesisInitState InitState, cfg config.Local,
) (*Ledger, error) {
var err error
l := &Ledger{
log: log,
archival: cfg.Archival,
genesisHash: genesisInitState.GenesisHash,
genesisAccounts: genesisInitState.Accounts,
genesisProto: config.Consensus[genesisInitState.Block.CurrentProtocol],
}
l.headerCache.maxEntries = 10
defer func() {
if err != nil {
l.Close()
}
}()
l.trackerDBs, l.blockDBs, err = openLedgerDB(dbPathPrefix, dbMem)
if err != nil {
err = fmt.Errorf("OpenLedger.openLedgerDB %v", err)
return nil, err
}
l.trackerDBs.rdb.SetLogger(log)
l.trackerDBs.wdb.SetLogger(log)
l.blockDBs.rdb.SetLogger(log)
l.blockDBs.wdb.SetLogger(log)
err = l.blockDBs.wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
return initBlocksDB(tx, l, []bookkeeping.Block{genesisInitState.Block}, cfg.Archival)
})
if err != nil {
err = fmt.Errorf("OpenLedger.initBlocksDB %v", err)
return nil, err
}
if l.genesisAccounts == nil {
l.genesisAccounts = make(map[basics.Address]basics.AccountData)
}
l.accts.initialize(cfg, dbPathPrefix, l.genesisProto, l.genesisAccounts)
err = l.reloadLedger()
if err != nil {
return nil, err
}
return l, nil
}
func (l *Ledger) reloadLedger() error {
// similar to the Close function, we want to start by closing the blockQ first. The
	// blockQ has a sync goroutine which indirectly calls other trackers. We want to eliminate that goroutine first,
// and follow up by taking the trackers lock.
if l.blockQ != nil {
l.blockQ.close()
l.blockQ = nil
}
// take the trackers lock. This would ensure that no other goroutine is using the trackers.
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
// close the trackers.
l.trackers.close()
// reload -
var err error
l.blockQ, err = bqInit(l)
if err != nil {
err = fmt.Errorf("reloadLedger.bqInit %v", err)
return err
}
l.trackers.register(&l.accts) // update the balances
l.trackers.register(&l.time) // tracks the block timestamps
l.trackers.register(&l.txTail) // update the transaction tail, tracking the recent 1000 txn
l.trackers.register(&l.bulletin) // provide closed channel signaling support for completed rounds
l.trackers.register(&l.notifier) // send OnNewBlocks to subscribers
l.trackers.register(&l.metrics) // provides metrics reporting support
err = l.trackers.loadFromDisk(l)
if err != nil {
err = fmt.Errorf("reloadLedger.loadFromDisk %v", err)
return err
}
// Check that the genesis hash, if present, matches.
err = l.verifyMatchingGenesisHash()
if err != nil {
return err
}
return nil
}
// verifyMatchingGenesisHash tests to see that the latest block header points to the same genesis hash as provided in genesisHash.
func (l *Ledger) verifyMatchingGenesisHash() (err error) {
// Check that the genesis hash, if present, matches.
err = l.blockDBs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
latest, err := blockLatest(tx)
if err != nil {
return err
}
hdr, err := blockGetHdr(tx, latest)
if err != nil {
return err
}
params := config.Consensus[hdr.CurrentProtocol]
if params.SupportGenesisHash && hdr.GenesisHash != l.genesisHash {
return fmt.Errorf(
"latest block %d genesis hash %v does not match expected genesis hash %v",
latest, hdr.GenesisHash, l.genesisHash,
)
}
return nil
})
return
}
func openLedgerDB(dbPathPrefix string, dbMem bool) (trackerDBs dbPair, blockDBs dbPair, err error) {
// Backwards compatibility: we used to store both blocks and tracker
// state in a single SQLite db file.
var trackerDBFilename string
var blockDBFilename string
if !dbMem {
commonDBFilename := dbPathPrefix + ".sqlite"
_, err = os.Stat(commonDBFilename)
if !os.IsNotExist(err) {
// before launch, we used to have both blocks and tracker
// state in a single SQLite db file. We don't have that anymore,
// and we want to fail when that's the case.
err = fmt.Errorf("A single ledger database file '%s' was detected. This is no longer supported by current binary", commonDBFilename)
return
}
}
trackerDBFilename = dbPathPrefix + ".tracker.sqlite"
blockDBFilename = dbPathPrefix + ".block.sqlite"
trackerDBs, err = dbOpen(trackerDBFilename, dbMem)
if err != nil {
return
}
blockDBs, err = dbOpen(blockDBFilename, dbMem)
if err != nil {
return
}
return
}
// initBlocksDB performs DB initialization:
// - creates and populates it with genesis blocks
// - ensures DB is in good shape for archival mode and resets it if not
func initBlocksDB(tx *sql.Tx, l *Ledger, initBlocks []bookkeeping.Block, isArchival bool) (err error) {
err = blockInit(tx, initBlocks)
if err != nil {
err = fmt.Errorf("initBlocksDB.blockInit %v", err)
return err
}
// in archival mode check if DB contains all blocks up to the latest
if isArchival {
earliest, err := blockEarliest(tx)
if err != nil {
err = fmt.Errorf("initBlocksDB.blockEarliest %v", err)
return err
}
		// Detect possible problem - archival node needs all blocks but has only a subsequence of them
// So reset the DB and init it again
if earliest != basics.Round(0) {
l.log.Warnf("resetting blocks DB (earliest block is %v)", earliest)
err := blockResetDB(tx)
if err != nil {
err = fmt.Errorf("initBlocksDB.blockResetDB %v", err)
return err
}
err = blockInit(tx, initBlocks)
if err != nil {
err = fmt.Errorf("initBlocksDB.blockInit 2 %v", err)
return err
}
}
// Manually replace block 0, even if we already had it
// (necessary to normalize the payset commitment because of a
// bug that caused its value to change)
//
// Don't bother for non-archival nodes since they will toss
// block 0 almost immediately
//
// TODO remove this once a version containing this code has
// been deployed to archival nodes
if len(initBlocks) > 0 && initBlocks[0].Round() == basics.Round(0) {
updated, err := blockReplaceIfExists(tx, l.log, initBlocks[0], agreement.Certificate{})
if err != nil {
err = fmt.Errorf("initBlocksDB.blockReplaceIfExists %v", err)
return err
}
if updated {
l.log.Infof("initBlocksDB replaced block 0")
}
}
}
return nil
}
// Close reclaims resources used by the ledger (namely, the database connection
// and goroutines used by trackers).
func (l *Ledger) Close() {
	// we shut down the blockqueue first, since its sync goroutine dispatches calls
// back to the trackers.
if l.blockQ != nil {
l.blockQ.close()
l.blockQ = nil
}
// take the trackers lock. This would ensure that no other goroutine is using the trackers.
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
// then, we shut down the trackers and their corresponding goroutines.
l.trackers.close()
	// last, we close the underlying database connections.
l.blockDBs.close()
l.trackerDBs.close()
}
// RegisterBlockListeners registers listeners that will be called when a
// new block is added to the ledger.
func (l *Ledger) RegisterBlockListeners(listeners []BlockListener) {
l.notifier.register(listeners)
}
// notifyCommit informs the trackers that all blocks up to r have been
// written to disk. Returns the minimum block number that must be kept
// in the database.
func (l *Ledger) notifyCommit(r basics.Round) basics.Round {
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
minToSave := l.trackers.committedUpTo(r)
if l.archival {
// Do not forget any blocks.
minToSave = 0
}
return minToSave
}
// GetLastCatchpointLabel returns the latest catchpoint label that was written to the
// database.
func (l *Ledger) GetLastCatchpointLabel() string {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.accts.GetLastCatchpointLabel()
}
// GetCreatorForRound takes a CreatableIndex and a CreatableType and tries to
// look up a creator address, setting ok to false if the query succeeded but no
// creator was found.
func (l *Ledger) GetCreatorForRound(rnd basics.Round, cidx basics.CreatableIndex, ctype basics.CreatableType) (creator basics.Address, ok bool, err error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.accts.GetCreatorForRound(rnd, cidx, ctype)
}
// GetCreator is like GetCreatorForRound, but for the latest round and race-free
// with respect to ledger.Latest()
func (l *Ledger) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.accts.GetCreatorForRound(l.blockQ.latest(), cidx, ctype)
}
// ListAssets takes a maximum asset index and maximum result length, and
// returns up to that many CreatableLocators from the database where app idx is
// less than or equal to the maximum.
func (l *Ledger) ListAssets(maxAssetIdx basics.AssetIndex, maxResults uint64) (results []basics.CreatableLocator, err error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.accts.ListAssets(maxAssetIdx, maxResults)
}
// ListApplications takes a maximum app index and maximum result length, and
// returns up to that many CreatableLocators from the database where app idx is
// less than or equal to the maximum.
func (l *Ledger) ListApplications(maxAppIdx basics.AppIndex, maxResults uint64) (results []basics.CreatableLocator, err error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.accts.ListApplications(maxAppIdx, maxResults)
}
// Lookup uses the accounts tracker to return the account state for a
// given account in a particular round. The account values reflect
// the changes of all blocks up to and including rnd.
func (l *Ledger) Lookup(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
// Intentionally apply (pending) rewards up to rnd.
data, err := l.accts.Lookup(rnd, addr, true)
if err != nil {
return basics.AccountData{}, err
}
return data, nil
}
// LookupWithoutRewards is like Lookup but does not apply pending rewards up
// to the requested round rnd.
func (l *Ledger) LookupWithoutRewards(rnd basics.Round, addr basics.Address) (basics.AccountData, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
data, err := l.accts.Lookup(rnd, addr, false)
if err != nil {
return basics.AccountData{}, err
}
return data, nil
}
// Totals returns the totals of all accounts at the end of round rnd.
func (l *Ledger) Totals(rnd basics.Round) (AccountTotals, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.accts.Totals(rnd)
}
func (l *Ledger) isDup(currentProto config.ConsensusParams, current basics.Round, firstValid basics.Round, lastValid basics.Round, txid transactions.Txid, txl txlease) (bool, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.txTail.isDup(currentProto, current, firstValid, lastValid, txid, txl)
}
// GetRoundTxIds returns a map of the transaction ids that we have for the given round.
// This function is currently not being used, but remains here as it might be useful in the future.
func (l *Ledger) GetRoundTxIds(rnd basics.Round) (txMap map[transactions.Txid]bool) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.txTail.getRoundTxIds(rnd)
}
// Latest returns the latest known block round added to the ledger.
func (l *Ledger) Latest() basics.Round {
return l.blockQ.latest()
}
// LatestCommitted returns the last block round number written to
// persistent storage. This block, and all previous blocks, are
// guaranteed to be available after a crash.
func (l *Ledger) LatestCommitted() basics.Round {
return l.blockQ.latestCommitted()
}
// Block returns the block for round rnd.
func (l *Ledger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) {
return l.blockQ.getBlock(rnd)
}
// BlockHdr returns the BlockHeader of the block for round rnd.
func (l *Ledger) BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error) {
value, exists := l.headerCache.Get(rnd)
if exists {
blk = value.(bookkeeping.BlockHeader)
return
}
blk, err = l.blockQ.getBlockHdr(rnd)
if err == nil {
l.headerCache.Put(rnd, blk)
}
return
}
// EncodedBlockCert returns the encoded block and the corresponding encoded certificate of the block for round rnd.
func (l *Ledger) EncodedBlockCert(rnd basics.Round) (blk []byte, cert []byte, err error) {
return l.blockQ.getEncodedBlockCert(rnd)
}
// BlockCert returns the block and the certificate of the block for round rnd.
func (l *Ledger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreement.Certificate, err error) {
return l.blockQ.getBlockCert(rnd)
}
// AddBlock adds a new block to the ledger. The block is stored in an
// in-memory queue and is written to the disk in the background. An error
// is returned if this is not the expected next block number.
func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) error {
	// passing nil as the verificationPool is ok since we're asking the evaluator to skip verification.
updates, err := eval(context.Background(), l, blk, false, nil, nil)
if err != nil {
return err
}
vb := ValidatedBlock{
blk: blk,
delta: updates,
}
return l.AddValidatedBlock(vb, cert)
}
// AddValidatedBlock adds a new block to the ledger, after the block has
// been validated by calling Ledger.Validate(). This saves the cost of
// having to re-compute the effect of the block on the ledger state, if
// the block has previously been validated. Otherwise, AddValidatedBlock
// behaves like AddBlock.
func (l *Ledger) AddValidatedBlock(vb ValidatedBlock, cert agreement.Certificate) error {
// Grab the tracker lock first, to ensure newBlock() is notified before committedUpTo().
l.trackerMu.Lock()
defer l.trackerMu.Unlock()
err := l.blockQ.putBlock(vb.blk, cert)
if err != nil {
return err
}
l.headerCache.Put(vb.blk.Round(), vb.blk.BlockHeader)
l.trackers.newBlock(vb.blk, vb.delta)
return nil
}
// WaitForCommit waits until block r (and block before r) are durably
// written to disk.
func (l *Ledger) WaitForCommit(r basics.Round) {
l.blockQ.waitCommit(r)
}
// Wait returns a channel that closes once a given round is stored
// durably in the ledger.
// When <-l.Wait(r) finishes, ledger is guaranteed to have round r,
// and will not lose round r after a crash.
// This makes it easy to use in a select{} statement.
func (l *Ledger) Wait(r basics.Round) chan struct{} {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.bulletin.Wait(r)
}
// Timestamp uses the timestamp tracker to return the timestamp
// from block r.
func (l *Ledger) Timestamp(r basics.Round) (int64, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.time.timestamp(r)
}
// GenesisHash returns the genesis hash for this ledger.
func (l *Ledger) GenesisHash() crypto.Digest {
return l.genesisHash
}
// GetCatchpointCatchupState returns the current state of the catchpoint catchup.
func (l *Ledger) GetCatchpointCatchupState(ctx context.Context) (state CatchpointCatchupState, err error) {
return MakeCatchpointCatchupAccessor(l, l.log).GetState(ctx)
}
// GetCatchpointStream returns an io.ReadCloser file stream from which the catchpoint file
// for the provided round could be retrieved. If no such stream can be generated, a non-nil
// error is returned. The io.ReadCloser and the error are mutually exclusive -
// if error is returned, the file stream is guaranteed to be nil, and vice versa,
// if the file stream is not nil, the error is guaranteed to be nil.
func (l *Ledger) GetCatchpointStream(round basics.Round) (io.ReadCloser, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
return l.accts.GetCatchpointStream(round)
}
// ledgerForTracker methods
func (l *Ledger) trackerDB() dbPair {
return l.trackerDBs
}
// ledgerForTracker methods
func (l *Ledger) blockDB() dbPair {
return l.blockDBs
}
func (l *Ledger) trackerLog() logging.Logger {
return l.log
}
// trackerEvalVerified is used by the accountUpdates to reconstruct the StateDelta from a given block during its loadFromDisk execution.
// when this function is called, the trackers mutex is expected to already be taken. The provided accUpdatesLedger would allow the
// evaluator to shortcut the "main" ledger (i.e. this struct) and avoid taking the trackers lock a second time.
func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (StateDelta, error) {
	// passing nil as the verificationPool is ok since we're asking the evaluator to skip verification.
return eval(context.Background(), accUpdatesLedger, blk, false, nil, nil)
}
// A txlease is a transaction (sender, lease) pair which uniquely specifies a
// transaction lease.
type txlease struct {
sender basics.Address
lease [32]byte
}
| 1 | 39,913 | "memory pressure could be decreased" --> "to avoid memory pressure" | algorand-go-algorand | go |
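Applying the reviewer's suggested wording to the doc comment added in the patch above would read roughly as follows; this sketch only rewords the comment on the patch's own function and changes no behaviour:

// IsWritingCatchpointFile returns true when a catchpoint file is being generated.
// The function is used by the catchup service to avoid memory pressure until the
// catchpoint file writing is complete.
func (l *Ledger) IsWritingCatchpointFile() bool {
	l.trackerMu.RLock()
	defer l.trackerMu.RUnlock()
	return l.accts.IsWritingCatchpointFile()
}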
@@ -426,8 +426,11 @@ class TestTimescaleListenStore(DatabaseTestCase):
self.assertEqual(count + 1, int(cache.get(user_key, decode=False) or 0))
def test_delete_listens(self):
- self._create_test_data(self.testuser_name)
- listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, to_ts=1400000300)
+ uid = random.randint(2000, 1 << 31)
+ testuser = db_user.get_or_create(uid, "user_%d" % uid)
+ testuser_name = testuser['musicbrainz_id']
+ self._create_test_data(testuser_name)
+ listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=testuser_name, to_ts=1400000300)
self.assertEqual(len(listens), 5)
self.assertEqual(listens[0].ts_since_epoch, 1400000200)
self.assertEqual(listens[1].ts_since_epoch, 1400000150) | 1 | # coding=utf-8
import os
from time import time
from datetime import datetime
import logging
import shutil
import subprocess
import tarfile
import tempfile
import random
import ujson
import psycopg2
import sqlalchemy
import listenbrainz.db.user as db_user
from psycopg2.extras import execute_values
from listenbrainz.db.testing import DatabaseTestCase
from listenbrainz.db import timescale as ts
from listenbrainz import config
from listenbrainz.listenstore.tests.util import create_test_data_for_timescalelistenstore, generate_data
from listenbrainz.webserver.timescale_connection import init_timescale_connection
from listenbrainz.db.dump import SchemaMismatchException
from listenbrainz.listenstore import LISTENS_DUMP_SCHEMA_VERSION
from listenbrainz.listenstore.timescale_listenstore import REDIS_USER_LISTEN_COUNT
from brainzutils import cache
TIMESCALE_SQL_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..', 'admin', 'timescale')
class TestTimescaleListenStore(DatabaseTestCase):
def reset_timescale_db(self):
ts.init_db_connection(config.TIMESCALE_ADMIN_URI)
ts.run_sql_script_without_transaction(os.path.join(TIMESCALE_SQL_DIR, 'drop_db.sql'))
ts.run_sql_script_without_transaction(os.path.join(TIMESCALE_SQL_DIR, 'create_db.sql'))
ts.engine.dispose()
ts.init_db_connection(config.TIMESCALE_ADMIN_LB_URI)
ts.run_sql_script_without_transaction(os.path.join(TIMESCALE_SQL_DIR, 'create_extensions.sql'))
ts.engine.dispose()
ts.init_db_connection(config.SQLALCHEMY_TIMESCALE_URI)
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_schemas.sql'))
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_tables.sql'))
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_functions.sql'))
ts.run_sql_script_without_transaction(os.path.join(TIMESCALE_SQL_DIR, 'create_views.sql'))
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_indexes.sql'))
ts.create_view_indexes()
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_primary_keys.sql'))
ts.run_sql_script(os.path.join(TIMESCALE_SQL_DIR, 'create_foreign_keys.sql'))
ts.engine.dispose()
def setUp(self):
super(TestTimescaleListenStore, self).setUp()
self.log = logging.getLogger(__name__)
self.reset_timescale_db()
self.ns = config.REDIS_NAMESPACE
self.logstore = init_timescale_connection(self.log, {
'REDIS_HOST': config.REDIS_HOST,
'REDIS_PORT': config.REDIS_PORT,
'REDIS_NAMESPACE': config.REDIS_NAMESPACE,
'SQLALCHEMY_TIMESCALE_URI': config.SQLALCHEMY_TIMESCALE_URI,
})
self.testuser_id = db_user.create(1, "test")
self.testuser_name = db_user.get(self.testuser_id)['musicbrainz_id']
def tearDown(self):
self.logstore = None
super(TestTimescaleListenStore, self).tearDown()
def _create_test_data(self, user_name, test_data_file_name=None):
test_data = create_test_data_for_timescalelistenstore(user_name, test_data_file_name)
self.logstore.insert(test_data)
return len(test_data)
def _insert_with_created(self, listens):
""" Insert a batch of listens with 'created' field.
"""
submit = []
for listen in listens:
submit.append((*listen.to_timescale(), listen.inserted_timestamp))
query = """INSERT INTO listen (listened_at, track_name, user_name, data, created)
VALUES %s
ON CONFLICT (listened_at, track_name, user_name)
DO NOTHING
"""
conn = ts.engine.raw_connection()
with conn.cursor() as curs:
execute_values(curs, query, submit, template=None)
conn.commit()
def test_check_listen_count_view_exists(self):
try:
with ts.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("""SELECT column_name
FROM information_schema.columns
WHERE table_name = 'listen_count_30day'
ORDER BY column_name"""))
cols = result.fetchall()
except psycopg2.OperationalError as e:
self.log.error("Cannot query timescale listen_count: %s" % str(e), exc_info=True)
raise
self.assertEqual(cols[0][0], "count")
self.assertEqual(cols[1][0], "listened_at_bucket")
self.assertEqual(cols[2][0], "user_name")
# The test test_aaa_get_total_listen_count is gone because all it did was test to see if the
	# timescale continuous aggregate works and oftentimes it didn't work fast enough. We don't care
# about immediate correctness, but eventual correctness, so test tossed.
def test_insert_timescale(self):
count = self._create_test_data(self.testuser_name)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, from_ts=1399999999)
self.assertEqual(len(listens), count)
def test_fetch_listens_0(self):
self._create_test_data(self.testuser_name)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, from_ts=1400000000, limit=1)
self.assertEqual(len(listens), 1)
self.assertEqual(listens[0].ts_since_epoch, 1400000050)
self.assertEqual(min_ts, 1400000000)
self.assertEqual(max_ts, 1400000200)
def test_fetch_listens_1(self):
self._create_test_data(self.testuser_name)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, from_ts=1400000000)
self.assertEqual(len(listens), 4)
self.assertEqual(listens[0].ts_since_epoch, 1400000200)
self.assertEqual(listens[1].ts_since_epoch, 1400000150)
self.assertEqual(listens[2].ts_since_epoch, 1400000100)
self.assertEqual(listens[3].ts_since_epoch, 1400000050)
def test_fetch_listens_2(self):
self._create_test_data(self.testuser_name)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, from_ts=1400000100)
self.assertEqual(len(listens), 2)
self.assertEqual(listens[0].ts_since_epoch, 1400000200)
self.assertEqual(listens[1].ts_since_epoch, 1400000150)
def test_fetch_listens_3(self):
self._create_test_data(self.testuser_name)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, to_ts=1400000300)
self.assertEqual(len(listens), 5)
self.assertEqual(listens[0].ts_since_epoch, 1400000200)
self.assertEqual(listens[1].ts_since_epoch, 1400000150)
self.assertEqual(listens[2].ts_since_epoch, 1400000100)
self.assertEqual(listens[3].ts_since_epoch, 1400000050)
self.assertEqual(listens[4].ts_since_epoch, 1400000000)
def test_fetch_listens_4(self):
self._create_test_data(self.testuser_name)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, from_ts=1400000049, to_ts=1400000101)
self.assertEqual(len(listens), 2)
self.assertEqual(listens[0].ts_since_epoch, 1400000100)
self.assertEqual(listens[1].ts_since_epoch, 1400000050)
def test_fetch_listens_5(self):
self._create_test_data(self.testuser_name)
with self.assertRaises(ValueError):
self.logstore.fetch_listens(user_name=self.testuser_name, from_ts=1400000101, to_ts=1400000001)
def test_fetch_listens_with_gaps(self):
self._create_test_data(self.testuser_name,
test_data_file_name='timescale_listenstore_test_listens_over_greater_time_range.json')
# test from_ts with gaps
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, from_ts=1399999999)
self.assertEqual(len(listens), 4)
self.assertEqual(listens[0].ts_since_epoch, 1420000050)
self.assertEqual(listens[1].ts_since_epoch, 1420000000)
self.assertEqual(listens[2].ts_since_epoch, 1400000050)
self.assertEqual(listens[3].ts_since_epoch, 1400000000)
# test from_ts and to_ts with gaps
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, from_ts=1400000049, to_ts=1420000001)
self.assertEqual(len(listens), 2)
self.assertEqual(listens[0].ts_since_epoch, 1420000000)
self.assertEqual(listens[1].ts_since_epoch, 1400000050)
# test to_ts with gaps
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, to_ts=1420000051)
self.assertEqual(len(listens), 4)
self.assertEqual(listens[0].ts_since_epoch, 1420000050)
self.assertEqual(listens[1].ts_since_epoch, 1420000000)
self.assertEqual(listens[2].ts_since_epoch, 1400000050)
self.assertEqual(listens[3].ts_since_epoch, 1400000000)
def test_get_listen_count_for_user(self):
uid = random.randint(2000, 1 << 31)
testuser = db_user.get_or_create(uid, "user_%d" % uid)
testuser_name = testuser['musicbrainz_id']
count = self._create_test_data(testuser_name)
listen_count = self.logstore.get_listen_count_for_user(user_name=testuser_name)
self.assertEqual(count, listen_count)
def test_get_timestamps_for_user(self):
uid = random.randint(2000, 1 << 31)
testuser = db_user.get_or_create(uid, "user_%d" % uid)
testuser_name = testuser['musicbrainz_id']
(min_ts, max_ts) = self.logstore.get_timestamps_for_user(user_name=testuser_name)
self.assertEqual(min_ts, 0)
self.assertEqual(max_ts, 0)
self._create_test_data(testuser_name)
(min_ts, max_ts) = self.logstore.get_timestamps_for_user(user_name=testuser_name)
self.assertEqual(min_ts, 1400000000)
self.assertEqual(max_ts, 1400000200)
def test_fetch_recent_listens(self):
user = db_user.get_or_create(2, 'someuser')
user_name = user['musicbrainz_id']
self._create_test_data(user_name)
user2 = db_user.get_or_create(3, 'otheruser')
user_name2 = user2['musicbrainz_id']
self._create_test_data(user_name2)
recent = self.logstore.fetch_recent_listens_for_users([user_name, user_name2], limit=1, max_age=10000000000)
self.assertEqual(len(recent), 2)
recent = self.logstore.fetch_recent_listens_for_users([user_name, user_name2], max_age=10000000000)
self.assertEqual(len(recent), 4)
recent = self.logstore.fetch_recent_listens_for_users([user_name], max_age=int(time()) -
recent[0].ts_since_epoch + 1)
self.assertEqual(len(recent), 1)
self.assertEqual(recent[0].ts_since_epoch, 1400000200)
def test_dump_listens(self):
self._create_test_data(self.testuser_name)
temp_dir = tempfile.mkdtemp()
dump = self.logstore.dump_listens(
location=temp_dir,
dump_id=1,
end_time=datetime.now(),
)
self.assertTrue(os.path.isfile(dump))
shutil.rmtree(temp_dir)
def test_incremental_dump(self):
base = 1500000000
listens = generate_data(1, self.testuser_name, base-4, 5, base+1) # generate 5 listens with inserted_ts 1-5
self._insert_with_created(listens)
listens = generate_data(1, self.testuser_name, base+1, 5, base+6) # generate 5 listens with inserted_ts 6-10
self._insert_with_created(listens)
temp_dir = tempfile.mkdtemp()
dump_location = self.logstore.dump_listens(
location=temp_dir,
dump_id=1,
start_time=datetime.utcfromtimestamp(base + 6),
end_time=datetime.utcfromtimestamp(base + 10)
)
self.assertTrue(os.path.isfile(dump_location))
self.reset_timescale_db()
self.logstore.import_listens_dump(dump_location)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, to_ts=base + 11)
self.assertEqual(len(listens), 4)
self.assertEqual(listens[0].ts_since_epoch, base + 5)
self.assertEqual(listens[1].ts_since_epoch, base + 4)
self.assertEqual(listens[2].ts_since_epoch, base + 3)
self.assertEqual(listens[3].ts_since_epoch, base + 2)
shutil.rmtree(temp_dir)
def test_time_range_full_dumps(self):
base = 1500000000
listens = generate_data(1, self.testuser_name, base + 1, 5) # generate 5 listens with ts 1-5
self.logstore.insert(listens)
listens = generate_data(1, self.testuser_name, base + 6, 5) # generate 5 listens with ts 6-10
self.logstore.insert(listens)
temp_dir = tempfile.mkdtemp()
dump_location = self.logstore.dump_listens(
location=temp_dir,
dump_id=1,
end_time=datetime.utcfromtimestamp(base + 5)
)
self.assertTrue(os.path.isfile(dump_location))
self.reset_timescale_db()
self.logstore.import_listens_dump(dump_location)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, to_ts=base + 11)
self.assertEqual(len(listens), 5)
self.assertEqual(listens[0].ts_since_epoch, base + 5)
self.assertEqual(listens[1].ts_since_epoch, base + 4)
self.assertEqual(listens[2].ts_since_epoch, base + 3)
self.assertEqual(listens[3].ts_since_epoch, base + 2)
self.assertEqual(listens[4].ts_since_epoch, base + 1)
# tests test_full_dump_listen_with_no_created
# and test_incremental_dumps_listen_with_no_created have been removed because
	# with timescale all the missing inserted timestamps will have
	# been assigned sane created timestamps by the migration script
	# and timescale will not allow blank created timestamps, so these tests are pointless
def test_import_listens(self):
self._create_test_data(self.testuser_name)
temp_dir = tempfile.mkdtemp()
dump_location = self.logstore.dump_listens(
location=temp_dir,
dump_id=1,
end_time=datetime.now(),
)
self.assertTrue(os.path.isfile(dump_location))
self.reset_timescale_db()
self.logstore.import_listens_dump(dump_location)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, to_ts=1400000300)
self.assertEqual(len(listens), 5)
self.assertEqual(listens[0].ts_since_epoch, 1400000200)
self.assertEqual(listens[1].ts_since_epoch, 1400000150)
self.assertEqual(listens[2].ts_since_epoch, 1400000100)
self.assertEqual(listens[3].ts_since_epoch, 1400000050)
self.assertEqual(listens[4].ts_since_epoch, 1400000000)
shutil.rmtree(temp_dir)
def test_dump_and_import_listens_escaped(self):
user = db_user.get_or_create(3, 'i have a\\weird\\user, na/me"\n')
self._create_test_data(user['musicbrainz_id'])
self._create_test_data(self.testuser_name)
temp_dir = tempfile.mkdtemp()
dump_location = self.logstore.dump_listens(
location=temp_dir,
dump_id=1,
end_time=datetime.now(),
)
self.assertTrue(os.path.isfile(dump_location))
self.reset_timescale_db()
self.logstore.import_listens_dump(dump_location)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=user['musicbrainz_id'], to_ts=1400000300)
self.assertEqual(len(listens), 5)
self.assertEqual(listens[0].ts_since_epoch, 1400000200)
self.assertEqual(listens[1].ts_since_epoch, 1400000150)
self.assertEqual(listens[2].ts_since_epoch, 1400000100)
self.assertEqual(listens[3].ts_since_epoch, 1400000050)
self.assertEqual(listens[4].ts_since_epoch, 1400000000)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, to_ts=1400000300)
self.assertEqual(len(listens), 5)
self.assertEqual(listens[0].ts_since_epoch, 1400000200)
self.assertEqual(listens[1].ts_since_epoch, 1400000150)
self.assertEqual(listens[2].ts_since_epoch, 1400000100)
self.assertEqual(listens[3].ts_since_epoch, 1400000050)
self.assertEqual(listens[4].ts_since_epoch, 1400000000)
shutil.rmtree(temp_dir)
# test test_import_dump_many_users is gone -- why are we testing user dump/restore here??
def create_test_dump(self, archive_name, archive_path, schema_version=None):
""" Creates a test dump to test the import listens functionality.
Args:
archive_name (str): the name of the archive
archive_path (str): the full path to the archive
schema_version (int): the version of the schema to be written into SCHEMA_SEQUENCE
if not provided, the SCHEMA_SEQUENCE file is not added to the archive
Returns:
the full path to the archive created
"""
temp_dir = tempfile.mkdtemp()
with tarfile.open(archive_path, mode='w|xz') as tar:
schema_version_path = os.path.join(temp_dir, 'SCHEMA_SEQUENCE')
with open(schema_version_path, 'w') as f:
f.write(str(schema_version or ' '))
tar.add(schema_version_path,
arcname=os.path.join(archive_name, 'SCHEMA_SEQUENCE'))
return archive_path
def test_schema_mismatch_exception_for_dump_incorrect_schema(self):
""" Tests that SchemaMismatchException is raised when the schema of the dump is old """
# create a temp archive with incorrect SCHEMA_VERSION
temp_dir = tempfile.mkdtemp()
archive_name = 'temp_dump'
archive_path = os.path.join(temp_dir, archive_name + '.tar.xz')
archive_path = self.create_test_dump(
archive_name=archive_name,
archive_path=archive_path,
schema_version=LISTENS_DUMP_SCHEMA_VERSION - 1
)
with self.assertRaises(SchemaMismatchException):
self.logstore.import_listens_dump(archive_path)
def test_schema_mismatch_exception_for_dump_no_schema(self):
""" Tests that SchemaMismatchException is raised when there is no schema version in the archive """
temp_dir = tempfile.mkdtemp()
archive_name = 'temp_dump'
archive_path = os.path.join(temp_dir, archive_name + '.tar.xz')
archive_path = self.create_test_dump(
archive_name=archive_name,
archive_path=archive_path,
schema_version=None
)
with self.assertRaises(SchemaMismatchException):
self.logstore.import_listens_dump(archive_path)
def test_listen_counts_in_cache(self):
uid = random.randint(2000, 1 << 31)
testuser = db_user.get_or_create(uid, "user_%d" % uid)
testuser_name = testuser['musicbrainz_id']
count = self._create_test_data(testuser_name)
user_key = REDIS_USER_LISTEN_COUNT + testuser_name
self.assertEqual(count, self.logstore.get_listen_count_for_user(testuser_name))
self.assertEqual(count, int(cache.get(user_key, decode=False) or 0))
batch = generate_data(uid, testuser_name, int(time()), 1)
self.logstore.insert(batch)
self.assertEqual(count + 1, int(cache.get(user_key, decode=False) or 0))
def test_delete_listens(self):
self._create_test_data(self.testuser_name)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, to_ts=1400000300)
self.assertEqual(len(listens), 5)
self.assertEqual(listens[0].ts_since_epoch, 1400000200)
self.assertEqual(listens[1].ts_since_epoch, 1400000150)
self.assertEqual(listens[2].ts_since_epoch, 1400000100)
self.assertEqual(listens[3].ts_since_epoch, 1400000050)
self.assertEqual(listens[4].ts_since_epoch, 1400000000)
self.logstore.delete(self.testuser_name)
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=self.testuser_name, to_ts=1400000300)
self.assertEqual(len(listens), 0)
def test_delete_listens_escaped(self):
user = db_user.get_or_create(213, 'i have a\\weird\\user, na/me"\n')
self._create_test_data(user['musicbrainz_id'])
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=user['musicbrainz_id'], to_ts=1400000300)
self.assertEqual(len(listens), 5)
self.assertEqual(listens[0].ts_since_epoch, 1400000200)
self.assertEqual(listens[1].ts_since_epoch, 1400000150)
self.assertEqual(listens[2].ts_since_epoch, 1400000100)
self.assertEqual(listens[3].ts_since_epoch, 1400000050)
self.assertEqual(listens[4].ts_since_epoch, 1400000000)
self.logstore.delete(user['musicbrainz_id'])
listens, min_ts, max_ts = self.logstore.fetch_listens(user_name=user['musicbrainz_id'], to_ts=1400000300)
self.assertEqual(len(listens), 0)
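# A possible follow-up assertion (sketch only): after delete(), the cached listen count
# could be verified as well, assuming the store also resets the Redis counter, e.g.:
#   user_key = REDIS_USER_LISTEN_COUNT + user['musicbrainz_id']
#   self.assertEqual(0, int(cache.get(user_key, decode=False) or 0))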
| 1 | 18,828 | I think it would make sense to fetch the cache values after deleting the listens and making sure they are what we expect. | metabrainz-listenbrainz-server | py |
@@ -169,6 +169,11 @@ export class CollapsibleColumns extends BasePlugin {
});
} else if (Array.isArray(collapsibleColumns)) {
+
+ this.headerStateManager.mapState(() => {
+ return { collapsible: false };
+ });
+
this.headerStateManager.mergeStateWith(collapsibleColumns);
}
} | 1 | import { BasePlugin } from '../base';
import { arrayEach, arrayFilter, arrayUnique } from '../../helpers/array';
import { rangeEach } from '../../helpers/number';
import { warn } from '../../helpers/console';
import {
addClass,
hasClass,
fastInnerText
} from '../../helpers/dom/element';
import EventManager from '../../eventManager';
import { stopImmediatePropagation } from '../../helpers/dom/event';
export const PLUGIN_KEY = 'collapsibleColumns';
export const PLUGIN_PRIORITY = 290;
const actionDictionary = new Map([
['collapse', {
hideColumn: true,
beforeHook: 'beforeColumnCollapse',
afterHook: 'afterColumnCollapse',
}],
['expand', {
hideColumn: false,
beforeHook: 'beforeColumnExpand',
afterHook: 'afterColumnExpand',
}],
]);
/**
* @plugin CollapsibleColumns
* @class CollapsibleColumns
*
* @description
* The _CollapsibleColumns_ plugin allows collapsing of columns, covered by a header with the `colspan` property defined.
*
* Clicking the "collapse/expand" button collapses (or expands) all "child" headers except the first one.
*
* Setting the {@link Options#collapsiblecolumns} property to `true` will display a "collapse/expand" button in every header
* with a defined `colspan` property.
*
* To limit this functionality to a smaller group of headers, define the `collapsibleColumns` property as an array
* of objects, as in the example below.
*
* @example
* ```js
* const container = document.getElementById('example');
* const hot = new Handsontable(container, {
* data: generateDataObj(),
* colHeaders: true,
* rowHeaders: true,
* nestedHeaders: true,
* // enable plugin
* collapsibleColumns: true,
* });
*
* // or
* const hot = new Handsontable(container, {
* data: generateDataObj(),
* colHeaders: true,
* rowHeaders: true,
* nestedHeaders: true,
* // enable and configure which columns can be collapsed
* collapsibleColumns: [
* {row: -4, col: 1, collapsible: true},
* {row: -3, col: 5, collapsible: true}
* ],
* });
* ```
*/
export class CollapsibleColumns extends BasePlugin {
static get PLUGIN_KEY() {
return PLUGIN_KEY;
}
static get PLUGIN_PRIORITY() {
return PLUGIN_PRIORITY;
}
static get PLUGIN_DEPS() {
return [
'plugin:NestedHeaders',
];
}
/**
* Cached reference to the NestedHeaders plugin.
*
* @private
* @type {NestedHeaders}
*/
nestedHeadersPlugin = null;
/**
* Event manager instance reference.
*
* @private
* @type {EventManager}
*/
eventManager = new EventManager(this);
/**
* The NestedHeaders plugin StateManager instance.
*
* @private
* @type {StateManager}
*/
headerStateManager = null;
/**
* Map of collapsed columns by the plugin.
*
* @private
* @type {HidingMap|null}
*/
#collapsedColumnsMap = null;
/**
* Checks if the plugin is enabled in the handsontable settings. This method is executed in {@link Hooks#beforeInit}
* hook and if it returns `true` then the {@link CollapsibleColumns#enablePlugin} method is called.
*
* @returns {boolean}
*/
isEnabled() {
return !!this.hot.getSettings()[PLUGIN_KEY];
}
/**
* Enables the plugin functionality for this Handsontable instance.
*/
enablePlugin() {
if (this.enabled) {
return;
}
const { nestedHeaders } = this.hot.getSettings();
if (!nestedHeaders) {
warn('You need to configure the Nested Headers plugin in order to use collapsible headers.');
}
this.#collapsedColumnsMap = this.hot.columnIndexMapper.createAndRegisterIndexMap(this.pluginName, 'hiding');
this.nestedHeadersPlugin = this.hot.getPlugin('nestedHeaders');
this.headerStateManager = this.nestedHeadersPlugin.getStateManager();
this.addHook('init', () => this.onInit());
this.addHook('afterLoadData', (...args) => this.onAfterLoadData(...args));
this.addHook('afterSetData', (...args) => this.onAfterSetData(...args));
this.addHook('afterGetColHeader', (col, TH) => this.onAfterGetColHeader(col, TH));
this.addHook('beforeOnCellMouseDown', (event, coords, TD) => this.onBeforeOnCellMouseDown(event, coords, TD));
super.enablePlugin();
// @TODO: Workaround for broken plugin initialization abstraction (#6806).
this.updatePlugin();
}
/**
* Updates the plugin state. This method is executed when {@link Core#updateSettings} is invoked.
*/
updatePlugin() {
// @TODO: Workaround for broken plugin initialization abstraction (#6806).
if (!this.hot.view) {
return;
}
if (!this.nestedHeadersPlugin.detectedOverlappedHeaders) {
const { collapsibleColumns } = this.hot.getSettings();
if (typeof collapsibleColumns === 'boolean') {
// Add `collapsible: true` attribute to all headers with colspan higher than 1.
this.headerStateManager.mapState((headerSettings) => {
return { collapsible: headerSettings.origColspan > 1 };
});
} else if (Array.isArray(collapsibleColumns)) {
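// Merge the user-provided per-header settings (e.g. `{ row: -4, col: 1, collapsible: true }`)
// into the current header state.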
this.headerStateManager.mergeStateWith(collapsibleColumns);
}
}
super.updatePlugin();
}
/**
* Disables the plugin functionality for this Handsontable instance.
*/
disablePlugin() {
this.hot.columnIndexMapper.unregisterMap(this.pluginName);
this.#collapsedColumnsMap = null;
this.nestedHeadersPlugin = null;
this.clearButtons();
super.disablePlugin();
}
/**
* Clears the expand/collapse buttons.
*
* @private
*/
clearButtons() {
if (!this.hot.view) {
return;
}
const headerLevels = this.hot.view.wt.getSetting('columnHeaders').length;
const mainHeaders = this.hot.view.wt.wtTable.THEAD;
const topHeaders = this.hot.view.wt.wtOverlays.topOverlay.clone.wtTable.THEAD;
const topLeftCornerHeaders = this.hot.view.wt.wtOverlays.topLeftCornerOverlay ?
this.hot.view.wt.wtOverlays.topLeftCornerOverlay.clone.wtTable.THEAD : null;
const removeButton = function(button) {
if (button) {
button.parentNode.removeChild(button);
}
};
rangeEach(0, headerLevels - 1, (i) => {
const masterLevel = mainHeaders.childNodes[i];
const topLevel = topHeaders.childNodes[i];
const topLeftCornerLevel = topLeftCornerHeaders ? topLeftCornerHeaders.childNodes[i] : null;
rangeEach(0, masterLevel.childNodes.length - 1, (j) => {
let button = masterLevel.childNodes[j].querySelector('.collapsibleIndicator');
removeButton(button);
if (topLevel && topLevel.childNodes[j]) {
button = topLevel.childNodes[j].querySelector('.collapsibleIndicator');
removeButton(button);
}
if (topLeftCornerHeaders && topLeftCornerLevel && topLeftCornerLevel.childNodes[j]) {
button = topLeftCornerLevel.childNodes[j].querySelector('.collapsibleIndicator');
removeButton(button);
}
});
}, true);
}
/**
* Expands section at the provided coords.
*
* @param {object} coords Contains coordinates information. (`coords.row`, `coords.col`).
*/
expandSection(coords) {
this.toggleCollapsibleSection([coords], 'expand');
}
/**
* Collapses section at the provided coords.
*
* @param {object} coords Contains coordinates information. (`coords.row`, `coords.col`).
*/
collapseSection(coords) {
this.toggleCollapsibleSection([coords], 'collapse');
}
/**
* Collapses or expands all collapsible sections, depending on the action parameter.
*
* @param {string} action 'collapse' or 'expand'.
*/
toggleAllCollapsibleSections(action) {
const coords = this.headerStateManager.mapNodes(({ collapsible, origColspan, headerLevel, columnIndex }) => {
if (collapsible === true && origColspan > 1) {
return {
row: this.headerStateManager.levelToRowCoords(headerLevel),
col: columnIndex,
};
}
});
this.toggleCollapsibleSection(coords, action);
}
/**
* Collapses all collapsible sections.
*/
collapseAll() {
this.toggleAllCollapsibleSections('collapse');
}
/**
* Expands all collapsible sections.
*/
expandAll() {
this.toggleAllCollapsibleSections('expand');
}
/**
* Collapses/Expands a section.
*
* @param {Array} coords Array of coords - section coordinates.
* @param {string} [action] Action definition ('collapse' or 'expand').
* @fires Hooks#beforeColumnCollapse
* @fires Hooks#beforeColumnExpand
* @fires Hooks#afterColumnCollapse
* @fires Hooks#afterColumnExpand
*/
toggleCollapsibleSection(coords, action) {
if (!actionDictionary.has(action)) {
throw new Error(`Unsupported action is passed (${action}).`);
}
if (!Array.isArray(coords)) {
return;
}
// Ignore coordinates which point to the cells range.
const filteredCoords = arrayFilter(coords, ({ row }) => row < 0);
let isActionPossible = filteredCoords.length > 0;
arrayEach(filteredCoords, ({ row, col: column }) => {
const { collapsible, isCollapsed } = this.headerStateManager.getHeaderSettings(row, column) ?? {};
if (!collapsible || isCollapsed && action === 'collapse' || !isCollapsed && action === 'expand') {
isActionPossible = false;
return false;
}
});
const nodeModRollbacks = [];
const affectedColumnsIndexes = [];
if (isActionPossible) {
arrayEach(filteredCoords, ({ row, col: column }) => {
const {
colspanCompensation,
affectedColumns,
rollbackModification,
} = this.headerStateManager.triggerNodeModification(action, row, column);
if (colspanCompensation > 0) {
affectedColumnsIndexes.push(...affectedColumns);
nodeModRollbacks.push(rollbackModification);
}
});
}
const currentCollapsedColumns = this.getCollapsedColumns();
let destinationCollapsedColumns = [];
if (action === 'collapse') {
destinationCollapsedColumns = arrayUnique([...currentCollapsedColumns, ...affectedColumnsIndexes]);
} else if (action === 'expand') {
destinationCollapsedColumns = arrayFilter(currentCollapsedColumns,
index => !affectedColumnsIndexes.includes(index));
}
const actionTranslator = actionDictionary.get(action);
const isActionAllowed = this.hot.runHooks(
actionTranslator.beforeHook,
currentCollapsedColumns,
destinationCollapsedColumns,
isActionPossible,
);
if (isActionAllowed === false) {
// Rollback all header nodes modification (collapse or expand).
arrayEach(nodeModRollbacks, (nodeModRollback) => {
nodeModRollback();
});
return;
}
this.hot.batchExecution(() => {
arrayEach(affectedColumnsIndexes, (visualColumn) => {
this.#collapsedColumnsMap
.setValueAtIndex(this.hot.toPhysicalColumn(visualColumn), actionTranslator.hideColumn);
});
}, true);
const isActionPerformed = this.getCollapsedColumns().length !== currentCollapsedColumns.length;
this.hot.runHooks(
actionTranslator.afterHook,
currentCollapsedColumns,
destinationCollapsedColumns,
isActionPossible,
isActionPerformed,
);
this.hot.render();
this.hot.view.adjustElementsSize(true);
}
/**
* Gets an array of physical indexes of collapsed columns.
*
* @private
* @returns {number[]}
*/
getCollapsedColumns() {
return this.#collapsedColumnsMap.getHiddenIndexes();
}
/**
* Generates the indicator element.
*
* @private
* @param {number} row Row index.
* @param {number} column Column index.
* @returns {HTMLElement}
*/
generateIndicator(row, column) {
const divEl = this.hot.rootDocument.createElement('div');
const columnSettings = this.headerStateManager.getHeaderSettings(row, column);
addClass(divEl, 'collapsibleIndicator');
if (columnSettings.isCollapsed) {
addClass(divEl, 'collapsed');
fastInnerText(divEl, '+');
} else {
addClass(divEl, 'expanded');
fastInnerText(divEl, '-');
}
return divEl;
}
/**
* Adds the indicator to the headers.
*
* @private
* @param {number} column Column index.
* @param {HTMLElement} TH TH element.
*/
onAfterGetColHeader(column, TH) {
const TR = TH.parentNode;
const THEAD = TR.parentNode;
const row = ((-1) * THEAD.childNodes.length) + Array.prototype.indexOf.call(THEAD.childNodes, TR);
const { collapsible, origColspan } = this.headerStateManager.getHeaderSettings(row, column) ?? {};
if (collapsible && origColspan > 1 && column >= this.hot.getSettings().fixedColumnsLeft) {
const button = this.generateIndicator(row, column);
TH.querySelector('div:first-child').appendChild(button);
}
}
/**
* Indicator mouse event callback.
*
* @private
* @param {object} event Mouse event.
* @param {object} coords Event coordinates.
*/
onBeforeOnCellMouseDown(event, coords) {
if (hasClass(event.target, 'collapsibleIndicator')) {
if (hasClass(event.target, 'expanded')) {
this.eventManager.fireEvent(event.target, 'mouseup');
this.toggleCollapsibleSection([coords], 'collapse');
} else if (hasClass(event.target, 'collapsed')) {
this.eventManager.fireEvent(event.target, 'mouseup');
this.toggleCollapsibleSection([coords], 'expand');
}
stopImmediatePropagation(event);
}
}
/**
* Updates the plugin state after HoT initialization.
*
* @private
*/
onInit() {
// @TODO: Workaround for broken plugin initialization abstraction (#6806).
this.updatePlugin();
}
/**
* Updates the plugin state after new dataset load.
*
* @private
* @param {Array[]} sourceData Array of arrays or array of objects containing data.
* @param {boolean} initialLoad Flag that determines whether the data has been loaded
* during the initialization.
*/
onAfterSetData(sourceData, initialLoad) {
if (!initialLoad) {
this.updatePlugin();
}
}
/**
* Alias for `onAfterSetData`.
*
* @private
* @param {Array[]} sourceData Array of arrays or array of objects containing data.
* @param {boolean} initialLoad Flag that determines whether the data has been loaded
* during the initialization.
* @param {string} source Source of the hook call.
*/
onAfterLoadData(sourceData, initialLoad, source) {
if (source !== 'updateSettings') {
this.onAfterSetData(sourceData, initialLoad, source);
}
}
/**
* Destroys the plugin instance.
*/
destroy() {
this.#collapsedColumnsMap = null;
super.destroy();
}
}
| 1 | 20,749 | I'm thinking about covering this change with the test. Can you do that? | handsontable-handsontable | js |
@@ -41,7 +41,7 @@ var (
// NATProviderPinger pings provider and optionally hands off connection to consumer proxy.
type NATProviderPinger interface {
- PingProvider(ip string, providerPort, consumerPort, proxyPort int, stop <-chan struct{}) error
+ PingProvider(ip string, cPorts, pPorts []int, proxyPort int) (*net.UDPConn, error)
}
// NATPinger is responsible for pinging nat holes | 1 | /*
* Copyright (C) 2019 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package traversal
import (
"fmt"
"net"
"sync"
"time"
"github.com/mysteriumnetwork/node/core/port"
"github.com/mysteriumnetwork/node/eventbus"
"github.com/mysteriumnetwork/node/nat/event"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
"golang.org/x/net/ipv4"
)
// StageName represents hole-punching stage of NAT traversal
const StageName = "hole_punching"
var (
errNATPunchAttemptStopped = errors.New("NAT punch attempt stopped")
errNATPunchAttemptTimedOut = errors.New("NAT punch attempt timed out")
)
// NATProviderPinger pings provider and optionally hands off connection to consumer proxy.
type NATProviderPinger interface {
PingProvider(ip string, providerPort, consumerPort, proxyPort int, stop <-chan struct{}) error
}
// NATPinger is responsible for pinging nat holes
type NATPinger interface {
NATProviderPinger
PingTarget(*Params)
BindServicePort(key string, port int)
Start()
Stop()
SetProtectSocketCallback(SocketProtect func(socket int) bool)
Valid() bool
}
// PingConfig represents NAT pinger config.
type PingConfig struct {
Interval time.Duration
Timeout time.Duration
}
// DefaultPingConfig returns default NAT pinger config.
func DefaultPingConfig() *PingConfig {
return &PingConfig{
Interval: 200 * time.Millisecond,
Timeout: 10 * time.Second,
}
}
// Pinger represents NAT pinger structure
type Pinger struct {
pingConfig *PingConfig
pingTarget chan *Params
stop chan struct{}
stopNATProxy chan struct{}
once sync.Once
natProxy *NATProxy
eventPublisher eventbus.Publisher
}
// PortSupplier provides port needed to run a service on
type PortSupplier interface {
Acquire() (port.Port, error)
}
// NewPinger returns Pinger instance
func NewPinger(pingConfig *PingConfig, proxy *NATProxy, publisher eventbus.Publisher) NATPinger {
return &Pinger{
pingConfig: pingConfig,
pingTarget: make(chan *Params),
stop: make(chan struct{}),
stopNATProxy: make(chan struct{}),
natProxy: proxy,
eventPublisher: publisher,
}
}
// Params contains session parameters needed to NAT ping remote peer
type Params struct {
ProviderPort int
ConsumerPort int
ConsumerPublicIP string
ProxyPortMappingKey string
Cancel chan struct{}
}
// Start starts NAT pinger and waits for PingTarget to ping
func (p *Pinger) Start() {
log.Info().Msg("Starting a NAT pinger")
for {
select {
case <-p.stop:
log.Info().Msg("NAT pinger is stopped")
return
case pingParams := <-p.pingTarget:
if isPunchingRequired(pingParams) {
go p.pingTargetConsumer(pingParams)
}
}
}
}
func isPunchingRequired(params *Params) bool {
return params.ConsumerPort > 0
}
// Stop stops pinger loop
func (p *Pinger) Stop() {
p.once.Do(func() {
close(p.stopNATProxy)
close(p.stop)
})
}
// PingProvider pings provider determined by destination provided in sessionConfig
func (p *Pinger) PingProvider(ip string, providerPort, consumerPort, proxyPort int, stop <-chan struct{}) error {
log.Info().Msg("NAT pinging to provider")
conn, err := p.getConnection(ip, providerPort, consumerPort)
if err != nil {
return errors.Wrap(err, "failed to get connection")
}
// Add read deadline to prevent possible conn.Read hang when remote peer doesn't send ping ack.
conn.SetReadDeadline(time.Now().Add(p.pingConfig.Timeout * 2))
pingStop := make(chan struct{})
defer close(pingStop)
go func() {
err := p.ping(conn, pingStop)
if err != nil {
log.Warn().Err(err).Msg("Error while pinging")
}
}()
time.Sleep(p.pingConfig.Interval)
err = p.pingReceiver(conn, stop)
if err != nil {
return err
}
// send one last ping request to end hole punching procedure gracefully
err = p.sendPingRequest(conn, 128)
if err != nil {
return errors.Wrap(err, "remote ping failed")
}
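// If a local proxy port was supplied, hand the punched UDP connection over to the consumer-side
// NATProxy, which relays traffic between the local service at 127.0.0.1:proxyPort and the remote
// peer; otherwise the connection was only needed for punching and is closed.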
if proxyPort > 0 {
consumerAddr := fmt.Sprintf("127.0.0.1:%d", proxyPort)
log.Info().Msg("Handing connection to consumer NATProxy: " + consumerAddr)
// Set higher read deadline when NAT proxy is used.
conn.SetReadDeadline(time.Now().Add(12 * time.Hour))
p.stopNATProxy = p.natProxy.consumerHandOff(consumerAddr, conn)
} else {
log.Info().Msg("Closing ping connection")
if err := conn.Close(); err != nil {
return errors.Wrap(err, "could not close ping conn")
}
}
return nil
}
func (p *Pinger) ping(conn *net.UDPConn, stop <-chan struct{}) error {
// Windows detects that 1 TTL is too low and throws an exception during send
ttl := 0
i := 0
for {
select {
case <-stop:
return nil
case <-time.After(p.pingConfig.Interval):
log.Debug().Msg("Pinging... ")
// This is the essence of the TTL based udp punching.
// We're slowly increasing the TTL so that the packet is held.
// After a few attempts we're setting the value to 128 and assuming we're through.
// We could stop sending pings to the Consumer beyond 4 hops to avoid a possible DoS block by the
// Consumer's router, but we expect the Consumer to also act as a Provider in the near future.
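// For illustration: with the defaults above the TTL sequence is 1, 2, 3, 4 and then 128 for every
// later attempt; the low-TTL packets are expected to expire before reaching the peer while still
// creating the local NAT mapping, and once both sides have pinged, the TTL-128 packets can get through.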
ttl++
if ttl > 4 {
ttl = 128
}
err := p.sendPingRequest(conn, ttl)
if err != nil {
p.eventPublisher.Publish(event.AppTopicTraversal, event.BuildFailureEvent(StageName, err))
return err
}
i++
if time.Duration(i)*p.pingConfig.Interval > p.pingConfig.Timeout {
err := errors.New("timeout while waiting for ping ack, trying to continue")
p.eventPublisher.Publish(event.AppTopicTraversal, event.BuildFailureEvent(StageName, err))
return err
}
}
}
}
func (p *Pinger) sendPingRequest(conn *net.UDPConn, ttl int) error {
err := ipv4.NewConn(conn).SetTTL(ttl)
if err != nil {
return errors.Wrap(err, "pinger setting ttl failed")
}
_, err = conn.Write([]byte("continuously pinging to " + conn.RemoteAddr().String()))
return errors.Wrap(err, "pinging request failed")
}
func (p *Pinger) getConnection(ip string, port int, pingerPort int) (*net.UDPConn, error) {
udpAddr, err := net.ResolveUDPAddr("udp4", fmt.Sprintf("%s:%d", ip, port))
if err != nil {
return nil, err
}
log.Info().Msg("Remote socket: " + udpAddr.String())
conn, err := net.DialUDP("udp", &net.UDPAddr{Port: pingerPort}, udpAddr)
if err != nil {
return nil, err
}
log.Info().Msg("Local socket: " + conn.LocalAddr().String())
return conn, nil
}
// PingTarget relays ping target address data
func (p *Pinger) PingTarget(target *Params) {
select {
case p.pingTarget <- target:
return
// do not block if ping target is not received
case <-time.After(100 * time.Millisecond):
log.Info().Msgf("Ping target timeout: %v", target)
return
}
}
// BindServicePort registers a service port to forward connections to
func (p *Pinger) BindServicePort(key string, port int) {
p.natProxy.registerServicePort(key, port)
}
func (p *Pinger) pingReceiver(conn *net.UDPConn, stop <-chan struct{}) error {
timeout := time.After(p.pingConfig.Timeout)
buf := make([]byte, bufferLen)
for {
select {
case <-timeout:
return errNATPunchAttemptTimedOut
case <-stop:
return errNATPunchAttemptStopped
default:
n, err := conn.Read(buf)
if err != nil {
log.Error().Err(err).Msgf("Failed to read remote peer: %s - attempting to continue", conn.RemoteAddr().String())
continue
}
if n > 0 {
log.Info().Msgf("Remote peer data received: %s, len: %d", string(buf[:n]), n)
return nil
}
}
}
}
// SetProtectSocketCallback sets socket protection callback to be called when new socket is created in consumer NATProxy
func (p *Pinger) SetProtectSocketCallback(socketProtect func(socket int) bool) {
p.natProxy.setProtectSocketCallback(socketProtect)
}
// Valid returns that this pinger is a valid pinger
func (p *Pinger) Valid() bool {
return true
}
func (p *Pinger) pingTargetConsumer(pingParams *Params) {
log.Info().Msgf("Pinging peer with: %+v", pingParams)
if pingParams.ProxyPortMappingKey == "" {
log.Error().Msg("Service proxy connection port mapping key is missing")
return
}
log.Info().Msgf("Ping target received: IP: %v, port: %v", pingParams.ConsumerPublicIP, pingParams.ConsumerPort)
if !p.natProxy.isAvailable(pingParams.ProxyPortMappingKey) {
log.Warn().Msgf("NATProxy is not available for this transport protocol key %v", pingParams.ProxyPortMappingKey)
return
}
conn, err := p.getConnection(pingParams.ConsumerPublicIP, pingParams.ConsumerPort, pingParams.ProviderPort)
if err != nil {
log.Error().Err(err).Msg("Failed to get connection")
return
}
pingStop := make(chan struct{})
defer close(pingStop)
go func() {
err := p.ping(conn, pingStop)
if err != nil {
log.Warn().Err(err).Msg("Error while pinging")
}
}()
err = p.pingReceiver(conn, pingParams.Cancel)
if err != nil {
log.Error().Err(err).Msg("Ping receiver error")
return
}
p.eventPublisher.Publish(event.AppTopicTraversal, event.BuildSuccessfulEvent(StageName))
log.Info().Msg("Ping received, waiting for a new connection")
go p.natProxy.handOff(pingParams.ProxyPortMappingKey, conn)
}
| 1 | 15,760 | Not sure if it's a good idea to remove stop channel. If user cancels connection how will you stop pinger? | mysteriumnetwork-node | go |
@@ -373,7 +373,13 @@ func (dg *dockerGoClient) pullImage(image string, authData *apicontainer.Registr
return CannotGetDockerClientError{version: dg.version, err: err}
}
- authConfig, err := dg.getAuthdata(image, authData)
+ sdkAuthConfig, err := dg.getAuthdata(image, authData)
+ authConfig := docker.AuthConfiguration{
+ Username: sdkAuthConfig.Username,
+ Password: sdkAuthConfig.Password,
+ Email: sdkAuthConfig.Email,
+ ServerAddress: sdkAuthConfig.ServerAddress,
+ }
if err != nil {
return wrapPullErrorAsNamedError(err)
} | 1 | // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package dockerapi
import (
"archive/tar"
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"strings"
"sync"
"time"
apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
"github.com/aws/amazon-ecs-agent/agent/async"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/dockerclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/clientfactory"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerauth"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/dockeriface"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclient"
"github.com/aws/amazon-ecs-agent/agent/dockerclient/sdkclientfactory"
"github.com/aws/amazon-ecs-agent/agent/ecr"
"github.com/aws/amazon-ecs-agent/agent/emptyvolume"
"github.com/aws/amazon-ecs-agent/agent/utils"
"github.com/aws/amazon-ecs-agent/agent/utils/ttime"
"github.com/cihub/seelog"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/volume"
docker "github.com/fsouza/go-dockerclient"
)
const (
dockerDefaultTag = "latest"
// imageNameFormat is the format an image name may take, e.g. repo:tag
imageNameFormat = "%s:%s"
// the buffer size will ensure the agent doesn't miss any event from docker
dockerEventBufferSize = 100
// healthCheckStarting is the initial status returned from docker container health check
healthCheckStarting = "starting"
// healthCheckHealthy is the healthy status returned from docker container health check
healthCheckHealthy = "healthy"
// healthCheckUnhealthy is unhealthy status returned from docker container health check
healthCheckUnhealthy = "unhealthy"
// maxHealthCheckOutputLength is the maximum length of healthcheck command output that agent will save
maxHealthCheckOutputLength = 1024
// VolumeDriverType is one of the plugin capabilities see https://docs.docker.com/engine/reference/commandline/plugin_ls/#filtering
VolumeDriverType = "volumedriver"
)
// Timelimits for docker operations enforced above docker
// TODO: Make these limits configurable.
const (
pullImageTimeout = 2 * time.Hour
// CreateContainerTimeout is the timeout for the CreateContainer API.
CreateContainerTimeout = 4 * time.Minute
// StopContainerTimeout is the timeout for the StopContainer API.
StopContainerTimeout = 30 * time.Second
// RemoveContainerTimeout is the timeout for the RemoveContainer API.
RemoveContainerTimeout = 5 * time.Minute
// InspectContainerTimeout is the timeout for the InspectContainer API.
InspectContainerTimeout = 30 * time.Second
// RemoveImageTimeout is the timeout for the RemoveImage API.
RemoveImageTimeout = 3 * time.Minute
// ListPluginsTimeout is the timout for ListPlugins API.
ListPluginsTimeout = 1 * time.Minute
// CreateVolumeTimeout is the timout for CreateVolume API.
CreateVolumeTimeout = 5 * time.Minute
// InspectVolumeTimeout is the timout for InspectVolume API.
InspectVolumeTimeout = 5 * time.Minute
// RemoveVolumeTimeout is the timout for RemoveVolume API.
RemoveVolumeTimeout = 5 * time.Minute
// Parameters for caching the docker auth for ECR
tokenCacheSize = 100
// tokenCacheTTL is the default ttl of the docker auth for ECR
tokenCacheTTL = 12 * time.Hour
// dockerPullBeginTimeout is the timeout from when a 'pull' is called to when
// we expect to see output on the pull progress stream. This is to work
// around a docker bug which sometimes results in pulls not progressing.
dockerPullBeginTimeout = 5 * time.Minute
// dockerPullInactivityTimeout is the amount of time that we will
// wait when the pulling does not progress
dockerPullInactivityTimeout = 1 * time.Minute
// pullStatusSuppressDelay controls the time where pull status progress bar
// output will be suppressed in debug mode
pullStatusSuppressDelay = 2 * time.Second
// StatsInactivityTimeout controls the amount of time we hold open a
// connection to the Docker daemon waiting for stats data
StatsInactivityTimeout = 5 * time.Second
// retry settings for pulling images
maximumPullRetries = 10
minimumPullRetryDelay = 250 * time.Millisecond
maximumPullRetryDelay = 1 * time.Second
pullRetryDelayMultiplier = 1.5
pullRetryJitterMultiplier = 0.2
)
// DockerClient interface to make testing it easier
type DockerClient interface {
// SupportedVersions returns a slice of the supported docker versions (or at least supposedly supported).
SupportedVersions() []dockerclient.DockerVersion
// KnownVersions returns a slice of the Docker API versions known to the Docker daemon.
KnownVersions() []dockerclient.DockerVersion
// WithVersion returns a new DockerClient for which all operations will use the given remote api version.
// A default version will be used for a client not produced via this method.
WithVersion(dockerclient.DockerVersion) DockerClient
// ContainerEvents returns a channel of DockerContainerChangeEvents. Events are placed into the channel and should
// be processed by the listener.
ContainerEvents(ctx context.Context) (<-chan DockerContainerChangeEvent, error)
// PullImage pulls an image. authData should contain authentication data provided by the ECS backend.
PullImage(image string, authData *apicontainer.RegistryAuthenticationData) DockerContainerMetadata
// ImportLocalEmptyVolumeImage imports a locally-generated empty-volume image for supported platforms.
ImportLocalEmptyVolumeImage() DockerContainerMetadata
// CreateContainer creates a container with the provided docker.Config, docker.HostConfig, and name. A timeout value
// and a context should be provided for the request.
CreateContainer(context.Context, *docker.Config, *docker.HostConfig, string, time.Duration) DockerContainerMetadata
// StartContainer starts the container identified by the name provided. A timeout value and a context should be
// provided for the request.
StartContainer(context.Context, string, time.Duration) DockerContainerMetadata
// StopContainer stops the container identified by the name provided. A timeout value and a context should be provided
// for the request.
StopContainer(context.Context, string, time.Duration) DockerContainerMetadata
// DescribeContainer returns status information about the specified container. A context should be provided
// for the request
DescribeContainer(context.Context, string) (apicontainerstatus.ContainerStatus, DockerContainerMetadata)
// RemoveContainer removes a container (typically the rootfs, logs, and associated metadata) identified by the name.
// A timeout value and a context should be provided for the request.
RemoveContainer(context.Context, string, time.Duration) error
// InspectContainer returns information about the specified container. A timeout value and a context should be
// provided for the request.
InspectContainer(context.Context, string, time.Duration) (*docker.Container, error)
// ListContainers returns the set of containers known to the Docker daemon. A timeout value and a context
// should be provided for the request.
ListContainers(context.Context, bool, time.Duration) ListContainersResponse
// CreateVolume creates a docker volume. A timeout value should be provided for the request
CreateVolume(context.Context, string, string, map[string]string, map[string]string, time.Duration) SDKVolumeResponse
// InspectVolume returns a volume by its name. A timeout value should be provided for the request
InspectVolume(context.Context, string, time.Duration) SDKVolumeResponse
// RemoveVolume removes a volume by its name. A timeout value should be provided for the request
RemoveVolume(context.Context, string, time.Duration) error
// ListPluginsWithFilters returns the set of docker plugins installed on the host, filtered by options provided.
// A timeout value should be provided for the request.
ListPluginsWithFilters(context.Context, bool, []string, time.Duration) ([]string, error)
// ListPlugins returns the set of docker plugins installed on the host. A timeout value should be provided for
// the request.
ListPlugins(context.Context, time.Duration) ListPluginsResponse
// Stats returns a channel of stat data for the specified container. A context should be provided so the request can
// be canceled.
Stats(string, context.Context) (<-chan *docker.Stats, error)
// Version returns the version of the Docker daemon.
Version(context.Context, time.Duration) (string, error)
// APIVersion returns the api version of the client
APIVersion() (dockerclient.DockerVersion, error)
// InspectImage returns information about the specified image.
InspectImage(string) (*docker.Image, error)
// RemoveImage removes the metadata associated with an image and may remove the underlying layer data. A timeout
// value and a context should be provided for the request.
RemoveImage(context.Context, string, time.Duration) error
// LoadImage loads an image from an input stream. A timeout value and a context should be provided for the request.
LoadImage(context.Context, io.Reader, time.Duration) error
}
// DockerGoClient wraps the underlying go-dockerclient and docker/docker library.
// It exists primarily for the following four purposes:
// 1) Provide an abstraction over inputs and outputs,
// a) Inputs: Trims them down to what we actually need (largely unchanged tbh)
// b) Outputs: Unifies error handling and the common 'start->inspect'
// pattern by having a consistent error output. This error output
// contains error data with a given Name that aims to be presentable as a
// 'reason' in state changes. It also filters out the information about a
// container that is of interest, such as network bindings, while
// ignoring the rest.
// 2) Timeouts: It adds timeouts everywhere, mostly as a reaction to
// pull-related issues in the Docker daemon.
// 3) Versioning: It abstracts over multiple client versions to allow juggling
// appropriately there.
// 4) Allows for both the go-dockerclient client and Docker SDK client to live
// side-by-side until migration to the Docker SDK is complete.
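//
// As a rough usage sketch (hypothetical caller code, not part of this file), point 3 above looks like:
//
//	versioned := client.WithVersion(dockerclient.Version_1_24)
//	metadata := versioned.PullImage("busybox:latest", nil)
//
// where every call made through `versioned` uses the pinned remote API version while the original
// client keeps the factory default; `Version_1_24` stands in for whichever version constant the
// dockerclient package defines.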
// Implements DockerClient
// TODO Remove clientfactory field once all API calls are migrated to sdkclientFactory
type dockerGoClient struct {
clientFactory clientfactory.Factory
sdkClientFactory sdkclientfactory.Factory
version dockerclient.DockerVersion
ecrClientFactory ecr.ECRFactory
auth dockerauth.DockerAuthProvider
ecrTokenCache async.Cache
config *config.Config
_time ttime.Time
_timeOnce sync.Once
daemonVersionUnsafe string
lock sync.Mutex
}
func (dg *dockerGoClient) WithVersion(version dockerclient.DockerVersion) DockerClient {
return &dockerGoClient{
clientFactory: dg.clientFactory,
sdkClientFactory: dg.sdkClientFactory,
version: version,
auth: dg.auth,
config: dg.config,
}
}
// scratchCreateLock guards against multiple 'scratch' image creations at once
var scratchCreateLock sync.Mutex
// NewDockerGoClient creates a new DockerGoClient
// TODO Remove clientfactory parameter once migration to Docker SDK is complete.
func NewDockerGoClient(clientFactory clientfactory.Factory, sdkclientFactory sdkclientfactory.Factory,
cfg *config.Config, ctx context.Context) (DockerClient, error) {
// Ensure both clients can connect to the Docker daemon.
client, err := clientFactory.GetDefaultClient()
if err != nil {
seelog.Errorf("DockerGoClient: go-dockerclient unable to connect to Docker daemon. " +
"Ensure Docker is running: %v", err)
return nil, err
}
sdkclient, err := sdkclientFactory.GetDefaultClient()
if err != nil {
seelog.Errorf("DockerGoClient: Docker SDK client unable to connect to Docker daemon. " +
"Ensure Docker is running: %v", err)
return nil, err
}
// Even if we have a DockerClient, the daemon might not be running. Ping from both clients
// to ensure it's up.
err = client.Ping()
if err != nil {
seelog.Errorf("DockerGoClient: go-dockerclient unable to ping Docker daemon. " +
"Ensure Docker is running: %v", err)
return nil, err
}
_, err = sdkclient.Ping(ctx)
if err != nil {
seelog.Errorf("DockerGoClient: Docker SDK client unable to ping Docker daemon. " +
"Ensure Docker is running: %v", err)
return nil, err
}
var dockerAuthData json.RawMessage
if cfg.EngineAuthData != nil {
dockerAuthData = cfg.EngineAuthData.Contents()
}
return &dockerGoClient{
clientFactory: clientFactory,
sdkClientFactory: sdkclientFactory,
auth: dockerauth.NewDockerAuthProvider(cfg.EngineAuthType, dockerAuthData),
ecrClientFactory: ecr.NewECRFactory(cfg.AcceptInsecureCert),
ecrTokenCache: async.NewLRUCache(tokenCacheSize, tokenCacheTTL),
config: cfg,
}, nil
}
// Returns the Docker SDK Client
func (dg *dockerGoClient) sdkDockerClient() (sdkclient.Client, error) {
if dg.version == "" {
return dg.sdkClientFactory.GetDefaultClient()
}
return dg.sdkClientFactory.GetClient(dg.version)
}
// Returns the go-dockerclient Client
// TODO Remove method once migration is complete.
func (dg *dockerGoClient) dockerClient() (dockeriface.Client, error) {
if dg.version == "" {
return dg.clientFactory.GetDefaultClient()
}
return dg.clientFactory.GetClient(dg.version)
}
func (dg *dockerGoClient) time() ttime.Time {
dg._timeOnce.Do(func() {
if dg._time == nil {
dg._time = &ttime.DefaultTime{}
}
})
return dg._time
}
func (dg *dockerGoClient) PullImage(image string, authData *apicontainer.RegistryAuthenticationData) DockerContainerMetadata {
// TODO Switch to just using context.WithDeadline and get rid of this funky code
timeout := dg.time().After(pullImageTimeout)
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
response := make(chan DockerContainerMetadata, 1)
go func() {
imagePullBackoff := utils.NewSimpleBackoff(minimumPullRetryDelay,
maximumPullRetryDelay, pullRetryJitterMultiplier, pullRetryDelayMultiplier)
err := utils.RetryNWithBackoffCtx(ctx, imagePullBackoff, maximumPullRetries,
func() error {
err := dg.pullImage(image, authData)
if err != nil {
seelog.Warnf("DockerGoClient: failed to pull image %s: %s", image, err.Error())
}
return err
})
response <- DockerContainerMetadata{Error: wrapPullErrorAsNamedError(err)}
}()
select {
case resp := <-response:
return resp
case <-timeout:
cancel()
return DockerContainerMetadata{Error: &DockerTimeoutError{pullImageTimeout, "pulled"}}
}
}
func wrapPullErrorAsNamedError(err error) apierrors.NamedError {
var retErr apierrors.NamedError
if err != nil {
engErr, ok := err.(apierrors.NamedError)
if !ok {
engErr = CannotPullContainerError{err}
}
retErr = engErr
}
return retErr
}
func (dg *dockerGoClient) pullImage(image string, authData *apicontainer.RegistryAuthenticationData) apierrors.NamedError {
seelog.Debugf("DockerGoClient: pulling image: %s", image)
client, err := dg.dockerClient()
if err != nil {
return CannotGetDockerClientError{version: dg.version, err: err}
}
authConfig, err := dg.getAuthdata(image, authData)
if err != nil {
return wrapPullErrorAsNamedError(err)
}
pullDebugOut, pullWriter := io.Pipe()
defer pullWriter.Close()
repository := getRepository(image)
opts := docker.PullImageOptions{
Repository: repository,
OutputStream: pullWriter,
InactivityTimeout: dockerPullInactivityTimeout,
}
timeout := dg.time().After(dockerPullBeginTimeout)
// pullBegan is a channel indicating that we have seen at least one line of data on the 'OutputStream' above.
// It is here to guard against a bug wherein docker never writes anything to that stream and hangs in pulling forever.
pullBegan := make(chan bool, 1)
go dg.filterPullDebugOutput(pullDebugOut, pullBegan, image)
pullFinished := make(chan error, 1)
go func() {
pullFinished <- client.PullImage(opts, authConfig)
seelog.Debugf("DockerGoClient: pulling image complete: %s", image)
}()
select {
case <-pullBegan:
break
case pullErr := <-pullFinished:
if pullErr != nil {
return CannotPullContainerError{pullErr}
}
return nil
case <-timeout:
return &DockerTimeoutError{dockerPullBeginTimeout, "pullBegin"}
}
seelog.Debugf("DockerGoClient: pull began for image: %s", image)
defer seelog.Debugf("DockerGoClient: pull completed for image: %s", image)
err = <-pullFinished
if err != nil {
return CannotPullContainerError{err}
}
return nil
}
func (dg *dockerGoClient) filterPullDebugOutput(pullDebugOut *io.PipeReader, pullBegan chan<- bool, image string) {
// pullBeganOnce ensures we only indicate it began once (since our channel will only be read 0 or 1 times)
pullBeganOnce := sync.Once{}
reader := bufio.NewReader(pullDebugOut)
var line string
var pullErr error
var statusDisplayed time.Time
for {
line, pullErr = reader.ReadString('\n')
if pullErr != nil {
break
}
pullBeganOnce.Do(func() {
pullBegan <- true
})
now := time.Now()
if !strings.Contains(line, "[=") || now.After(statusDisplayed.Add(pullStatusSuppressDelay)) {
// skip most of the progress bar lines, but retain enough for debugging
seelog.Debugf("DockerGoClient: pulling image %s, status %s", image, line)
statusDisplayed = now
}
if strings.Contains(line, "already being pulled by another client. Waiting.") {
// This can mean the daemon is 'hung' in pulling status for this image, but we can't be sure.
seelog.Errorf("DockerGoClient: image 'pull' status marked as already being pulled for image %s, status %s",
image, line)
}
}
if pullErr != nil && pullErr != io.EOF {
seelog.Warnf("DockerGoClient: error reading pull image status for image %s: %v", image, pullErr)
}
}
func getRepository(image string) string {
repository, tag := parseRepositoryTag(image)
if tag == "" {
repository = repository + ":" + dockerDefaultTag
} else {
repository = image
}
return repository
}
// ImportLocalEmptyVolumeImage imports a locally-generated empty-volume image for supported platforms.
func (dg *dockerGoClient) ImportLocalEmptyVolumeImage() DockerContainerMetadata {
timeout := dg.time().After(pullImageTimeout)
response := make(chan DockerContainerMetadata, 1)
go func() {
err := dg.createScratchImageIfNotExists()
var wrapped apierrors.NamedError
if err != nil {
wrapped = CreateEmptyVolumeError{err}
}
response <- DockerContainerMetadata{Error: wrapped}
}()
select {
case resp := <-response:
return resp
case <-timeout:
return DockerContainerMetadata{Error: &DockerTimeoutError{pullImageTimeout, "pulled"}}
}
}
func (dg *dockerGoClient) createScratchImageIfNotExists() error {
client, err := dg.dockerClient()
if err != nil {
return err
}
scratchCreateLock.Lock()
defer scratchCreateLock.Unlock()
_, err = client.InspectImage(emptyvolume.Image + ":" + emptyvolume.Tag)
if err == nil {
seelog.Debug("DockerGoClient: empty volume image is already present, skipping import")
// Already exists; assume that it's okay to use it
return nil
}
reader, writer := io.Pipe()
emptytarball := tar.NewWriter(writer)
go func() {
emptytarball.Close()
writer.Close()
}()
seelog.Debug("DockerGoClient: importing empty volume image")
// Create it from an empty tarball
err = client.ImportImage(docker.ImportImageOptions{
Repository: emptyvolume.Image,
Tag: emptyvolume.Tag,
Source: "-",
InputStream: reader,
})
return err
}
func (dg *dockerGoClient) InspectImage(image string) (*docker.Image, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
return client.InspectImage(image)
}
func (dg *dockerGoClient) getAuthdata(image string, authData *apicontainer.RegistryAuthenticationData) (docker.AuthConfiguration, error) {
if authData == nil {
return dg.auth.GetAuthconfig(image, nil)
}
switch authData.Type {
case apicontainer.AuthTypeECR:
provider := dockerauth.NewECRAuthProvider(dg.ecrClientFactory, dg.ecrTokenCache)
authConfig, err := provider.GetAuthconfig(image, authData)
if err != nil {
return authConfig, CannotPullECRContainerError{err}
}
return authConfig, nil
case apicontainer.AuthTypeASM:
return authData.ASMAuthData.GetDockerAuthConfig(), nil
default:
return dg.auth.GetAuthconfig(image, nil)
}
}
func (dg *dockerGoClient) CreateContainer(ctx context.Context,
config *docker.Config,
hostConfig *docker.HostConfig,
name string,
timeout time.Duration) DockerContainerMetadata {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.createContainer(ctx, config, hostConfig, name) }()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp
case <-ctx.Done():
// Context has either expired or canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "created"}}
}
// Context was canceled even though there was no timeout. Send
// back an error.
return DockerContainerMetadata{Error: &CannotCreateContainerError{err}}
}
}
func (dg *dockerGoClient) createContainer(ctx context.Context,
config *docker.Config,
hostConfig *docker.HostConfig,
name string) DockerContainerMetadata {
client, err := dg.dockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
containerOptions := docker.CreateContainerOptions{
Config: config,
HostConfig: hostConfig,
Name: name,
Context: ctx,
}
dockerContainer, err := client.CreateContainer(containerOptions)
if err != nil {
return DockerContainerMetadata{Error: CannotCreateContainerError{err}}
}
return dg.containerMetadata(ctx, dockerContainer.ID)
}
func (dg *dockerGoClient) StartContainer(ctx context.Context, id string, timeout time.Duration) DockerContainerMetadata {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.startContainer(ctx, id) }()
select {
case resp := <-response:
return resp
case <-ctx.Done():
// Context has either expired or canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "started"}}
}
return DockerContainerMetadata{Error: CannotStartContainerError{err}}
}
}
func (dg *dockerGoClient) startContainer(ctx context.Context, id string) DockerContainerMetadata {
client, err := dg.sdkDockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
err = client.ContainerStart(ctx, id, types.ContainerStartOptions{})
metadata := dg.containerMetadata(ctx, id)
if err != nil {
metadata.Error = CannotStartContainerError{err}
}
return metadata
}
// DockerStateToState converts the container status from docker to status recognized by the agent
// Ref: https://github.com/fsouza/go-dockerclient/blob/fd53184a1439b6d7b82ca54c1cd9adac9a5278f2/container.go#L133
func DockerStateToState(state docker.State) apicontainerstatus.ContainerStatus {
if state.Running {
return apicontainerstatus.ContainerRunning
}
if state.Dead {
return apicontainerstatus.ContainerStopped
}
if state.StartedAt.IsZero() && state.Error == "" {
return apicontainerstatus.ContainerCreated
}
return apicontainerstatus.ContainerStopped
}
func (dg *dockerGoClient) DescribeContainer(ctx context.Context, dockerID string) (apicontainerstatus.ContainerStatus, DockerContainerMetadata) {
dockerContainer, err := dg.InspectContainer(ctx, dockerID, InspectContainerTimeout)
if err != nil {
return apicontainerstatus.ContainerStatusNone, DockerContainerMetadata{Error: CannotDescribeContainerError{err}}
}
return DockerStateToState(dockerContainer.State), MetadataFromContainer(dockerContainer)
}
func (dg *dockerGoClient) InspectContainer(ctx context.Context, dockerID string, timeout time.Duration) (*docker.Container, error) {
type inspectResponse struct {
container *docker.Container
err error
}
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan inspectResponse, 1)
go func() {
container, err := dg.inspectContainer(ctx, dockerID)
response <- inspectResponse{container, err}
}()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp.container, resp.err
case <-ctx.Done():
err := ctx.Err()
if err == context.DeadlineExceeded {
return nil, &DockerTimeoutError{timeout, "inspecting"}
}
return nil, &CannotInspectContainerError{err}
}
}
func (dg *dockerGoClient) inspectContainer(ctx context.Context, dockerID string) (*docker.Container, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
return client.InspectContainerWithContext(dockerID, ctx)
}
func (dg *dockerGoClient) StopContainer(ctx context.Context, dockerID string, timeout time.Duration) DockerContainerMetadata {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan DockerContainerMetadata, 1)
go func() { response <- dg.stopContainer(ctx, dockerID) }()
select {
case resp := <-response:
return resp
case <-ctx.Done():
// Context has either expired or canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return DockerContainerMetadata{Error: &DockerTimeoutError{timeout, "stopped"}}
}
return DockerContainerMetadata{Error: CannotStopContainerError{err}}
}
}
func (dg *dockerGoClient) stopContainer(ctx context.Context, dockerID string) DockerContainerMetadata {
client, err := dg.sdkDockerClient()
if err != nil {
return DockerContainerMetadata{Error: CannotGetDockerClientError{version: dg.version, err: err}}
}
err = client.ContainerStop(ctx, dockerID, &dg.config.DockerStopTimeout)
metadata := dg.containerMetadata(ctx, dockerID)
if err != nil {
seelog.Infof("DockerGoClient: error stopping container %s: %v", dockerID, err)
if metadata.Error == nil {
metadata.Error = CannotStopContainerError{err}
}
}
return metadata
}
func (dg *dockerGoClient) RemoveContainer(ctx context.Context, dockerID string, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan error, 1)
go func() { response <- dg.removeContainer(ctx, dockerID) }()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp
case <-ctx.Done():
err := ctx.Err()
// Context has either expired or canceled. If it has timed out,
// send back the DockerTimeoutError
if err == context.DeadlineExceeded {
return &DockerTimeoutError{dockerclient.RemoveContainerTimeout, "removing"}
}
return &CannotRemoveContainerError{err}
}
}
func (dg *dockerGoClient) removeContainer(ctx context.Context, dockerID string) error {
client, err := dg.sdkDockerClient()
if err != nil {
return err
}
return client.ContainerRemove(ctx, dockerID,
types.ContainerRemoveOptions{
RemoveVolumes: true,
RemoveLinks: false,
Force: false,
})
}
func (dg *dockerGoClient) containerMetadata(ctx context.Context, id string) DockerContainerMetadata {
ctx, cancel := context.WithTimeout(ctx, dockerclient.InspectContainerTimeout)
defer cancel()
dockerContainer, err := dg.InspectContainer(ctx, id, dockerclient.InspectContainerTimeout)
if err != nil {
return DockerContainerMetadata{DockerID: id, Error: CannotInspectContainerError{err}}
}
return MetadataFromContainer(dockerContainer)
}
// MetadataFromContainer translates dockerContainer into DockerContainerMetadata
func MetadataFromContainer(dockerContainer *docker.Container) DockerContainerMetadata {
var bindings []apicontainer.PortBinding
var err apierrors.NamedError
if dockerContainer.NetworkSettings != nil {
// Convert port bindings into the format our container expects
bindings, err = apicontainer.PortBindingFromDockerPortBinding(dockerContainer.NetworkSettings.Ports)
if err != nil {
seelog.Criticalf("DockerGoClient: Docker had network bindings we couldn't understand: %v", err)
return DockerContainerMetadata{Error: apierrors.NamedError(err)}
}
}
metadata := DockerContainerMetadata{
DockerID: dockerContainer.ID,
PortBindings: bindings,
Volumes: dockerContainer.Volumes,
CreatedAt: dockerContainer.Created,
StartedAt: dockerContainer.State.StartedAt,
FinishedAt: dockerContainer.State.FinishedAt,
}
if dockerContainer.Config != nil {
metadata.Labels = dockerContainer.Config.Labels
}
metadata = getMetadataVolumes(metadata, dockerContainer)
if !dockerContainer.State.Running && !dockerContainer.State.FinishedAt.IsZero() {
// Only record an exitcode if it has exited
metadata.ExitCode = &dockerContainer.State.ExitCode
}
if dockerContainer.State.Error != "" {
metadata.Error = NewDockerStateError(dockerContainer.State.Error)
}
if dockerContainer.State.OOMKilled {
metadata.Error = OutOfMemoryError{}
}
if dockerContainer.State.Health.Status == "" || dockerContainer.State.Health.Status == healthCheckStarting {
return metadata
}
// Record the health check information if exists
metadata.Health = getMetadataHealthCheck(dockerContainer)
return metadata
}
func getMetadataVolumes(metadata DockerContainerMetadata, dockerContainer *docker.Container) DockerContainerMetadata {
// Workaround for https://github.com/docker/docker/issues/27601
// See https://github.com/docker/docker/blob/v1.12.2/daemon/inspect_unix.go#L38-L43
// for how Docker handles API compatibility on Linux
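// In short: newer API versions report mount information under `Mounts` instead of populating the
// legacy `Volumes` map, so when `Volumes` comes back empty the destination->source map is rebuilt
// from `Mounts` below.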
if len(metadata.Volumes) == 0 {
metadata.Volumes = make(map[string]string)
for _, m := range dockerContainer.Mounts {
metadata.Volumes[m.Destination] = m.Source
}
}
return metadata
}
func getMetadataHealthCheck(dockerContainer *docker.Container) apicontainer.HealthStatus {
health := apicontainer.HealthStatus{}
logLength := len(dockerContainer.State.Health.Log)
if logLength != 0 {
// Only save the last log from the health check
output := dockerContainer.State.Health.Log[logLength-1].Output
size := len(output)
if size > maxHealthCheckOutputLength {
size = maxHealthCheckOutputLength
}
health.Output = output[:size]
}
switch dockerContainer.State.Health.Status {
case healthCheckHealthy:
health.Status = apicontainerstatus.ContainerHealthy
case healthCheckUnhealthy:
health.Status = apicontainerstatus.ContainerUnhealthy
if logLength == 0 {
seelog.Warn("DockerGoClient: no container healthcheck data returned by Docker")
break
}
health.ExitCode = dockerContainer.State.Health.Log[logLength-1].ExitCode
default:
seelog.Debugf("DockerGoClient: unknown healthcheck status event from docker: %s", dockerContainer.State.Health.Status)
}
return health
}
// Listen to the docker event stream for container changes and pass them up
func (dg *dockerGoClient) ContainerEvents(ctx context.Context) (<-chan DockerContainerChangeEvent, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
dockerEvents := make(chan *docker.APIEvents, dockerEventBufferSize)
events := make(chan *docker.APIEvents)
buffer := NewInfiniteBuffer()
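// The unbounded buffer sits between the fixed-size dockerEvents channel (fed by the daemon
// listener) and the unbuffered events channel (drained by handleContainerEvents), so a slow
// consumer should not cause daemon events to back up or be dropped.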
err = client.AddEventListener(dockerEvents)
if err != nil {
seelog.Errorf("DockerGoClient: unable to add a docker event listener: %v", err)
return nil, err
}
go func() {
<-ctx.Done()
client.RemoveEventListener(dockerEvents)
}()
// Cache the event from go docker client
go buffer.StartListening(dockerEvents)
// Read the buffered events and send to task engine
go buffer.Consume(events)
changedContainers := make(chan DockerContainerChangeEvent)
go dg.handleContainerEvents(ctx, events, changedContainers)
return changedContainers, nil
}
func (dg *dockerGoClient) handleContainerEvents(ctx context.Context,
events <-chan *docker.APIEvents,
changedContainers chan<- DockerContainerChangeEvent) {
for event := range events {
containerID := event.ID
seelog.Debugf("DockerGoClient: got event from docker daemon: %v", event)
var status apicontainerstatus.ContainerStatus
eventType := apicontainer.ContainerStatusEvent
switch event.Status {
case "create":
status = apicontainerstatus.ContainerCreated
			// TODO: stop inspecting containers here.
			// Once we adopt Docker's volume APIs and inject volume information
			// ourselves, the `inspect` call on `Create` becomes unnecessary,
			// since volume data is the only thing we currently need from it.
			// Dropping it will save us a lot of `inspect` calls.
case "start":
status = apicontainerstatus.ContainerRunning
case "stop":
fallthrough
case "die":
status = apicontainerstatus.ContainerStopped
case "oom":
containerInfo := event.ID
// events only contain the container's name in newer Docker API
// versions (starting with 1.22)
if containerName, ok := event.Actor.Attributes["name"]; ok {
containerInfo += fmt.Sprintf(" (name: %q)", containerName)
}
seelog.Infof("DockerGoClient: process within container %s died due to OOM", containerInfo)
// "oom" can either means any process got OOM'd, but doesn't always
// mean the container dies (non-init processes). If the container also
// dies, you see a "die" status as well; we'll update suitably there
continue
case "health_status: healthy":
fallthrough
case "health_status: unhealthy":
eventType = apicontainer.ContainerHealthEvent
default:
			// Docker emits new event types even when an older event API version
			// is in use, so an unknown status is expected; just log it.
seelog.Debugf("DockerGoClient: unknown status event from docker: %v", event)
}
metadata := dg.containerMetadata(ctx, containerID)
changedContainers <- DockerContainerChangeEvent{
Status: status,
Type: eventType,
DockerContainerMetadata: metadata,
}
}
}
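// Illustrative sketch (not part of the original file): a typical consumer of
// ContainerEvents ranges over the returned channel until the context is
// cancelled. The "engine" handler below is a made-up name for the example.
//
//	events, err := client.ContainerEvents(ctx)
//	if err != nil {
//		return err
//	}
//	for event := range events {
//		// event.Status carries the mapped apicontainerstatus value and
//		// event.DockerContainerMetadata the inspected container state.
//		engine.handleDockerEvent(event) // hypothetical handler
//	}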
// ListContainers returns a slice of container IDs.
func (dg *dockerGoClient) ListContainers(ctx context.Context, all bool, timeout time.Duration) ListContainersResponse {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan ListContainersResponse, 1)
go func() { response <- dg.listContainers(ctx, all) }()
select {
case resp := <-response:
return resp
case <-ctx.Done():
		// Context has either expired or been canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return ListContainersResponse{Error: &DockerTimeoutError{timeout, "listing"}}
}
return ListContainersResponse{Error: &CannotListContainersError{err}}
}
}
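// The pattern above (and in the other timeout wrappers in this file) is: run
// the blocking call in a goroutine, give it a buffered channel of size 1 so
// its single write never blocks even if nobody reads it, and select on the
// result versus ctx.Done(). A minimal, self-contained sketch of the same idea
// (names are illustrative, not part of this package):
//
//	func callWithTimeout(ctx context.Context, timeout time.Duration, work func(context.Context) error) error {
//		ctx, cancel := context.WithTimeout(ctx, timeout)
//		defer cancel()
//		result := make(chan error, 1) // buffered: the goroutine can always finish and be GC'd
//		go func() { result <- work(ctx) }()
//		select {
//		case err := <-result:
//			return err
//		case <-ctx.Done():
//			return ctx.Err() // DeadlineExceeded on timeout, Canceled otherwise
//		}
//	}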
func (dg *dockerGoClient) listContainers(ctx context.Context, all bool) ListContainersResponse {
client, err := dg.sdkDockerClient()
if err != nil {
return ListContainersResponse{Error: err}
}
containers, err := client.ContainerList(ctx, types.ContainerListOptions{
All: all,
})
if err != nil {
return ListContainersResponse{Error: err}
}
// We get an empty slice if there are no containers to be listed.
// Extract container IDs from this list.
containerIDs := make([]string, len(containers))
for i, container := range containers {
containerIDs[i] = container.ID
}
return ListContainersResponse{DockerIDs: containerIDs, Error: nil}
}
func (dg *dockerGoClient) SupportedVersions() []dockerclient.DockerVersion {
return dg.sdkClientFactory.FindSupportedAPIVersions()
}
func (dg *dockerGoClient) KnownVersions() []dockerclient.DockerVersion {
return dg.sdkClientFactory.FindKnownAPIVersions()
}
func (dg *dockerGoClient) Version(ctx context.Context, timeout time.Duration) (string, error) {
version := dg.getDaemonVersion()
if version != "" {
return version, nil
}
derivedCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
client, err := dg.sdkDockerClient()
if err != nil {
return "", err
}
info, err := client.ServerVersion(derivedCtx)
if err != nil {
return "", err
}
version = info.Version
dg.setDaemonVersion(version)
return version, nil
}
func (dg *dockerGoClient) getDaemonVersion() string {
dg.lock.Lock()
defer dg.lock.Unlock()
return dg.daemonVersionUnsafe
}
func (dg *dockerGoClient) setDaemonVersion(version string) {
dg.lock.Lock()
defer dg.lock.Unlock()
dg.daemonVersionUnsafe = version
}
func (dg *dockerGoClient) CreateVolume(ctx context.Context, name string,
driver string,
driverOptions map[string]string,
labels map[string]string,
timeout time.Duration) SDKVolumeResponse {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan SDKVolumeResponse, 1)
go func() { response <- dg.createVolume(ctx, name, driver, driverOptions, labels) }()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp
case <-ctx.Done():
		// Context has either expired or been canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return SDKVolumeResponse{DockerVolume: nil, Error: &DockerTimeoutError{timeout, "creating volume"}}
}
// Context was canceled even though there was no timeout. Send
// back an error.
return SDKVolumeResponse{DockerVolume: nil, Error: &CannotCreateVolumeError{err}}
}
}
func (dg *dockerGoClient) createVolume(ctx context.Context,
name string,
driver string,
driverOptions map[string]string,
labels map[string]string) SDKVolumeResponse {
client, err := dg.sdkDockerClient()
if err != nil {
return SDKVolumeResponse{DockerVolume: nil, Error: &CannotGetDockerClientError{version: dg.version, err: err}}
}
volumeOptions := volume.VolumesCreateBody{
Driver: driver,
DriverOpts: driverOptions,
Labels: labels,
Name: name,
}
dockerVolume, err := client.VolumeCreate(ctx, volumeOptions)
if err != nil {
return SDKVolumeResponse{DockerVolume: nil, Error: &CannotCreateVolumeError{err}}
}
return SDKVolumeResponse{DockerVolume: &dockerVolume, Error: nil}
}
func (dg *dockerGoClient) InspectVolume(ctx context.Context, name string, timeout time.Duration) SDKVolumeResponse {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan SDKVolumeResponse, 1)
go func() { response <- dg.inspectVolume(ctx, name) }()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp
case <-ctx.Done():
		// Context has either expired or been canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return SDKVolumeResponse{DockerVolume: nil, Error: &DockerTimeoutError{timeout, "inspecting volume"}}
}
// Context was canceled even though there was no timeout. Send
// back an error.
return SDKVolumeResponse{DockerVolume: nil, Error: &CannotInspectVolumeError{err}}
}
}
func (dg *dockerGoClient) inspectVolume(ctx context.Context, name string) SDKVolumeResponse {
client, err := dg.sdkDockerClient()
if err != nil {
return SDKVolumeResponse{
DockerVolume: nil,
Error: &CannotGetDockerClientError{version: dg.version, err: err}}
}
dockerVolume, err := client.VolumeInspect(ctx, name)
if err != nil {
return SDKVolumeResponse{DockerVolume: nil, Error: &CannotInspectVolumeError{err}}
}
return SDKVolumeResponse{DockerVolume: &dockerVolume, Error: nil}
}
func (dg *dockerGoClient) RemoveVolume(ctx context.Context, name string, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan error, 1)
go func() { response <- dg.removeVolume(ctx, name) }()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp
case <-ctx.Done():
		// Context has either expired or been canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return &DockerTimeoutError{timeout, "removing volume"}
}
// Context was canceled even though there was no timeout. Send
// back an error.
return &CannotRemoveVolumeError{err}
}
}
func (dg *dockerGoClient) removeVolume(ctx context.Context, name string) error {
client, err := dg.sdkDockerClient()
if err != nil {
return &CannotGetDockerClientError{version: dg.version, err: err}
}
	err = client.VolumeRemove(ctx, name, false)
	if err != nil {
		return &CannotRemoveVolumeError{err}
	}
return nil
}
// ListPluginsWithFilters is currently a convenience method, as go-dockerclient doesn't implement a filtered list. When we or someone else submits a
// PR for the fix, we will refactor this to pass in the filters. See https://docs.docker.com/engine/reference/commandline/plugin_ls/#filtering.
func (dg *dockerGoClient) ListPluginsWithFilters(ctx context.Context, enabled bool, capabilities []string, timeout time.Duration) ([]string, error) {
var filteredPluginNames []string
response := dg.ListPlugins(ctx, timeout)
if response.Error != nil {
return nil, response.Error
}
for _, pluginDetail := range response.Plugins {
if pluginDetail.Active != enabled {
continue
}
// One plugin might have multiple capabilities, see https://docs.docker.com/engine/reference/commandline/plugin_ls/#filtering
for _, pluginType := range pluginDetail.Config.Interface.Types {
for _, capability := range capabilities {
// capability looks like volumedriver, pluginType looks like docker.volumedriver/1.0 (prefix.capability/version)
if strings.Contains(pluginType, capability) {
filteredPluginNames = append(filteredPluginNames, pluginDetail.Name)
break
}
}
}
}
return filteredPluginNames, nil
}
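// Example usage (illustrative sketch; the capability string and timeout value
// are assumptions, not taken from this file): list enabled volume-driver
// plugins, matching pluginType strings such as "docker.volumedriver/1.0".
//
//	names, err := client.ListPluginsWithFilters(ctx, true, []string{"volumedriver"}, time.Minute)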
func (dg *dockerGoClient) ListPlugins(ctx context.Context, timeout time.Duration) ListPluginsResponse {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
// Buffered channel so in the case of timeout it takes one write, never gets
// read, and can still be GC'd
response := make(chan ListPluginsResponse, 1)
go func() { response <- dg.listPlugins(ctx) }()
// Wait until we get a response or for the 'done' context channel
select {
case resp := <-response:
return resp
case <-ctx.Done():
		// Context has either expired or been canceled. If it has timed out,
// send back the DockerTimeoutError
err := ctx.Err()
if err == context.DeadlineExceeded {
return ListPluginsResponse{Plugins: nil, Error: &DockerTimeoutError{timeout, "listing plugins"}}
}
// Context was canceled even though there was no timeout. Send
// back an error.
return ListPluginsResponse{Plugins: nil, Error: &CannotListPluginsError{err}}
}
}
func (dg *dockerGoClient) listPlugins(ctx context.Context) ListPluginsResponse {
client, err := dg.dockerClient()
if err != nil {
return ListPluginsResponse{Plugins: nil, Error: &CannotGetDockerClientError{version: dg.version, err: err}}
}
plugins, err := client.ListPlugins(ctx)
if err != nil {
return ListPluginsResponse{Plugins: nil, Error: &CannotListPluginsError{err}}
}
return ListPluginsResponse{Plugins: plugins, Error: nil}
}
// APIVersion returns the client api version
func (dg *dockerGoClient) APIVersion() (dockerclient.DockerVersion, error) {
client, err := dg.sdkDockerClient()
if err != nil {
return "", err
}
return dg.sdkClientFactory.FindClientAPIVersion(client), nil
}
// Stats returns a channel of *docker.Stats entries for the container.
func (dg *dockerGoClient) Stats(id string, ctx context.Context) (<-chan *docker.Stats, error) {
client, err := dg.dockerClient()
if err != nil {
return nil, err
}
stats := make(chan *docker.Stats)
options := docker.StatsOptions{
ID: id,
Stats: stats,
Stream: true,
Context: ctx,
InactivityTimeout: StatsInactivityTimeout,
}
go func() {
statsErr := client.Stats(options)
if statsErr != nil {
seelog.Infof("DockerGoClient: Unable to retrieve stats for container %s: %v",
id, statsErr)
}
}()
return stats, nil
}
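// Illustrative sketch of consuming the stats stream (the caller names below
// are assumptions for the example only):
//
//	statsChan, err := client.Stats(containerID, ctx)
//	if err != nil {
//		return err
//	}
//	for stat := range statsChan {
//		// stat is a *docker.Stats sample; the channel is closed when the
//		// stream ends.
//		recordUsage(stat) // hypothetical consumer
//	}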
// RemoveImage invokes the Docker SDK client's
// ImageRemove API with a timeout
func (dg *dockerGoClient) RemoveImage(ctx context.Context, imageName string, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
response := make(chan error, 1)
go func() { response <- dg.removeImage(ctx, imageName) }()
select {
case resp := <-response:
return resp
case <-ctx.Done():
return &DockerTimeoutError{timeout, "removing image"}
}
}
func (dg *dockerGoClient) removeImage(ctx context.Context, imageName string) error {
client, err := dg.sdkDockerClient()
if err != nil {
return err
}
	_, err = client.ImageRemove(ctx, imageName, types.ImageRemoveOptions{})
return err
}
// LoadImage loads an image from an input stream, with a specified timeout
func (dg *dockerGoClient) LoadImage(ctx context.Context, inputStream io.Reader, timeout time.Duration) error {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
response := make(chan error, 1)
go func() {
response <- dg.loadImage(docker.LoadImageOptions{
InputStream: inputStream,
Context: ctx,
})
}()
select {
case resp := <-response:
return resp
case <-ctx.Done():
return &DockerTimeoutError{timeout, "loading image"}
}
}
func (dg *dockerGoClient) loadImage(opts docker.LoadImageOptions) error {
client, err := dg.dockerClient()
if err != nil {
return err
}
return client.LoadImage(opts)
}
| 1 | 20,637 | Could you please add a `TODO` here that we won't need `docker.AuthConfiguration` anymore when we migrate to SDK's pull image? | aws-amazon-ecs-agent | go |
@@ -31,6 +31,8 @@ namespace Microsoft.DotNet.Build.CloudTestTasks
public string BlobNamePrefix { get; set; }
+ public ITaskItem[] BlobNames { get; set; }
+
public override bool Execute()
{
return ExecuteAsync().GetAwaiter().GetResult(); | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Microsoft.Build.Framework;
using System;
using System.Collections.Generic;
using System.Globalization;
using System.IO;
using System.Linq;
using System.Net.Http;
using System.Threading.Tasks;
using System.Xml;
namespace Microsoft.DotNet.Build.CloudTestTasks
{
public sealed class DownloadFromAzure : AzureConnectionStringBuildTask
{
/// <summary>
/// The name of the container to access. The specified name must be in the correct format, see the
/// following page for more info. https://msdn.microsoft.com/en-us/library/azure/dd135715.aspx
/// </summary>
[Required]
public string ContainerName { get; set; }
/// <summary>
/// Directory to download blob files to.
/// </summary>
[Required]
public string DownloadDirectory { get; set; }
public string BlobNamePrefix { get; set; }
public override bool Execute()
{
return ExecuteAsync().GetAwaiter().GetResult();
}
public async Task<bool> ExecuteAsync()
{
ParseConnectionString();
// If the connection string AND AccountKey & AccountName are provided, error out.
if (Log.HasLoggedErrors)
{
return false;
}
Log.LogMessage(MessageImportance.Normal, "Downloading contents of container {0} from storage account '{1}' to directory {2}.",
ContainerName, AccountName, DownloadDirectory);
string optionalBlobPrefixForQuery = string.IsNullOrEmpty(BlobNamePrefix) ? "" : "&prefix=" + BlobNamePrefix;
List<string> blobsNames = new List<string>();
string urlListBlobs = $"https://{AccountName}.blob.core.windows.net/{ContainerName}?restype=container&comp=list{optionalBlobPrefixForQuery}";
            Log.LogMessage(MessageImportance.Low, "Sending request to list blobs for container '{0}'.", ContainerName);
using (HttpClient client = new HttpClient())
{
try
{
Func<HttpRequestMessage> createRequest = () =>
{
DateTime dateTime = DateTime.UtcNow;
var request = new HttpRequestMessage(HttpMethod.Get, urlListBlobs);
request.Headers.Add(AzureHelper.DateHeaderString, dateTime.ToString("R", CultureInfo.InvariantCulture));
request.Headers.Add(AzureHelper.VersionHeaderString, AzureHelper.StorageApiVersion);
request.Headers.Add(AzureHelper.AuthorizationHeaderString, AzureHelper.AuthorizationHeader(
AccountName,
AccountKey,
"GET",
dateTime,
request));
return request;
};
XmlDocument responseFile;
string nextMarker = string.Empty;
using (HttpResponseMessage response = await AzureHelper.RequestWithRetry(Log, client, createRequest))
{
responseFile = new XmlDocument();
responseFile.LoadXml(await response.Content.ReadAsStringAsync());
XmlNodeList elemList = responseFile.GetElementsByTagName("Name");
blobsNames.AddRange(elemList.Cast<XmlNode>()
.Select(x => x.InnerText)
.ToList());
nextMarker = responseFile.GetElementsByTagName("NextMarker").Cast<XmlNode>().FirstOrDefault()?.InnerText;
}
while (!string.IsNullOrEmpty(nextMarker))
{
                        urlListBlobs = $"https://{AccountName}.blob.core.windows.net/{ContainerName}?restype=container&comp=list&marker={nextMarker}{optionalBlobPrefixForQuery}";
                        using (HttpResponseMessage response = await AzureHelper.RequestWithRetry(Log, client, createRequest))
{
responseFile = new XmlDocument();
                            responseFile.LoadXml(await response.Content.ReadAsStringAsync());
XmlNodeList elemList = responseFile.GetElementsByTagName("Name");
blobsNames.AddRange(elemList.Cast<XmlNode>()
.Select(x => x.InnerText)
.ToList());
nextMarker = responseFile.GetElementsByTagName("NextMarker").Cast<XmlNode>().FirstOrDefault()?.InnerText;
}
}
// track the number of blobs that fail to download
int failureCount = 0;
if (blobsNames.Count == 0)
Log.LogWarning("No blobs were found.");
else
Log.LogMessage(MessageImportance.Low, $"{blobsNames.Count} blobs found.");
foreach (string blob in blobsNames)
{
Log.LogMessage(MessageImportance.Low, "Downloading BLOB - {0}", blob);
string urlGetBlob = string.Format("https://{0}.blob.core.windows.net/{1}/{2}", AccountName, ContainerName, blob);
string filename = Path.Combine(DownloadDirectory, blob);
                        int lastSlashIndex = blob.LastIndexOf("/");
                        string blobDirectory = lastSlashIndex >= 0 ? blob.Substring(0, lastSlashIndex) : string.Empty;
                        string downloadBlobDirectory = Path.Combine(DownloadDirectory, blobDirectory);
if (!Directory.Exists(downloadBlobDirectory))
{
Directory.CreateDirectory(downloadBlobDirectory);
}
createRequest = () =>
{
DateTime dateTime = DateTime.UtcNow;
var request = new HttpRequestMessage(HttpMethod.Get, urlGetBlob);
request.Headers.Add(AzureHelper.DateHeaderString, dateTime.ToString("R", CultureInfo.InvariantCulture));
request.Headers.Add(AzureHelper.VersionHeaderString, AzureHelper.StorageApiVersion);
request.Headers.Add(AzureHelper.AuthorizationHeaderString, AzureHelper.AuthorizationHeader(
AccountName,
AccountKey,
"GET",
dateTime,
request));
return request;
};
using (HttpResponseMessage response = await AzureHelper.RequestWithRetry(Log, client, createRequest))
{
if (response.IsSuccessStatusCode)
{
                                // Blobs can be files but have the name of a directory. We'll skip those and log that something unexpected happened.
if (!string.IsNullOrEmpty(Path.GetFileName(filename)))
{
Stream responseStream = await response.Content.ReadAsStreamAsync();
using (FileStream sourceStream = File.Open(filename, FileMode.Create))
{
responseStream.CopyTo(sourceStream);
}
}
else
{
Log.LogWarning($"Unable to download blob '{blob}' as it has a directory-like name. This may cause problems if it was needed.");
}
}
else
{
Log.LogError("Failed to retrieve blob {0}, the status code was {1}", blob, response.StatusCode);
++failureCount;
}
}
}
Log.LogMessage($"{failureCount} errors seen downloading blobs.");
}
catch (Exception e)
{
Log.LogErrorFromException(e, true);
}
return !Log.HasLoggedErrors;
}
}
}
}
| 1 | 12,841 | Is there a scenario were we want to get an arbitrary set of blobs that don't share a common root? | dotnet-buildtools | .cs |
@@ -22,5 +22,7 @@ describe('Client Side Encryption', function() {
return testContext.setup(this.configuration);
});
- generateTopologyTests(testSuites, testContext);
+ generateTopologyTests(testSuites, testContext, spec => {
+ return !spec.description.match(/type=regex/);
+ });
}); | 1 | 'use strict';
const path = require('path');
const TestRunnerContext = require('./spec-runner').TestRunnerContext;
const gatherTestSuites = require('./spec-runner').gatherTestSuites;
const generateTopologyTests = require('./spec-runner').generateTopologyTests;
const missingAwsConfiguration =
process.env.AWS_ACCESS_KEY_ID == null || process.env.AWS_SECRET_ACCESS_KEY == null;
const skipTests = missingAwsConfiguration || process.env.MONGODB_CLIENT_ENCRYPTION == null;
describe('Client Side Encryption', function() {
if (skipTests) {
console.log('skipping Client Side Encryption tests due to lack of AWS credentials');
return;
}
const testContext = new TestRunnerContext();
const testSuites = gatherTestSuites(path.join(__dirname, 'spec', 'client-side-encryption'));
after(() => testContext.teardown());
before(function() {
return testContext.setup(this.configuration);
});
generateTopologyTests(testSuites, testContext);
});
| 1 | 16,810 | Can you leave a note about why we are skipping regex tests? | mongodb-node-mongodb-native | js |
@@ -55,10 +55,15 @@ var initCmd = &cmds.Command{
if err != nil {
return err
}
- rep, err := repo.CreateRepo(repoDir, newConfig)
+
+ if err := repo.InitFSRepo(repoDir, newConfig); err != nil {
+ return err
+ }
+ rep, err := repo.OpenFSRepo(repoDir)
if err != nil {
return err
}
+
// The only error Close can return is that the repo has already been closed
defer rep.Close() // nolint: errcheck
| 1 | package commands
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"github.com/ipfs/go-car"
hamt "github.com/ipfs/go-hamt-ipld"
"github.com/ipfs/go-ipfs-blockstore"
cmdkit "github.com/ipfs/go-ipfs-cmdkit"
cmds "github.com/ipfs/go-ipfs-cmds"
"github.com/libp2p/go-libp2p-crypto"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/config"
"github.com/filecoin-project/go-filecoin/consensus"
"github.com/filecoin-project/go-filecoin/fixtures"
"github.com/filecoin-project/go-filecoin/node"
"github.com/filecoin-project/go-filecoin/paths"
"github.com/filecoin-project/go-filecoin/repo"
"github.com/filecoin-project/go-filecoin/types"
)
var initCmd = &cmds.Command{
Helptext: cmdkit.HelpText{
Tagline: "Initialize a filecoin repo",
},
Options: []cmdkit.Option{
cmdkit.StringOption(GenesisFile, "path of file or HTTP(S) URL containing archive of genesis block DAG data"),
cmdkit.StringOption(PeerKeyFile, "path of file containing key to use for new node's libp2p identity"),
		cmdkit.StringOption(WithMiner, "when set, creates a custom genesis block with a pre-generated miner account; requires running the daemon using dev mode (--dev)"),
cmdkit.StringOption(OptionSectorDir, "path of directory into which staged and sealed sectors will be written"),
cmdkit.StringOption(DefaultAddress, "when set, sets the daemons's default address to the provided address"),
cmdkit.UintOption(AutoSealIntervalSeconds, "when set to a number > 0, configures the daemon to check for and seal any staged sectors on an interval.").WithDefault(uint(120)),
cmdkit.BoolOption(DevnetTest, "when set, populates config bootstrap addrs with the dns multiaddrs of the test devnet and other test devnet specific bootstrap parameters."),
cmdkit.BoolOption(DevnetNightly, "when set, populates config bootstrap addrs with the dns multiaddrs of the nightly devnet and other nightly devnet specific bootstrap parameters"),
cmdkit.BoolOption(DevnetUser, "when set, populates config bootstrap addrs with the dns multiaddrs of the user devnet and other user devnet specific bootstrap parameters"),
},
Run: func(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error {
newConfig, err := getConfigFromOptions(req.Options)
if err != nil {
return err
}
repoDir, _ := req.Options[OptionRepoDir].(string)
if err := re.Emit(fmt.Sprintf("initializing filecoin node at %s\n", repoDir)); err != nil {
return err
}
repoDir, err = paths.GetRepoPath(repoDir)
if err != nil {
return err
}
rep, err := repo.CreateRepo(repoDir, newConfig)
if err != nil {
return err
}
// The only error Close can return is that the repo has already been closed
defer rep.Close() // nolint: errcheck
genesisFileSource, _ := req.Options[GenesisFile].(string)
genesisFile, err := loadGenesis(req.Context, rep, genesisFileSource)
if err != nil {
return err
}
autoSealIntervalSeconds, _ := req.Options[AutoSealIntervalSeconds].(uint)
peerKeyFile, _ := req.Options[PeerKeyFile].(string)
initopts, err := getNodeInitOpts(autoSealIntervalSeconds, peerKeyFile)
if err != nil {
return err
}
return node.Init(req.Context, rep, genesisFile, initopts...)
},
Encoders: cmds.EncoderMap{
cmds.Text: cmds.MakeEncoder(initTextEncoder),
},
}
func getConfigFromOptions(options cmdkit.OptMap) (*config.Config, error) {
newConfig := config.NewDefaultConfig()
if dir, ok := options[OptionSectorDir].(string); ok {
newConfig.SectorBase.RootDir = dir
}
if m, ok := options[WithMiner].(string); ok {
var err error
newConfig.Mining.MinerAddress, err = address.NewFromString(m)
if err != nil {
return nil, err
}
}
if m, ok := options[DefaultAddress].(string); ok {
var err error
newConfig.Wallet.DefaultAddress, err = address.NewFromString(m)
if err != nil {
return nil, err
}
}
devnetTest, _ := options[DevnetTest].(bool)
devnetNightly, _ := options[DevnetNightly].(bool)
devnetUser, _ := options[DevnetUser].(bool)
if (devnetTest && devnetNightly) || (devnetTest && devnetUser) || (devnetNightly && devnetUser) {
return nil, fmt.Errorf(`cannot specify more than one "devnet-" option`)
}
// Setup devnet specific config options.
if devnetTest || devnetNightly || devnetUser {
newConfig.Bootstrap.MinPeerThreshold = 1
newConfig.Bootstrap.Period = "10s"
}
// Setup devnet test specific config options.
if devnetTest {
newConfig.Bootstrap.Addresses = fixtures.DevnetTestBootstrapAddrs
newConfig.Net = "devnet-test"
}
// Setup devnet nightly specific config options.
if devnetNightly {
newConfig.Bootstrap.Addresses = fixtures.DevnetNightlyBootstrapAddrs
newConfig.Net = "devnet-nightly"
}
// Setup devnet user specific config options.
if devnetUser {
newConfig.Bootstrap.Addresses = fixtures.DevnetUserBootstrapAddrs
newConfig.Net = "devnet-user"
}
return newConfig, nil
}
func initTextEncoder(req *cmds.Request, w io.Writer, val interface{}) error {
_, err := fmt.Fprintf(w, val.(string))
return err
}
func loadGenesis(ctx context.Context, rep repo.Repo, sourceName string) (consensus.GenesisInitFunc, error) {
if sourceName == "" {
return consensus.MakeGenesisFunc(consensus.ProofsMode(types.LiveProofsMode)), nil
}
sourceURL, err := url.Parse(sourceName)
if err != nil {
return nil, fmt.Errorf("invalid filepath or URL for genesis file: %s", sourceURL)
}
var source io.ReadCloser
if sourceURL.Scheme == "http" || sourceURL.Scheme == "https" {
// NOTE: This code is temporary. It allows downloading a genesis block via HTTP(S) to be able to join a
// recently deployed test devnet.
response, err := http.Get(sourceName)
if err != nil {
return nil, err
}
source = response.Body
} else if sourceURL.Scheme != "" {
return nil, fmt.Errorf("unsupported protocol for genesis file: %s", sourceURL.Scheme)
} else {
file, err := os.Open(sourceName)
if err != nil {
return nil, err
}
source = file
}
defer source.Close() // nolint: errcheck
bs := blockstore.NewBlockstore(rep.Datastore())
ch, err := car.LoadCar(bs, source)
if err != nil {
return nil, err
}
if len(ch.Roots) != 1 {
return nil, fmt.Errorf("expected car with only a single root")
}
gif := func(cst *hamt.CborIpldStore, bs blockstore.Blockstore) (*types.Block, error) {
var blk types.Block
if err := cst.Get(ctx, ch.Roots[0], &blk); err != nil {
return nil, err
}
return &blk, nil
}
return gif, nil
}
func getNodeInitOpts(autoSealIntervalSeconds uint, peerKeyFile string) ([]node.InitOpt, error) {
var initOpts []node.InitOpt
if peerKeyFile != "" {
data, err := ioutil.ReadFile(peerKeyFile)
if err != nil {
return nil, err
}
peerKey, err := crypto.UnmarshalPrivateKey(data)
if err != nil {
return nil, err
}
initOpts = append(initOpts, node.PeerKeyOpt(peerKey))
}
initOpts = append(initOpts, node.AutoSealIntervalSecondsOpt(autoSealIntervalSeconds))
return initOpts, nil
}
| 1 | 19,023 | This was the only caller of CreateRepo, so I inlined it. | filecoin-project-venus | go |
@@ -109,6 +109,8 @@ type VaultAuth struct {
TokenSecretRef SecretKeySelector `json:"tokenSecretRef,omitempty"`
// This Secret contains a AppRole and Secret
AppRole VaultAppRole `json:"appRole,omitempty"`
+ // Where the authentication path is mounted in Vault.
+ AuthPath string `json:"authPath,omitempty"`
}
type VaultAppRole struct { | 1 | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
AltNamesAnnotationKey = "certmanager.k8s.io/alt-names"
CommonNameAnnotationKey = "certmanager.k8s.io/common-name"
IssuerNameAnnotationKey = "certmanager.k8s.io/issuer-name"
IssuerKindAnnotationKey = "certmanager.k8s.io/issuer-kind"
CertificateNameKey = "certmanager.k8s.io/certificate-name"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=clusterissuers
type ClusterIssuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterIssuerList is a list of Issuers
type ClusterIssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []ClusterIssuer `json:"items"`
}
// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=issuers
type Issuer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec IssuerSpec `json:"spec,omitempty"`
Status IssuerStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// IssuerList is a list of Issuers
type IssuerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Issuer `json:"items"`
}
// IssuerSpec is the specification of an Issuer. This includes any
// configuration required for the issuer.
type IssuerSpec struct {
IssuerConfig `json:",inline"`
}
type IssuerConfig struct {
ACME *ACMEIssuer `json:"acme,omitempty"`
CA *CAIssuer `json:"ca,omitempty"`
Vault *VaultIssuer `json:"vault,omitempty"`
SelfSigned *SelfSignedIssuer `json:"selfSigned,omitempty"`
}
type SelfSignedIssuer struct {
}
type VaultIssuer struct {
// Vault authentication
Auth VaultAuth `json:"auth"`
// Server is the vault connection address
Server string `json:"server"`
// Vault URL path to the certificate role
Path string `json:"path"`
}
// Vault authentication can be configured:
// - With a secret containing a token. Cert-manager uses this token as-is.
// - With a secret containing an AppRole. This AppRole is used to authenticate to
// Vault and retrieve a token.
type VaultAuth struct {
// This Secret contains the Vault token key
TokenSecretRef SecretKeySelector `json:"tokenSecretRef,omitempty"`
	// This Secret contains an AppRole and Secret
AppRole VaultAppRole `json:"appRole,omitempty"`
}
type VaultAppRole struct {
RoleId string `json:"roleId"`
SecretRef SecretKeySelector `json:"secretRef"`
}
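// Illustrative sketch (not part of the original file): constructing a
// VaultIssuer that authenticates with an AppRole. All concrete values
// (server URL, signing path, secret name and key) are made-up examples.
//
//	issuer := VaultIssuer{
//		Server: "https://vault.example.com:8200",
//		Path:   "pki_int/sign/example-role",
//		Auth: VaultAuth{
//			AppRole: VaultAppRole{
//				RoleId: "example-role-id",
//				SecretRef: SecretKeySelector{
//					LocalObjectReference: LocalObjectReference{Name: "vault-approle"},
//					Key:                  "secretId",
//				},
//			},
//		},
//	}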
type CAIssuer struct {
// SecretName is the name of the secret used to sign Certificates issued
// by this Issuer.
SecretName string `json:"secretName"`
}
// ACMEIssuer contains the specification for an ACME issuer
type ACMEIssuer struct {
// Email is the email for this account
Email string `json:"email"`
// Server is the ACME server URL
Server string `json:"server"`
// If true, skip verifying the ACME server TLS certificate
SkipTLSVerify bool `json:"skipTLSVerify,omitempty"`
// PrivateKey is the name of a secret containing the private key for this
// user account.
PrivateKey SecretKeySelector `json:"privateKeySecretRef"`
// HTTP01 config
HTTP01 *ACMEIssuerHTTP01Config `json:"http01,omitempty"`
// DNS-01 config
DNS01 *ACMEIssuerDNS01Config `json:"dns01,omitempty"`
}
type ACMEIssuerHTTP01Config struct {
}
// ACMEIssuerDNS01Config is a structure containing the ACME DNS configuration
// options
type ACMEIssuerDNS01Config struct {
Providers []ACMEIssuerDNS01Provider `json:"providers"`
}
type ACMEIssuerDNS01Provider struct {
Name string `json:"name"`
Akamai *ACMEIssuerDNS01ProviderAkamai `json:"akamai,omitempty"`
CloudDNS *ACMEIssuerDNS01ProviderCloudDNS `json:"clouddns,omitempty"`
Cloudflare *ACMEIssuerDNS01ProviderCloudflare `json:"cloudflare,omitempty"`
Route53 *ACMEIssuerDNS01ProviderRoute53 `json:"route53,omitempty"`
AzureDNS *ACMEIssuerDNS01ProviderAzureDNS `json:"azuredns,omitempty"`
}
// ACMEIssuerDNS01ProviderAkamai is a structure containing the DNS
// configuration for Akamai DNS—Zone Record Management API
type ACMEIssuerDNS01ProviderAkamai struct {
ServiceConsumerDomain string `json:"serviceConsumerDomain"`
ClientToken SecretKeySelector `json:"clientTokenSecretRef"`
ClientSecret SecretKeySelector `json:"clientSecretSecretRef"`
AccessToken SecretKeySelector `json:"accessTokenSecretRef"`
}
// ACMEIssuerDNS01ProviderCloudDNS is a structure containing the DNS
// configuration for Google Cloud DNS
type ACMEIssuerDNS01ProviderCloudDNS struct {
ServiceAccount SecretKeySelector `json:"serviceAccountSecretRef"`
Project string `json:"project"`
}
// ACMEIssuerDNS01ProviderCloudflare is a structure containing the DNS
// configuration for Cloudflare
type ACMEIssuerDNS01ProviderCloudflare struct {
Email string `json:"email"`
APIKey SecretKeySelector `json:"apiKeySecretRef"`
}
// ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53
// configuration for AWS
type ACMEIssuerDNS01ProviderRoute53 struct {
AccessKeyID string `json:"accessKeyID"`
SecretAccessKey SecretKeySelector `json:"secretAccessKeySecretRef"`
HostedZoneID string `json:"hostedZoneID"`
Region string `json:"region"`
}
// ACMEIssuerDNS01ProviderAzureDNS is a structure containing the
// configuration for Azure DNS
type ACMEIssuerDNS01ProviderAzureDNS struct {
ClientID string `json:"clientID"`
ClientSecret SecretKeySelector `json:"clientSecretSecretRef"`
SubscriptionID string `json:"subscriptionID"`
TenantID string `json:"tenantID"`
ResourceGroupName string `json:"resourceGroupName"`
	// +optional
HostedZoneName string `json:"hostedZoneName"`
}
// IssuerStatus contains status information about an Issuer
type IssuerStatus struct {
Conditions []IssuerCondition `json:"conditions"`
ACME *ACMEIssuerStatus `json:"acme,omitempty"`
}
// IssuerCondition contains condition information for an Issuer.
type IssuerCondition struct {
// Type of the condition, currently ('Ready').
Type IssuerConditionType `json:"type"`
// Status of the condition, one of ('True', 'False', 'Unknown').
Status ConditionStatus `json:"status"`
// LastTransitionTime is the timestamp corresponding to the last status
// change of this condition.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a brief machine readable explanation for the condition's last
// transition.
Reason string `json:"reason"`
// Message is a human readable description of the details of the last
// transition, complementing reason.
Message string `json:"message"`
}
// IssuerConditionType represents an Issuer condition value.
type IssuerConditionType string
const (
// IssuerConditionReady represents the fact that a given Issuer condition
// is in ready state.
IssuerConditionReady IssuerConditionType = "Ready"
)
// ConditionStatus represents a condition's status.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in
// the condition; "ConditionFalse" means a resource is not in the condition;
// "ConditionUnknown" means kubernetes can't decide if a resource is in the
// condition or not. In the future, we could add other intermediate
// conditions, e.g. ConditionDegraded.
const (
// ConditionTrue represents the fact that a given condition is true
ConditionTrue ConditionStatus = "True"
// ConditionFalse represents the fact that a given condition is false
ConditionFalse ConditionStatus = "False"
// ConditionUnknown represents the fact that a given condition is unknown
ConditionUnknown ConditionStatus = "Unknown"
)
type ACMEIssuerStatus struct {
// URI is the unique account identifier, which can also be used to retrieve
// account details from the CA
URI string `json:"uri"`
}
// +genclient
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:resource:path=certificates
// Certificate is a type to represent a Certificate from ACME
type Certificate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CertificateSpec `json:"spec,omitempty"`
Status CertificateStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CertificateList is a list of Certificates
type CertificateList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Certificate `json:"items"`
}
// CertificateSpec defines the desired state of Certificate
type CertificateSpec struct {
// CommonName is a common name to be used on the Certificate
CommonName string `json:"commonName"`
// DNSNames is a list of subject alt names to be used on the Certificate
DNSNames []string `json:"dnsNames"`
// SecretName is the name of the secret resource to store this secret in
SecretName string `json:"secretName"`
// IssuerRef is a reference to the issuer for this certificate. If the
// namespace field is not set, it is assumed to be in the same namespace
// as the certificate. If the namespace field is set to the empty value "",
// a ClusterIssuer of the given name will be used. Any other value is
// invalid.
IssuerRef ObjectReference `json:"issuerRef"`
ACME *ACMECertificateConfig `json:"acme,omitempty"`
}
// ACMEConfig contains the configuration for the ACME certificate provider
type ACMECertificateConfig struct {
Config []ACMECertificateDomainConfig `json:"config"`
}
type ACMECertificateDomainConfig struct {
Domains []string `json:"domains"`
ACMESolverConfig `json:",inline"`
}
type ACMESolverConfig struct {
HTTP01 *ACMECertificateHTTP01Config `json:"http01,omitempty"`
DNS01 *ACMECertificateDNS01Config `json:"dns01,omitempty"`
}
type ACMECertificateHTTP01Config struct {
Ingress string `json:"ingress"`
IngressClass *string `json:"ingressClass,omitempty"`
}
type ACMECertificateDNS01Config struct {
Provider string `json:"provider"`
}
// CertificateStatus defines the observed state of Certificate
type CertificateStatus struct {
Conditions []CertificateCondition `json:"conditions,omitempty"`
ACME *CertificateACMEStatus `json:"acme,omitempty"`
}
// CertificateCondition contains condition information for an Certificate.
type CertificateCondition struct {
// Type of the condition, currently ('Ready').
Type CertificateConditionType `json:"type"`
// Status of the condition, one of ('True', 'False', 'Unknown').
Status ConditionStatus `json:"status"`
// LastTransitionTime is the timestamp corresponding to the last status
// change of this condition.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a brief machine readable explanation for the condition's last
// transition.
Reason string `json:"reason"`
// Message is a human readable description of the details of the last
// transition, complementing reason.
Message string `json:"message"`
}
// CertificateConditionType represents an Certificate condition value.
type CertificateConditionType string
const (
// CertificateConditionReady represents the fact that a given Certificate condition
// is in ready state.
CertificateConditionReady CertificateConditionType = "Ready"
// CertificateConditionValidationFailed is used to indicate whether a
// validation for a Certificate has failed.
// This is currently used by the ACME issuer to track when the last
// validation was attempted.
CertificateConditionValidationFailed CertificateConditionType = "ValidateFailed"
)
// CertificateACMEStatus holds the status for an ACME issuer
type CertificateACMEStatus struct {
// Order contains details about the current in-progress ACME Order.
Order ACMEOrderStatus `json:"order,omitempty"`
}
type ACMEOrderStatus struct {
// The URL that can be used to get information about the ACME order.
URL string `json:"url"`
Challenges []ACMEOrderChallenge `json:"challenges,omitempty"`
}
type ACMEOrderChallenge struct {
// The URL that can be used to get information about the ACME challenge.
URL string `json:"url"`
// The URL that can be used to get information about the ACME authorization
// associated with the challenge.
AuthzURL string `json:"authzURL"`
// Type of ACME challenge
// Either http-01 or dns-01
Type string `json:"type"`
// Domain this challenge corresponds to
Domain string `json:"domain"`
// Challenge token for this challenge
Token string `json:"token"`
// Challenge key for this challenge
Key string `json:"key"`
// Set to true if this challenge is for a wildcard domain
Wildcard bool `json:"wildcard"`
// Configuration used to present this challenge
ACMESolverConfig `json:",inline"`
}
type LocalObjectReference struct {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// TODO: Add other useful fields. apiVersion, kind, uid?
Name string `json:"name,omitempty"`
}
// ObjectReference is a reference to an object. If the namespace field is set,
// it is assumed to be in a namespace
type ObjectReference struct {
Name string `json:"name"`
Kind string `json:"kind,omitempty"`
}
const (
ClusterIssuerKind = "ClusterIssuer"
IssuerKind = "Issuer"
)
type SecretKeySelector struct {
// The name of the secret in the pod's namespace to select from.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// The key of the secret to select from. Must be a valid secret key.
Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
}
| 1 | 12,604 | As this is already a child of `auth`, I think we can shorten this to `path` | jetstack-cert-manager | go |
@@ -297,6 +297,14 @@ class UIATextInfo(textInfos.TextInfo):
elif position==textInfos.POSITION_LAST:
self._rangeObj=self.obj.UIATextPattern.documentRange
self.collapse(True)
+ elif position in (textInfos.POSITION_FIRSTVISIBLE, textInfos.POSITION_LASTVISIBLE):
+ try:
+ visiRanges = self.obj.UIATextPattern.GetVisibleRanges()
+ element = 0 if position == textInfos.POSITION_FIRSTVISIBLE else visiRanges.length - 1
+				self._rangeObj = visiRanges.GetElement(element)
+			except COMError:
+				# Error: FIRSTVISIBLE/LASTVISIBLE positions not supported by the UIA text pattern.
+ raise NotImplementedError
elif position==textInfos.POSITION_ALL or position==self.obj:
self._rangeObj=self.obj.UIATextPattern.documentRange
elif isinstance(position,UIA) or isinstance(position,UIAHandler.IUIAutomationElement): | 1 | #NVDAObjects/UIA/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2009-2019 NV Access Limited, Joseph Lee, Mohammad Suliman, Babbage B.V., Leonard de Ruijter
"""Support for UI Automation (UIA) controls."""
from ctypes import byref
from ctypes.wintypes import POINT, RECT
from comtypes import COMError
from comtypes.automation import VARIANT
import time
import weakref
import sys
import numbers
import colors
import languageHandler
import UIAHandler
import globalVars
import eventHandler
import controlTypes
import config
import speech
import api
import textInfos
from logHandler import log
from UIAUtils import *
from NVDAObjects.window import Window
from NVDAObjects import NVDAObjectTextInfo, InvalidNVDAObject
from NVDAObjects.behaviors import ProgressBar, EditableTextWithoutAutoSelectDetection, Dialog, Notification, EditableTextWithSuggestions
import braille
from locationHelper import RectLTWH
import ui
class UIATextInfo(textInfos.TextInfo):
_cache_controlFieldNVDAObjectClass=True
def _get_controlFieldNVDAObjectClass(self):
"""
The NVDAObject class to be used by the _getTextWithFieldsForUIARange method when instantiating NVDAObjects in order to generate control fields for content.
L{UIA} is usually what you want, but if you know the class will always mutate to a certain subclass (E.g. WordDocumentNode) then performance gains can be made by returning the subclass here.
"""
return UIA
# UIA property IDs that should be automatically cached for control fields
_controlFieldUIACachedPropertyIDs={
UIAHandler.UIA_IsValuePatternAvailablePropertyId,
UIAHandler.UIA_HasKeyboardFocusPropertyId,
UIAHandler.UIA_NamePropertyId,
UIAHandler.UIA_ToggleToggleStatePropertyId,
UIAHandler.UIA_HelpTextPropertyId,
UIAHandler.UIA_AccessKeyPropertyId,
UIAHandler.UIA_AcceleratorKeyPropertyId,
UIAHandler.UIA_HasKeyboardFocusPropertyId,
UIAHandler.UIA_SelectionItemIsSelectedPropertyId,
UIAHandler.UIA_IsDataValidForFormPropertyId,
UIAHandler.UIA_IsRequiredForFormPropertyId,
UIAHandler.UIA_ValueIsReadOnlyPropertyId,
UIAHandler.UIA_ExpandCollapseExpandCollapseStatePropertyId,
UIAHandler.UIA_ToggleToggleStatePropertyId,
UIAHandler.UIA_IsKeyboardFocusablePropertyId,
UIAHandler.UIA_IsPasswordPropertyId,
UIAHandler.UIA_IsSelectionItemPatternAvailablePropertyId,
UIAHandler.UIA_GridItemRowPropertyId,
UIAHandler.UIA_TableItemRowHeaderItemsPropertyId,
UIAHandler.UIA_GridItemColumnPropertyId,
UIAHandler.UIA_TableItemColumnHeaderItemsPropertyId,
UIAHandler.UIA_GridRowCountPropertyId,
UIAHandler.UIA_GridColumnCountPropertyId,
UIAHandler.UIA_GridItemContainingGridPropertyId,
UIAHandler.UIA_RangeValueValuePropertyId,
UIAHandler.UIA_RangeValueMinimumPropertyId,
UIAHandler.UIA_RangeValueMaximumPropertyId,
UIAHandler.UIA_ValueValuePropertyId,
UIAHandler.UIA_PositionInSetPropertyId,
UIAHandler.UIA_SizeOfSetPropertyId,
UIAHandler.UIA_AriaRolePropertyId,
UIAHandler.UIA_LandmarkTypePropertyId,
UIAHandler.UIA_AriaPropertiesPropertyId,
UIAHandler.UIA_LevelPropertyId,
UIAHandler.UIA_IsEnabledPropertyId,
} if UIAHandler.isUIAAvailable else set()
def _get__controlFieldUIACacheRequest(self):
""" The UIA cacheRequest object that will be used when fetching all UIA elements needed when generating control fields for this TextInfo's content."""
cacheRequest=UIAHandler.handler.baseCacheRequest.clone()
for ID in self._controlFieldUIACachedPropertyIDs:
try:
cacheRequest.addProperty(ID)
except COMError:
pass
UIATextInfo._controlFieldUIACacheRequest=self._controlFieldUIACacheRequest=cacheRequest
return cacheRequest
#: The UI Automation text units (in order of resolution) that should be used when fetching formatting.
UIAFormatUnits=[
UIAHandler.TextUnit_Format,
UIAHandler.TextUnit_Word,
UIAHandler.TextUnit_Character
] if UIAHandler.isUIAAvailable else []
def find(self,text,caseSensitive=False,reverse=False):
tempRange=self._rangeObj.clone()
documentRange=self.obj.UIATextPattern.documentRange
if reverse:
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_Start,documentRange,UIAHandler.TextPatternRangeEndpoint_Start)
else:
if tempRange.move(UIAHandler.TextUnit_Character,1)==0:
return False
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,documentRange,UIAHandler.TextPatternRangeEndpoint_End)
try:
r=tempRange.findText(text,reverse,not caseSensitive)
except COMError:
r=None
if r:
r.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,r,UIAHandler.TextPatternRangeEndpoint_Start)
self._rangeObj=r
return True
return False
def _getFormatFieldAtRange(self,textRange,formatConfig,ignoreMixedValues=False):
"""
Fetches formatting for the given UI Automation Text range.
		@param textRange: the text range whose formatting should be fetched.
@type textRange: L{UIAutomation.IUIAutomationTextRange}
@param formatConfig: the types of formatting requested.
		@type formatConfig: a dictionary of NVDA document formatting configuration keys with values set to true for those types that should be fetched.
@param ignoreMixedValues: If True, formatting that is mixed according to UI Automation will not be included. If False, L{UIAUtils.MixedAttributeError} will be raised if UI Automation gives back a mixed attribute value signifying that the caller may want to try again with a smaller range.
		@type ignoreMixedValues: bool
@return: The formatting for the given text range.
@rtype: L{textInfos.FormatField}
"""
formatField=textInfos.FormatField()
if not isinstance(textRange,UIAHandler.IUIAutomationTextRange):
raise ValueError("%s is not a text range"%textRange)
fetchAnnotationTypes=formatConfig["reportSpellingErrors"] or formatConfig["reportComments"] or formatConfig["reportRevisions"]
try:
textRange=textRange.QueryInterface(UIAHandler.IUIAutomationTextRange3)
except (COMError,AttributeError):
fetcher=UIATextRangeAttributeValueFetcher(textRange)
else:
# Precalculate all the IDs we could possibly need so that they can be fetched in one cross-process call where supported
IDs=set()
if formatConfig["reportFontName"]:
IDs.add(UIAHandler.UIA_FontNameAttributeId)
if formatConfig["reportFontSize"]:
IDs.add(UIAHandler.UIA_FontSizeAttributeId)
if formatConfig["reportFontAttributes"]:
IDs.update({UIAHandler.UIA_FontWeightAttributeId,UIAHandler.UIA_IsItalicAttributeId,UIAHandler.UIA_UnderlineStyleAttributeId,UIAHandler.UIA_StrikethroughStyleAttributeId,UIAHandler.UIA_IsSuperscriptAttributeId,UIAHandler.UIA_IsSubscriptAttributeId,})
if formatConfig["reportAlignment"]:
IDs.add(UIAHandler.UIA_HorizontalTextAlignmentAttributeId)
if formatConfig["reportColor"]:
IDs.add(UIAHandler.UIA_BackgroundColorAttributeId)
IDs.add(UIAHandler.UIA_ForegroundColorAttributeId)
if formatConfig['reportLineSpacing']:
IDs.add(UIAHandler.UIA_LineSpacingAttributeId)
if formatConfig['reportLinks']:
IDs.add(UIAHandler.UIA_LinkAttributeId)
if formatConfig['reportStyle']:
IDs.add(UIAHandler.UIA_StyleNameAttributeId)
if formatConfig["reportHeadings"]:
IDs.add(UIAHandler.UIA_StyleIdAttributeId)
if fetchAnnotationTypes:
IDs.add(UIAHandler.UIA_AnnotationTypesAttributeId)
IDs.add(UIAHandler.UIA_CultureAttributeId)
fetcher=BulkUIATextRangeAttributeValueFetcher(textRange,IDs)
if formatConfig["reportFontName"]:
val=fetcher.getValue(UIAHandler.UIA_FontNameAttributeId,ignoreMixedValues=ignoreMixedValues)
if val!=UIAHandler.handler.reservedNotSupportedValue:
formatField["font-name"]=val
if formatConfig["reportFontSize"]:
val=fetcher.getValue(UIAHandler.UIA_FontSizeAttributeId,ignoreMixedValues=ignoreMixedValues)
if isinstance(val,numbers.Number):
formatField['font-size']="%g pt"%float(val)
if formatConfig["reportFontAttributes"]:
val=fetcher.getValue(UIAHandler.UIA_FontWeightAttributeId,ignoreMixedValues=ignoreMixedValues)
if isinstance(val,int):
formatField['bold']=(val>=700)
val=fetcher.getValue(UIAHandler.UIA_IsItalicAttributeId,ignoreMixedValues=ignoreMixedValues)
if val!=UIAHandler.handler.reservedNotSupportedValue:
formatField['italic']=val
val=fetcher.getValue(UIAHandler.UIA_UnderlineStyleAttributeId,ignoreMixedValues=ignoreMixedValues)
if val!=UIAHandler.handler.reservedNotSupportedValue:
formatField['underline']=bool(val)
val=fetcher.getValue(UIAHandler.UIA_StrikethroughStyleAttributeId,ignoreMixedValues=ignoreMixedValues)
if val!=UIAHandler.handler.reservedNotSupportedValue:
formatField['strikethrough']=bool(val)
textPosition=None
val=fetcher.getValue(UIAHandler.UIA_IsSuperscriptAttributeId,ignoreMixedValues=ignoreMixedValues)
if val!=UIAHandler.handler.reservedNotSupportedValue and val:
textPosition='super'
else:
val=fetcher.getValue(UIAHandler.UIA_IsSubscriptAttributeId,ignoreMixedValues=ignoreMixedValues)
if val!=UIAHandler.handler.reservedNotSupportedValue and val:
textPosition="sub"
else:
textPosition="baseline"
if textPosition:
formatField['text-position']=textPosition
if formatConfig['reportStyle']:
val=fetcher.getValue(UIAHandler.UIA_StyleNameAttributeId,ignoreMixedValues=ignoreMixedValues)
if val!=UIAHandler.handler.reservedNotSupportedValue:
formatField["style"]=val
if formatConfig["reportAlignment"]:
val=fetcher.getValue(UIAHandler.UIA_HorizontalTextAlignmentAttributeId,ignoreMixedValues=ignoreMixedValues)
if val==UIAHandler.HorizontalTextAlignment_Left:
val="left"
elif val==UIAHandler.HorizontalTextAlignment_Centered:
val="center"
elif val==UIAHandler.HorizontalTextAlignment_Right:
val="right"
elif val==UIAHandler.HorizontalTextAlignment_Justified:
val="justify"
else:
val=None
if val:
formatField['text-align']=val
if formatConfig["reportColor"]:
val=fetcher.getValue(UIAHandler.UIA_BackgroundColorAttributeId,ignoreMixedValues=ignoreMixedValues)
if isinstance(val,int):
formatField['background-color']=colors.RGB.fromCOLORREF(val)
val=fetcher.getValue(UIAHandler.UIA_ForegroundColorAttributeId,ignoreMixedValues=ignoreMixedValues)
if isinstance(val,int):
formatField['color']=colors.RGB.fromCOLORREF(val)
if formatConfig['reportLineSpacing']:
val=fetcher.getValue(UIAHandler.UIA_LineSpacingAttributeId,ignoreMixedValues=ignoreMixedValues)
if val!=UIAHandler.handler.reservedNotSupportedValue:
if val:
formatField['line-spacing']=val
if formatConfig['reportLinks']:
val=fetcher.getValue(UIAHandler.UIA_LinkAttributeId,ignoreMixedValues=ignoreMixedValues)
if val!=UIAHandler.handler.reservedNotSupportedValue:
if val:
formatField['link']=True
if formatConfig["reportHeadings"]:
styleIDValue=fetcher.getValue(UIAHandler.UIA_StyleIdAttributeId,ignoreMixedValues=ignoreMixedValues)
if UIAHandler.StyleId_Heading1<=styleIDValue<=UIAHandler.StyleId_Heading9:
formatField["heading-level"]=(styleIDValue-UIAHandler.StyleId_Heading1)+1
if fetchAnnotationTypes:
annotationTypes=fetcher.getValue(UIAHandler.UIA_AnnotationTypesAttributeId,ignoreMixedValues=ignoreMixedValues)
# Some UIA implementations return a single value rather than a tuple.
# Always mutate to a tuple to allow for a generic x in y matching
if not isinstance(annotationTypes,tuple):
annotationTypes=(annotationTypes,)
if formatConfig["reportSpellingErrors"]:
if UIAHandler.AnnotationType_SpellingError in annotationTypes:
formatField["invalid-spelling"]=True
if UIAHandler.AnnotationType_GrammarError in annotationTypes:
formatField["invalid-grammar"]=True
if formatConfig["reportComments"]:
if UIAHandler.AnnotationType_Comment in annotationTypes:
formatField["comment"]=True
if formatConfig["reportRevisions"]:
if UIAHandler.AnnotationType_InsertionChange in annotationTypes:
formatField["revision-insertion"]=True
elif UIAHandler.AnnotationType_DeletionChange in annotationTypes:
formatField["revision-deletion"]=True
cultureVal=fetcher.getValue(UIAHandler.UIA_CultureAttributeId,ignoreMixedValues=ignoreMixedValues)
if cultureVal and isinstance(cultureVal,int):
try:
formatField['language']=languageHandler.windowsLCIDToLocaleName(cultureVal)
except:
log.debugWarning("language error",exc_info=True)
pass
return textInfos.FieldCommand("formatChange",formatField)
def __init__(self,obj,position,_rangeObj=None):
super(UIATextInfo,self).__init__(obj,position)
if _rangeObj:
try:
self._rangeObj=_rangeObj.clone()
except COMError:
# IUIAutomationTextRange::clone can sometimes fail, such as in UWP account login screens
log.debugWarning("Could not clone range",exc_info=True)
raise RuntimeError("Could not clone range")
elif position in (textInfos.POSITION_CARET,textInfos.POSITION_SELECTION):
try:
sel=self.obj.UIATextPattern.GetSelection()
except COMError:
raise RuntimeError("No selection available")
if sel.length>0:
self._rangeObj=sel.getElement(0).clone()
else:
raise NotImplementedError("UIAutomationTextRangeArray is empty")
if position==textInfos.POSITION_CARET:
self.collapse()
elif isinstance(position,UIATextInfo): #bookmark
self._rangeObj=position._rangeObj
elif position==textInfos.POSITION_FIRST:
try:
self._rangeObj=self.obj.UIATextPattern.documentRange
except COMError:
# Error: first position not supported by the UIA text pattern.
raise RuntimeError
self.collapse()
elif position==textInfos.POSITION_LAST:
self._rangeObj=self.obj.UIATextPattern.documentRange
self.collapse(True)
elif position==textInfos.POSITION_ALL or position==self.obj:
self._rangeObj=self.obj.UIATextPattern.documentRange
elif isinstance(position,UIA) or isinstance(position,UIAHandler.IUIAutomationElement):
if isinstance(position,UIA):
position=position.UIAElement
try:
self._rangeObj=self.obj.UIATextPattern.rangeFromChild(position)
except COMError:
raise LookupError
# sometimes rangeFromChild can return a NULL range
if not self._rangeObj: raise LookupError
elif isinstance(position,textInfos.Point):
#rangeFromPoint used to cause a freeze in UIA client library!
p=POINT(position.x,position.y)
self._rangeObj=self.obj.UIATextPattern.RangeFromPoint(p)
elif isinstance(position,UIAHandler.IUIAutomationTextRange):
self._rangeObj=position.clone()
else:
raise ValueError("Unknown position %s"%position)
def __eq__(self,other):
if self is other: return True
if self.__class__ is not other.__class__: return False
return bool(self._rangeObj.compare(other._rangeObj))
def _get_NVDAObjectAtStart(self):
e=self.UIAElementAtStart
if e:
return UIA(UIAElement=e) or self.obj
return self.obj
def _get_UIAElementAtStart(self):
"""
Fetches the deepest UIA element at the start of the text range.
This may be via UIA's getChildren (in the case of embedded controls), or GetEnclosingElement.
"""
tempInfo=self.copy()
tempInfo.collapse()
# some implementations (Edge, Word) do not correctly class embedded objects (graphics, checkboxes) as being the enclosing element, even when the range is completely within them. Rather, they still list the object in getChildren.
# Thus we must check getChildren before getEnclosingElement.
tempInfo.expand(textInfos.UNIT_CHARACTER)
tempRange=tempInfo._rangeObj
try:
children=getChildrenWithCacheFromUIATextRange(tempRange,UIAHandler.handler.baseCacheRequest)
except COMError as e:
log.debugWarning("Could not get children from UIA text range, %s"%e)
children=None
if children and children.length==1:
child=children.getElement(0)
else:
child=getEnclosingElementWithCacheFromUIATextRange(tempRange,UIAHandler.handler.baseCacheRequest)
return child
def _get_bookmark(self):
return self.copy()
UIAControlTypesWhereNameIsContent={
UIAHandler.UIA_ButtonControlTypeId,
UIAHandler.UIA_HyperlinkControlTypeId,
UIAHandler.UIA_ImageControlTypeId,
UIAHandler.UIA_MenuItemControlTypeId,
UIAHandler.UIA_TabItemControlTypeId,
UIAHandler.UIA_TextControlTypeId,
UIAHandler.UIA_SplitButtonControlTypeId
} if UIAHandler.isUIAAvailable else None
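# The set above lists control types whose name is treated as part of the document content
# (e.g. the label of a button or link) rather than reported separately; it is consulted
# via cachedControlType in _getControlFieldForObject below.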
def _getControlFieldForObject(self, obj,isEmbedded=False,startOfNode=False,endOfNode=False):
"""
Fetch control field information for the given UIA NVDAObject.
@param obj: the NVDAObject the control field is for.
@type obj: L{UIA}
@param isEmbedded: True if this NVDAObject is for a leaf node (has no useful children).
@type isEmbedded: bool
@param startOfNode: True if the control field represents the very start of this object.
@type startOfNode: bool
@param endOfNode: True if the control field represents the very end of this object.
@type endOfNode: bool
@return: The control field for this object
@rtype: textInfos.ControlField containing NVDA control field data.
"""
role = obj.role
field = textInfos.ControlField()
# Ensure this controlField is unique to the object
runtimeID=field['runtimeID']=obj.UIAElement.getRuntimeId()
field['_startOfNode']=startOfNode
field['_endOfNode']=endOfNode
field["role"] = obj.role
states = obj.states
# The user doesn't care about certain states, as they are obvious.
states.discard(controlTypes.STATE_EDITABLE)
states.discard(controlTypes.STATE_MULTILINE)
states.discard(controlTypes.STATE_FOCUSED)
field["states"] = states
field['nameIsContent']=nameIsContent=obj.UIAElement.cachedControlType in self.UIAControlTypesWhereNameIsContent
if not nameIsContent:
field['name']=obj.name
field["description"] = obj.description
field["level"] = obj.positionInfo.get("level")
if role == controlTypes.ROLE_TABLE:
field["table-id"] = runtimeID
try:
field["table-rowcount"] = obj.rowCount
field["table-columncount"] = obj.columnCount
except NotImplementedError:
pass
if role in (controlTypes.ROLE_TABLECELL, controlTypes.ROLE_DATAITEM,controlTypes.ROLE_TABLECOLUMNHEADER, controlTypes.ROLE_TABLEROWHEADER,controlTypes.ROLE_HEADERITEM):
try:
field["table-rownumber"] = obj.rowNumber
field["table-rowsspanned"] = obj.rowSpan
field["table-columnnumber"] = obj.columnNumber
field["table-columnsspanned"] = obj.columnSpan
field["table-id"] = obj.table.UIAElement.getRuntimeId()
field['role']=controlTypes.ROLE_TABLECELL
field['table-columnheadertext']=obj.columnHeaderText
field['table-rowheadertext']=obj.rowHeaderText
except NotImplementedError:
pass
return field
def _getTextFromUIARange(self,range):
"""
Fetches plain text from the given UI Automation text range.
Just calls getText(-1). This only exists to be overridden for filtering.
"""
return range.getText(-1)
def _getTextWithFields_text(self,textRange,formatConfig,UIAFormatUnits=None):
"""
Yields format fields and text for the given UI Automation text range, split up by the first available UI Automation text unit that does not result in mixed attribute values.
@param textRange: the UI Automation text range to walk.
@type textRange: L{UIAHandler.IUIAutomationTextRange}
@param formatConfig: the types of formatting requested.
@type formatConfig: a dictionary of NVDA document formatting configuration keys with values set to true for those types that should be fetched.
@param UIAFormatUnits: the UI Automation text units (in order of resolution) that should be used to split the text so as to avoid mixed attribute values. This is None by default.
If the parameter is a list of 1 or more units, the range will be split by the first unit in the list, and this method will be recursively run on each subrange, with the remaining units in this list given as the value of this parameter.
If this parameter is an empty list, then formatting and text is fetched for the entire range, but any mixed attribute values are ignored and no splitting occurs.
If this parameter is None, text and formatting is fetched for the entire range in one go, but if mixed attribute values are found, it will split by the first unit in self.UIAFormatUnits, and run this method recursively on each subrange, providing the remaining units from self.UIAFormatUnits as the value of this parameter.
@type UIAFormatUnits: List of UI Automation Text Units or None
@rtype: a Generator yielding L{textInfos.FieldCommand} objects containing L{textInfos.FormatField} objects, and text strings.
"""
log.debug("_getTextWithFields_text start")
if UIAFormatUnits:
unit=UIAFormatUnits[0]
furtherUIAFormatUnits=UIAFormatUnits[1:]
else:
# Fetching text and formatting from the entire range will be tried once before any possible splitting.
unit=None
furtherUIAFormatUnits=self.UIAFormatUnits if UIAFormatUnits is None else []
log.debug("Walking by unit %s"%unit)
log.debug("With further units of: %s"%furtherUIAFormatUnits)
rangeIter=iterUIARangeByUnit(textRange,unit) if unit is not None else [textRange]
for tempRange in rangeIter:
text=self._getTextFromUIARange(tempRange) or ""
if text:
log.debug("Chunk has text. Fetching formatting")
try:
field=self._getFormatFieldAtRange(tempRange,formatConfig,ignoreMixedValues=len(furtherUIAFormatUnits)==0)
except UIAMixedAttributeError:
log.debug("Mixed formatting. Trying higher resolution unit")
for subfield in self._getTextWithFields_text(tempRange,formatConfig,UIAFormatUnits=furtherUIAFormatUnits):
yield subfield
log.debug("Done yielding higher resolution unit")
continue
log.debug("Yielding formatting and text")
yield field
yield text
log.debug("Done _getTextWithFields_text")
def _getTextWithFieldsForUIARange(self,rootElement,textRange,formatConfig,includeRoot=False,alwaysWalkAncestors=True,recurseChildren=True,_rootElementClipped=(True,True)):
"""
Yields start and end control fields, and text, for the given UI Automation text range.
@param rootElement: the highest ancestor that encloses the given text range. This function will not walk higher than this point.
@type rootElement: L{UIAHandler.IUIAutomationElement}
@param textRange: the UI Automation text range whose content should be fetched.
@type textRange: L{UIAHandler.IUIAutomationTextRange}
@param formatConfig: the types of formatting requested.
@type formatConfig: a dictionary of NVDA document formatting configuration keys with values set to true for those types that should be fetched.
@param includeRoot: If true, then a control start and end will be yielded for the root element.
@type includeRoot: bool
@param alwaysWalkAncestors: If true then control fields will be yielded for any element enclosing the given text range, that is a descendant of the root element. If false then the root element may be assumed to be the only ancestor.
@type alwaysWalkAncestors: bool
@param recurseChildren: If true, this function will be recursively called for each child of the given text range, clipped to the bounds of this text range. Formatted text between the children will also be yielded. If false, only formatted text will be yielded.
@type recurseChildren: bool
@param _rootElementClipped: Indicates if textRange represents all of the given rootElement, or is clipped at the start or end.
@type _rootElementClipped: 2-tuple
@rtype: A generator that yields L{textInfos.FieldCommand} objects and text strings.
"""
if log.isEnabledFor(log.DEBUG):
log.debug("_getTextWithFieldsForUIARange")
log.debug("rootElement: %s"%rootElement.currentLocalizedControlType if rootElement else None)
log.debug("full text: %s"%textRange.getText(-1))
if recurseChildren:
childElements=getChildrenWithCacheFromUIATextRange(textRange,self._controlFieldUIACacheRequest)
# Specific check for embedded elements (checkboxes etc)
# Calling getChildren on their childRange always gives back the same child.
if childElements.length==1:
childElement=childElements.getElement(0)
if childElement and UIAHandler.handler.clientObject.compareElements(childElement,rootElement):
log.debug("Detected embedded child")
recurseChildren=False
parentElements=[]
if alwaysWalkAncestors:
log.debug("Fetching parents starting from enclosingElement")
try:
parentElement=getEnclosingElementWithCacheFromUIATextRange(textRange,self._controlFieldUIACacheRequest)
except COMError:
parentElement=None
while parentElement:
isRoot=UIAHandler.handler.clientObject.compareElements(parentElement,rootElement)
if isRoot:
log.debug("Hit root")
parentElements.append((parentElement,_rootElementClipped))
break
else:
if log.isEnabledFor(log.DEBUG):
log.debug("parentElement: %s"%parentElement.currentLocalizedControlType)
try:
parentRange=self.obj.UIATextPattern.rangeFromChild(parentElement)
except COMError:
parentRange=None
if not parentRange:
log.debug("parentRange is NULL. Breaking")
break
clippedStart=textRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_Start,parentRange,UIAHandler.TextPatternRangeEndpoint_Start)>0
clippedEnd=textRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_End,parentRange,UIAHandler.TextPatternRangeEndpoint_End)<0
parentElements.append((parentElement,(clippedStart,clippedEnd)))
parentElement=UIAHandler.handler.baseTreeWalker.getParentElementBuildCache(parentElement,self._controlFieldUIACacheRequest)
else:
parentElements.append((rootElement,_rootElementClipped))
log.debug("Done fetching parents")
enclosingElement=parentElements[0][0] if parentElements else rootElement
if not includeRoot and parentElements:
del parentElements[-1]
parentFields=[]
log.debug("Generating controlFields for parents")
windowHandle=self.obj.windowHandle
controlFieldNVDAObjectClass=self.controlFieldNVDAObjectClass
for index,(parentElement,parentClipped) in enumerate(parentElements):
if log.isEnabledFor(log.DEBUG):
log.debug("parentElement: %s"%parentElement.currentLocalizedControlType)
startOfNode=not parentClipped[0]
endOfNode=not parentClipped[1]
try:
obj=controlFieldNVDAObjectClass(windowHandle=windowHandle,UIAElement=parentElement,initialUIACachedPropertyIDs=self._controlFieldUIACachedPropertyIDs)
field=self._getControlFieldForObject(obj,isEmbedded=(index==0 and not recurseChildren),startOfNode=startOfNode,endOfNode=endOfNode)
except LookupError:
log.debug("Failed to fetch controlField data for parentElement. Breaking")
continue
if not field:
continue
parentFields.append(field)
log.debug("Done generating controlFields for parents")
log.debug("Yielding control starts for parents")
for field in reversed(parentFields):
yield textInfos.FieldCommand("controlStart",field)
log.debug("Done yielding control starts for parents")
del parentElements
log.debug("Yielding balanced fields for textRange")
# Move through the text range, collecting text and recursing into children
#: This variable is used to span lengths of plain text between child ranges as we iterate over getChildren
childCount=childElements.length if recurseChildren else 0
if childCount>0:
tempRange=textRange.clone()
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,tempRange,UIAHandler.TextPatternRangeEndpoint_Start)
if log.isEnabledFor(log.DEBUG):
log.debug("Child count: %s"%childElements.length)
log.debug("Walking children")
lastChildIndex=childCount-1
lastChildEndDelta=0
documentTextPattern=self.obj.UIATextPattern
rootElementControlType=rootElement.cachedControlType
for index in xrange(childCount):
childElement=childElements.getElement(index)
if not childElement or UIAHandler.handler.clientObject.compareElements(childElement,enclosingElement):
log.debug("NULL childElement. Skipping")
continue
if rootElementControlType==UIAHandler.UIA_DataItemControlTypeId:
# #9090: MS Word has a rare bug where a child of a table cell's UIA textRange can be its containing page.
# At very least stop the infinite recursion.
childAutomationID=childElement.cachedAutomationId or ""
if childAutomationID.startswith('UIA_AutomationId_Word_Page_'):
continue
if log.isEnabledFor(log.DEBUG):
log.debug("Fetched child %s (%s)"%(index,childElement.currentLocalizedControlType))
try:
childRange=documentTextPattern.rangeFromChild(childElement)
except COMError as e:
log.debug("rangeFromChild failed with %s"%e)
childRange=None
if not childRange:
log.debug("NULL childRange. Skipping")
continue
clippedStart=clippedEnd=False
if index==lastChildIndex and childRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_Start,textRange,UIAHandler.TextPatternRangeEndpoint_End)>=0:
log.debug("Child at or past end of textRange. Breaking")
break
if index==lastChildIndex:
lastChildEndDelta=childRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_End,textRange,UIAHandler.TextPatternRangeEndpoint_End)
if lastChildEndDelta>0:
log.debug("textRange ended part way through the child. Crop end of childRange to fit")
childRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,textRange,UIAHandler.TextPatternRangeEndpoint_End)
clippedEnd=True
childStartDelta=childRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_Start,tempRange,UIAHandler.TextPatternRangeEndpoint_End)
if childStartDelta>0:
# plain text before this child
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,childRange,UIAHandler.TextPatternRangeEndpoint_Start)
log.debug("Plain text before child")
for field in self._getTextWithFields_text(tempRange,formatConfig):
yield field
elif childStartDelta<0:
log.debug("textRange started part way through child. Cropping Start of child range to fit" )
childRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_Start,tempRange,UIAHandler.TextPatternRangeEndpoint_End)
clippedStart=True
if (index==0 or index==lastChildIndex) and childRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_Start,childRange,UIAHandler.TextPatternRangeEndpoint_End)==0:
log.debug("childRange is degenerate. Skipping")
continue
log.debug("Recursing into child %s"%index)
for field in self._getTextWithFieldsForUIARange(childElement,childRange,formatConfig,includeRoot=True,alwaysWalkAncestors=False,_rootElementClipped=(clippedStart,clippedEnd)):
yield field
log.debug("Done recursing into child %s"%index)
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_Start,childRange,UIAHandler.TextPatternRangeEndpoint_End)
log.debug("children done")
# Plain text after the final child
if tempRange.CompareEndpoints(UIAHandler.TextPatternRangeEndpoint_Start,textRange,UIAHandler.TextPatternRangeEndpoint_End)<0:
tempRange.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,textRange,UIAHandler.TextPatternRangeEndpoint_End)
log.debug("Yielding final text")
for field in self._getTextWithFields_text(tempRange,formatConfig):
yield field
else: #no children
log.debug("no children")
log.debug("Yielding text")
for field in self._getTextWithFields_text(textRange,formatConfig):
yield field
for field in parentFields:
log.debug("Yielding controlEnd for parentElement")
yield textInfos.FieldCommand("controlEnd",field)
log.debug("_getTextWithFieldsForUIARange end")
def getTextWithFields(self,formatConfig=None):
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
fields=list(self._getTextWithFieldsForUIARange(self.obj.UIAElement,self._rangeObj,formatConfig))
return fields
def _get_text(self):
return self._getTextFromUIARange(self._rangeObj)
def _getBoundingRectsFromUIARange(self,range):
"""
Fetches per line bounding rectangles from the given UI Automation text range.
Note that if the range object doesn't cover a whole line (e.g. a character),
the bounding rectangle will be restricted to the range.
@rtype: [locationHelper.RectLTWH]
"""
rects = []
rectArray = range.GetBoundingRectangles()
if not rectArray:
return rects
rectIndexes = xrange(0, len(rectArray), 4)
rectGen = (RectLTWH.fromFloatCollection(*rectArray[i:i+4]) for i in rectIndexes)
rects.extend(rectGen)
return rects
def _get_boundingRects(self):
return self._getBoundingRectsFromUIARange(self._rangeObj)
def expand(self,unit):
UIAUnit=UIAHandler.NVDAUnitsToUIAUnits[unit]
self._rangeObj.ExpandToEnclosingUnit(UIAUnit)
def move(self,unit,direction,endPoint=None):
UIAUnit=UIAHandler.NVDAUnitsToUIAUnits[unit]
if endPoint=="start":
res=self._rangeObj.MoveEndpointByUnit(UIAHandler.TextPatternRangeEndpoint_Start,UIAUnit,direction)
elif endPoint=="end":
res=self._rangeObj.MoveEndpointByUnit(UIAHandler.TextPatternRangeEndpoint_End,UIAUnit,direction)
else:
res=self._rangeObj.Move(UIAUnit,direction)
#Some Implementations of Move and moveEndpointByUnit return a positive number even if the direction is negative
if direction<0 and res>0:
res=0-res
return res
def copy(self):
return self.__class__(self.obj,None,_rangeObj=self._rangeObj)
def collapse(self,end=False):
if end:
self._rangeObj.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_Start,self._rangeObj,UIAHandler.TextPatternRangeEndpoint_End)
else:
self._rangeObj.MoveEndpointByRange(UIAHandler.TextPatternRangeEndpoint_End,self._rangeObj,UIAHandler.TextPatternRangeEndpoint_Start)
def compareEndPoints(self,other,which):
if which.startswith('start'):
src=UIAHandler.TextPatternRangeEndpoint_Start
else:
src=UIAHandler.TextPatternRangeEndpoint_End
if which.endswith('Start'):
target=UIAHandler.TextPatternRangeEndpoint_Start
else:
target=UIAHandler.TextPatternRangeEndpoint_End
return self._rangeObj.CompareEndpoints(src,other._rangeObj,target)
def setEndPoint(self,other,which):
if which.startswith('start'):
src=UIAHandler.TextPatternRangeEndpoint_Start
else:
src=UIAHandler.TextPatternRangeEndpoint_End
if which.endswith('Start'):
target=UIAHandler.TextPatternRangeEndpoint_Start
else:
target=UIAHandler.TextPatternRangeEndpoint_End
self._rangeObj.MoveEndpointByRange(src,other._rangeObj,target)
def updateSelection(self):
self._rangeObj.Select()
updateCaret = updateSelection
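# Minimal usage sketch for UIATextInfo (illustrative; the real callers live elsewhere in NVDA):
#   info = someUIAObject.makeTextInfo(textInfos.POSITION_CARET)
#   info.expand(textInfos.UNIT_LINE)
#   fields = info.getTextWithFields()
# someUIAObject and makeTextInfo are assumed from the wider NVDA object model.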
class UIA(Window):
def _get__coreCycleUIAPropertyCacheElementCache(self):
"""
A per-core-cycle dictionary mapping UIA property IDs to UIAElements that already have that property cached.
Multiple cache elements may exist when, for example, the UIA NVDAObject was instantiated with a UIA element already containing a UI Automation cache (appropriate for generating control fields), but another UIA NVDAObject property (e.g. states) needs to bulk-fetch a set of UIA properties of its own that were not in the original cache.
"""
return {}
def _getUIACacheablePropertyValue(self,ID,ignoreDefault=False):
"""
Fetches the value for a UI Automation property from an element cache available in this core cycle. If not cached then a new value will be fetched.
"""
elementCache=self._coreCycleUIAPropertyCacheElementCache
# If we have a UIAElement whose own cache contains the property, fetch the value from there
cacheElement=elementCache.get(ID,None)
if cacheElement:
value=cacheElement.getCachedPropertyValueEx(ID,ignoreDefault)
else:
# The value is cached nowhere, so ask the UIAElement for its current value for the property
value=self.UIAElement.getCurrentPropertyValueEx(ID,ignoreDefault)
return value
def _prefetchUIACacheForPropertyIDs(self,IDs):
"""
Fetch values for all the given UI Automation property IDs in one cache request, making them available for this core cycle.
"""
elementCache=self._coreCycleUIAPropertyCacheElementCache
if elementCache:
# Ignore any IDs we already have cached values or cache UIAElements for
IDs={x for x in IDs if x not in elementCache}
if len(IDs)<2:
# Creating a UIA cache request for 1 or 0 properties is pointless
return
cacheRequest=UIAHandler.handler.clientObject.createCacheRequest()
for ID in IDs:
try:
cacheRequest.addProperty(ID)
except COMError:
log.debug("Couldn't add property ID %d to cache request, most likely unsupported on this version of Windows"%ID)
try:
cacheElement=self.UIAElement.buildUpdatedCache(cacheRequest)
except COMError:
log.debugWarning("IUIAutomationElement.buildUpdatedCache failed given IDs of %s"%IDs)
return
for ID in IDs:
elementCache[ID]=cacheElement
def findOverlayClasses(self,clsList):
UIAControlType=self.UIAElement.cachedControlType
UIAClassName=self.UIAElement.cachedClassName
if UIAClassName=="NetUITWMenuItem" and UIAControlType==UIAHandler.UIA_MenuItemControlTypeId and not self.name and not self.previous:
# Bounces focus from a netUI dead placeholder menu item when no item is selected up to the menu itself.
clsList.append(PlaceholderNetUITWMenuItem)
elif UIAClassName=="WpfTextView":
clsList.append(WpfTextView)
elif UIAClassName=="NetUIDropdownAnchor":
clsList.append(NetUIDropdownAnchor)
elif self.TextInfo==UIATextInfo and (UIAClassName=='_WwG' or self.windowClassName=='_WwG' or self.UIAElement.cachedAutomationID.startswith('UIA_AutomationId_Word_Content')):
from .wordDocument import WordDocument, WordDocumentNode
if self.role==controlTypes.ROLE_DOCUMENT:
clsList.append(WordDocument)
else:
clsList.append(WordDocumentNode)
# #5136: Windows 8.x and Windows 10 uses different window class and other attributes for toast notifications.
elif UIAClassName=="ToastContentHost" and UIAControlType==UIAHandler.UIA_ToolTipControlTypeId: #Windows 8.x
clsList.append(Toast_win8)
elif self.windowClassName=="Windows.UI.Core.CoreWindow" and UIAControlType==UIAHandler.UIA_WindowControlTypeId and "ToastView" in self.UIAElement.cachedAutomationId: # Windows 10
clsList.append(Toast_win10)
elif self.UIAElement.cachedFrameworkID in ("InternetExplorer","MicrosoftEdge"):
from . import edge
if UIAClassName in ("Internet Explorer_Server","WebView") and self.role==controlTypes.ROLE_PANE:
clsList.append(edge.EdgeHTMLRootContainer)
elif (self.UIATextPattern and
# #6998: Edge normally gives its root node a controlType of pane, but ARIA role="document" changes the controlType to document
self.role in (controlTypes.ROLE_PANE,controlTypes.ROLE_DOCUMENT) and
self.parent and (isinstance(self.parent,edge.EdgeHTMLRootContainer) or not isinstance(self.parent,edge.EdgeNode))
):
clsList.append(edge.EdgeHTMLRoot)
elif self.role==controlTypes.ROLE_LIST:
clsList.append(edge.EdgeList)
else:
clsList.append(edge.EdgeNode)
elif self.role==controlTypes.ROLE_DOCUMENT and self.UIAElement.cachedAutomationId=="Microsoft.Windows.PDF.DocumentView":
# PDFs
from . import edge
clsList.append(edge.EdgeHTMLRoot)
if UIAControlType==UIAHandler.UIA_ProgressBarControlTypeId:
clsList.append(ProgressBar)
if UIAClassName=="ControlPanelLink":
clsList.append(ControlPanelLink)
if UIAClassName=="UIColumnHeader":
clsList.append(UIColumnHeader)
elif UIAClassName=="UIItem":
clsList.append(UIItem)
elif UIAClassName=="SensitiveSlider":
clsList.append(SensitiveSlider)
if UIAControlType==UIAHandler.UIA_TreeItemControlTypeId:
clsList.append(TreeviewItem)
if UIAControlType==UIAHandler.UIA_MenuItemControlTypeId:
clsList.append(MenuItem)
# Some combo boxes and looping selectors do not expose value pattern.
elif (UIAControlType==UIAHandler.UIA_ComboBoxControlTypeId
# #5231: Announce values in time pickers by "transforming" them into combo box without value pattern objects.
or (UIAControlType==UIAHandler.UIA_ListControlTypeId and "LoopingSelector" in UIAClassName)):
try:
if not self._getUIACacheablePropertyValue(UIAHandler.UIA_IsValuePatternAvailablePropertyId):
clsList.append(ComboBoxWithoutValuePattern)
except COMError:
pass
elif UIAControlType==UIAHandler.UIA_ListItemControlTypeId:
clsList.append(ListItem)
# #5942: In Windows 10 build 14332 and later, Microsoft rewrote various dialog code including that of User Account Control.
# #8405: there are more dialogs scattered throughout Windows 10 and various apps.
# Dialog detection is a bit easier on build 17682 and later thanks to IsDialog property.
try:
isDialog = self._getUIACacheablePropertyValue(UIAHandler.UIA_IsDialogPropertyId)
except COMError:
# We can fallback to a known set of dialog classes for window elements.
isDialog = (self.UIAIsWindowElement and UIAClassName in UIAHandler.UIADialogClassNames)
if isDialog:
clsList.append(Dialog)
# #6241: Try detecting all possible suggestions containers and search fields scattered throughout Windows 10.
# In Windows 10, allow Start menu search box and Edge's address omnibar to participate in announcing appearance of auto-suggestions.
if self.UIAElement.cachedAutomationID in ("SearchTextBox", "TextBox", "addressEditBox"):
clsList.append(SearchField)
try:
# Nested block here in order to catch value error and variable binding error when attempting to access automation ID for invalid elements.
try:
# #6241: Raw UIA base tree walker is better than simply looking at self.parent when locating suggestion list items.
parentElement=UIAHandler.handler.baseTreeWalker.GetParentElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
# Sometimes, fetching the parent (list control) via the base tree walker fails, especially when dealing with suggestions in the Windows 10 Start menu.
# Oddly, we need to take care of context menu for Start search suggestions as well.
if parentElement.cachedAutomationId.lower() in ("suggestionslist", "contextmenu"):
clsList.append(SuggestionListItem)
except COMError:
pass
except ValueError:
pass
# Support Windows Console's UIA interface
if (
self.windowClassName == "ConsoleWindowClass"
and config.conf['UIA']['winConsoleImplementation'] == "UIA"
):
from . import winConsoleUIA
winConsoleUIA.findExtraOverlayClasses(self, clsList)
# Add editableText support if UIA supports a text pattern
if self.TextInfo==UIATextInfo:
clsList.append(EditableTextWithoutAutoSelectDetection)
clsList.append(UIA)
if self.UIAIsWindowElement:
super(UIA,self).findOverlayClasses(clsList)
if self.UIATextPattern:
#Since there is a UIA text pattern, there is no need to use the win32 edit support at all
import NVDAObjects.window.edit
for x in list(clsList):
if issubclass(x,NVDAObjects.window.edit.Edit):
clsList.remove(x)
@classmethod
def kwargsFromSuper(cls,kwargs,relation=None):
UIAElement=None
windowHandle=kwargs.get('windowHandle')
if isinstance(relation,tuple):
UIAElement=UIAHandler.handler.clientObject.ElementFromPointBuildCache(POINT(relation[0],relation[1]),UIAHandler.handler.baseCacheRequest)
elif relation=="focus":
try:
UIAElement=UIAHandler.handler.clientObject.getFocusedElementBuildCache(UIAHandler.handler.baseCacheRequest)
# This object may be in a different window, so we need to recalculate the window handle.
kwargs['windowHandle']=None
except COMError:
log.debugWarning("getFocusedElement failed", exc_info=True)
else:
UIAElement=UIAHandler.handler.clientObject.ElementFromHandleBuildCache(windowHandle,UIAHandler.handler.baseCacheRequest)
if not UIAElement:
return False
kwargs['UIAElement']=UIAElement
return True
def getNormalizedUIATextRangeFromElement(self,UIAElement):
"""Simply fetches a UIA text range for the given UIAElement, allowing subclasses to process the range first."""
return UIATextRangeFromElement(self.UIATextPattern,UIAElement)
def __init__(self,windowHandle=None,UIAElement=None,initialUIACachedPropertyIDs=None):
"""
An NVDAObject for a UI Automation element.
@param windowHandle: if a UIAElement is not specifically given, then this windowHandle is used to fetch its root UIAElement
@type windowHandle: int
@param UIAElement: the UI Automation element that should be represented by this NVDAObject
The UI Automation element must have been created with a L{UIAHandler.handler.baseCacheRequest}
@type UIAElement: L{UIAHandler.IUIAutomationElement}
@param initialUIACachedPropertyIDs: Extra UI Automation properties the given UIAElement has already had cached with a UIA cache request that inherits from L{UIAHandler.handler.baseCacheRequest}.
Cached values of these properties will be available for the remainder of the current core cycle. After that, new values will be fetched.
@type initialUIACachedPropertyIDs: an iterable of UI Automation property IDs
"""
if not UIAElement:
raise ValueError("needs a UIA element")
self.UIAElement=UIAElement
UIACachedWindowHandle=UIAElement.cachedNativeWindowHandle
self.UIAIsWindowElement=bool(UIACachedWindowHandle)
if not windowHandle:
windowHandle=UIAHandler.handler.getNearestWindowHandle(UIAElement)
if not windowHandle:
raise InvalidNVDAObject("no windowHandle")
super(UIA,self).__init__(windowHandle=windowHandle)
self.initialUIACachedPropertyIDs=initialUIACachedPropertyIDs
if initialUIACachedPropertyIDs:
elementCache=self._coreCycleUIAPropertyCacheElementCache
for ID in initialUIACachedPropertyIDs:
elementCache[ID]=self.UIAElement
def _isEqual(self,other):
if not isinstance(other,UIA):
return False
try:
return UIAHandler.handler.clientObject.CompareElements(self.UIAElement,other.UIAElement)
except:
return False
def _get_shouldAllowUIAFocusEvent(self):
try:
return bool(self._getUIACacheablePropertyValue(UIAHandler.UIA_HasKeyboardFocusPropertyId))
except COMError:
return True
_lastLiveRegionChangeInfo=(None,None) #: Keeps track of the last live region change (text, time)
def _get__shouldAllowUIALiveRegionChangeEvent(self):
"""
This property decides whether a live region change event should be allowed. It compares the live region event with the last one received, only allowing the event if the text (name) is different, or if the time since the last one is at least 0.5 seconds.
"""
oldText,oldTime=self._lastLiveRegionChangeInfo
newText=self.name
newTime=time.time()
self.__class__._lastLiveRegionChangeInfo=(newText,newTime)
if newText==oldText and oldTime is not None and (newTime-oldTime)<0.5:
return False
return True
def _getUIAPattern(self,ID,interface,cache=False):
punk=self.UIAElement.GetCachedPattern(ID) if cache else self.UIAElement.GetCurrentPattern(ID)
if punk:
return punk.QueryInterface(interface)
def _get_UIAInvokePattern(self):
self.UIAInvokePattern=self._getUIAPattern(UIAHandler.UIA_InvokePatternId,UIAHandler.IUIAutomationInvokePattern)
return self.UIAInvokePattern
def _get_UIAGridPattern(self):
self.UIAGridPattern=self._getUIAPattern(UIAHandler.UIA_GridPatternId,UIAHandler.IUIAutomationGridPattern)
return self.UIAGridPattern
def _get_UIATogglePattern(self):
self.UIATogglePattern=self._getUIAPattern(UIAHandler.UIA_TogglePatternId,UIAHandler.IUIAutomationTogglePattern)
return self.UIATogglePattern
def _get_UIASelectionItemPattern(self):
self.UIASelectionItemPattern=self._getUIAPattern(UIAHandler.UIA_SelectionItemPatternId,UIAHandler.IUIAutomationSelectionItemPattern)
return self.UIASelectionItemPattern
def _get_UIATextPattern(self):
self.UIATextPattern=self._getUIAPattern(UIAHandler.UIA_TextPatternId,UIAHandler.IUIAutomationTextPattern,cache=True)
return self.UIATextPattern
def _get_UIATextEditPattern(self):
if not isinstance(UIAHandler.handler.clientObject,UIAHandler.IUIAutomation3):
return None
self.UIATextEditPattern=self._getUIAPattern(UIAHandler.UIA_TextEditPatternId,UIAHandler.IUIAutomationTextEditPattern,cache=False)
return self.UIATextEditPattern
def _get_UIALegacyIAccessiblePattern(self):
self.UIALegacyIAccessiblePattern=self._getUIAPattern(UIAHandler.UIA_LegacyIAccessiblePatternId,UIAHandler.IUIAutomationLegacyIAccessiblePattern)
return self.UIALegacyIAccessiblePattern
_TextInfo=UIATextInfo
def _get_TextInfo(self):
if self.UIATextPattern: return self._TextInfo
textInfo=super(UIA,self).TextInfo
if textInfo is NVDAObjectTextInfo and self.UIAIsWindowElement and self.role==controlTypes.ROLE_WINDOW:
import displayModel
return displayModel.DisplayModelTextInfo
return textInfo
def setFocus(self):
self.UIAElement.setFocus()
def _get_devInfo(self):
info=super(UIA,self).devInfo
info.append("UIAElement: %r"%self.UIAElement)
try:
ret=self.UIAElement.currentAutomationID
except Exception as e:
ret="Exception: %s"%e
info.append("UIA automationID: %s"%ret)
try:
ret=self.UIAElement.cachedFrameworkID
except Exception as e:
ret="Exception: %s"%e
info.append("UIA frameworkID: %s"%ret)
try:
ret=str(self.UIAElement.getRuntimeID())
except Exception as e:
ret="Exception: %s"%e
info.append("UIA runtimeID: %s"%ret)
try:
ret=self.UIAElement.cachedProviderDescription
except Exception as e:
ret="Exception: %s"%e
info.append("UIA providerDescription: %s"%ret)
try:
ret=self.UIAElement.currentClassName
except Exception as e:
ret="Exception: %s"%e
info.append("UIA className: %s"%ret)
patternsAvailable = []
patternAvailableConsts = dict(
(const, name) for name, const in UIAHandler.__dict__.iteritems()
if name.startswith("UIA_Is") and name.endswith("PatternAvailablePropertyId")
)
self._prefetchUIACacheForPropertyIDs(list(patternAvailableConsts))
for const, name in patternAvailableConsts.iteritems():
try:
res = self._getUIACacheablePropertyValue(const)
except COMError:
res = False
if res:
# Every name has the same format, so the string indexes can be safely hardcoded here.
patternsAvailable.append(name[6:-19])
info.append("UIA patterns available: %s"%", ".join(patternsAvailable))
return info
def _get_name(self):
try:
return self._getUIACacheablePropertyValue(UIAHandler.UIA_NamePropertyId)
except COMError:
return ""
def _get_role(self):
role=UIAHandler.UIAControlTypesToNVDARoles.get(self.UIAElement.cachedControlType,controlTypes.ROLE_UNKNOWN)
if role==controlTypes.ROLE_BUTTON:
try:
s=self._getUIACacheablePropertyValue(UIAHandler.UIA_ToggleToggleStatePropertyId,True)
except COMError:
s=UIAHandler.handler.reservedNotSupportedValue
if s!=UIAHandler.handler.reservedNotSupportedValue:
role=controlTypes.ROLE_TOGGLEBUTTON
elif role in (controlTypes.ROLE_UNKNOWN,controlTypes.ROLE_PANE,controlTypes.ROLE_WINDOW) and self.windowHandle:
superRole=super(UIA,self).role
if superRole!=controlTypes.ROLE_WINDOW:
role=superRole
return role
def _get_description(self):
try:
return self._getUIACacheablePropertyValue(UIAHandler.UIA_HelpTextPropertyId) or ""
except COMError:
return ""
def _get_keyboardShortcut(self):
# Build the keyboard shortcuts list early for readability.
shortcuts = []
accessKey = self._getUIACacheablePropertyValue(UIAHandler.UIA_AccessKeyPropertyId)
# #6779: Don't add access key to the shortcut list if UIA says access key is None, resolves concatenation error in focus events, object navigation and so on.
# In rare cases, access key itself is None.
if accessKey:
shortcuts.append(accessKey)
acceleratorKey = self._getUIACacheablePropertyValue(UIAHandler.UIA_AcceleratorKeyPropertyId)
# Same case as access key.
if acceleratorKey:
shortcuts.append(acceleratorKey)
# #6790: Do not add two spaces unless both access key and accelerator are present in order to not waste string real estate.
return " ".join(shortcuts) if shortcuts else ""
_UIAStatesPropertyIDs={
UIAHandler.UIA_HasKeyboardFocusPropertyId,
UIAHandler.UIA_SelectionItemIsSelectedPropertyId,
UIAHandler.UIA_IsDataValidForFormPropertyId,
UIAHandler.UIA_IsRequiredForFormPropertyId,
UIAHandler.UIA_ValueIsReadOnlyPropertyId,
UIAHandler.UIA_ExpandCollapseExpandCollapseStatePropertyId,
UIAHandler.UIA_ToggleToggleStatePropertyId,
UIAHandler.UIA_IsKeyboardFocusablePropertyId,
UIAHandler.UIA_IsPasswordPropertyId,
UIAHandler.UIA_IsSelectionItemPatternAvailablePropertyId,
UIAHandler.UIA_IsEnabledPropertyId,
UIAHandler.UIA_IsOffscreenPropertyId,
} if UIAHandler.isUIAAvailable else set()
def _get_states(self):
states=set()
self._prefetchUIACacheForPropertyIDs(self._UIAStatesPropertyIDs)
try:
hasKeyboardFocus=self._getUIACacheablePropertyValue(UIAHandler.UIA_HasKeyboardFocusPropertyId)
except COMError:
hasKeyboardFocus=False
if hasKeyboardFocus:
states.add(controlTypes.STATE_FOCUSED)
if self._getUIACacheablePropertyValue(UIAHandler.UIA_IsKeyboardFocusablePropertyId):
states.add(controlTypes.STATE_FOCUSABLE)
if self._getUIACacheablePropertyValue(UIAHandler.UIA_IsPasswordPropertyId):
states.add(controlTypes.STATE_PROTECTED)
# Don't fetch the role unless we must, but never fetch it more than once.
role=None
if self._getUIACacheablePropertyValue(UIAHandler.UIA_IsSelectionItemPatternAvailablePropertyId):
role=self.role
states.add(controlTypes.STATE_CHECKABLE if role==controlTypes.ROLE_RADIOBUTTON else controlTypes.STATE_SELECTABLE)
if self._getUIACacheablePropertyValue(UIAHandler.UIA_SelectionItemIsSelectedPropertyId):
states.add(controlTypes.STATE_CHECKED if role==controlTypes.ROLE_RADIOBUTTON else controlTypes.STATE_SELECTED)
if not self._getUIACacheablePropertyValue(UIAHandler.UIA_IsEnabledPropertyId,True):
states.add(controlTypes.STATE_UNAVAILABLE)
try:
isOffScreen = self._getUIACacheablePropertyValue(UIAHandler.UIA_IsOffscreenPropertyId)
except COMError:
isOffScreen = False
if isOffScreen:
states.add(controlTypes.STATE_OFFSCREEN)
try:
isDataValid=self._getUIACacheablePropertyValue(UIAHandler.UIA_IsDataValidForFormPropertyId,True)
except COMError:
isDataValid=UIAHandler.handler.reservedNotSupportedValue
if not isDataValid:
states.add(controlTypes.STATE_INVALID_ENTRY)
if self._getUIACacheablePropertyValue(UIAHandler.UIA_IsRequiredForFormPropertyId):
states.add(controlTypes.STATE_REQUIRED)
try:
isReadOnly=self._getUIACacheablePropertyValue(UIAHandler.UIA_ValueIsReadOnlyPropertyId,True)
except COMError:
isReadOnly=UIAHandler.handler.reservedNotSupportedValue
if isReadOnly and isReadOnly!=UIAHandler.handler.reservedNotSupportedValue:
states.add(controlTypes.STATE_READONLY)
try:
s=self._getUIACacheablePropertyValue(UIAHandler.UIA_ExpandCollapseExpandCollapseStatePropertyId,True)
except COMError:
s=UIAHandler.handler.reservedNotSupportedValue
if s!=UIAHandler.handler.reservedNotSupportedValue:
if s==UIAHandler.ExpandCollapseState_Collapsed:
states.add(controlTypes.STATE_COLLAPSED)
elif s==UIAHandler.ExpandCollapseState_Expanded:
states.add(controlTypes.STATE_EXPANDED)
try:
s=self._getUIACacheablePropertyValue(UIAHandler.UIA_ToggleToggleStatePropertyId,True)
except COMError:
s=UIAHandler.handler.reservedNotSupportedValue
if s!=UIAHandler.handler.reservedNotSupportedValue:
if not role:
role=self.role
if role==controlTypes.ROLE_TOGGLEBUTTON:
if s==UIAHandler.ToggleState_On:
states.add(controlTypes.STATE_PRESSED)
else:
states.add(controlTypes.STATE_CHECKABLE)
if s==UIAHandler.ToggleState_On:
states.add(controlTypes.STATE_CHECKED)
return states
def _get_presentationType(self):
presentationType=super(UIA,self).presentationType
# UIA NVDAObjects can only be considered content if UI Automation considers them both a control and content.
if presentationType==self.presType_content and not (self.UIAElement.cachedIsContentElement and self.UIAElement.cachedIsControlElement):
presentationType=self.presType_layout
return presentationType
def correctAPIForRelation(self, obj, relation=None):
if obj and self.windowHandle != obj.windowHandle and not obj.UIAElement.cachedNativeWindowHandle:
# The target element is not the root element for the window, so don't change API class; i.e. always use UIA.
return obj
return super(UIA, self).correctAPIForRelation(obj, relation)
def _get_parent(self):
try:
parentElement=UIAHandler.handler.baseTreeWalker.GetParentElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
parentElement=None
if not parentElement:
return super(UIA,self).parent
if not parentElement.CachedNativeWindowHandle and not self.UIAElement.CachedNativeWindowHandle:
# Neither self nor parent has a window handle of its own, so their nearest window handle will be the same.
# Cache this on the parent if cached on self, to avoid fetching it later.
try:
parentElement._nearestWindowHandle=self.UIAElement._nearestWindowHandle
except AttributeError:
# _nearestWindowHandle may not exist on self if self was instantiated given a windowHandle.
pass
return self.correctAPIForRelation(UIA(UIAElement=parentElement),relation="parent")
def _get_previous(self):
try:
previousElement=UIAHandler.handler.baseTreeWalker.GetPreviousSiblingElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
log.debugWarning("Tree walker failed", exc_info=True)
return None
if not previousElement:
return None
return self.correctAPIForRelation(UIA(UIAElement=previousElement))
def _get_next(self):
try:
nextElement=UIAHandler.handler.baseTreeWalker.GetNextSiblingElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
log.debugWarning("Tree walker failed", exc_info=True)
return None
if not nextElement:
return None
return self.correctAPIForRelation(UIA(UIAElement=nextElement))
def _get_firstChild(self):
try:
firstChildElement=UIAHandler.handler.baseTreeWalker.GetFirstChildElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
log.debugWarning("Tree walker failed", exc_info=True)
return None
if not firstChildElement:
return None
return self.correctAPIForRelation(UIA(UIAElement=firstChildElement))
def _get_lastChild(self):
try:
lastChildElement=UIAHandler.handler.baseTreeWalker.GetLastChildElementBuildCache(self.UIAElement,UIAHandler.handler.baseCacheRequest)
except COMError:
log.debugWarning("Tree walker failed", exc_info=True)
return None
if not lastChildElement:
return None
return self.correctAPIForRelation(UIA(UIAElement=lastChildElement))
def _get_children(self):
childrenCacheRequest=UIAHandler.handler.baseCacheRequest.clone()
childrenCacheRequest.TreeScope=UIAHandler.TreeScope_Children
try:
cachedChildren=self.UIAElement.buildUpdatedCache(childrenCacheRequest).getCachedChildren()
except COMError as e:
log.debugWarning("Could not fetch cached children from UIA element: %s"%e)
return super(UIA,self).children
children=[]
if not cachedChildren:
# GetCachedChildren returns null if there are no children.
return children
for index in xrange(cachedChildren.length):
e=cachedChildren.getElement(index)
windowHandle=self.windowHandle
children.append(self.correctAPIForRelation(UIA(windowHandle=windowHandle,UIAElement=e)))
return children
def _get_rowNumber(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_GridItemRowPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val+1
raise NotImplementedError
def _get_rowSpan(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_GridItemRowSpanPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
return 1
def _get_rowHeaderText(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_TableItemRowHeaderItemsPropertyId,True)
if val==UIAHandler.handler.reservedNotSupportedValue:
raise NotImplementedError
val=val.QueryInterface(UIAHandler.IUIAutomationElementArray)
textList=[]
for i in xrange(val.length):
e=val.getElement(i)
if UIAHandler.handler.clientObject.compareElements(e,self.UIAElement):
continue
obj=UIA(windowHandle=self.windowHandle,UIAElement=e.buildUpdatedCache(UIAHandler.handler.baseCacheRequest))
if not obj: continue
text=obj.makeTextInfo(textInfos.POSITION_ALL).text
textList.append(text)
return " ".join(textList)
def _get_columnNumber(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_GridItemColumnPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val+1
raise NotImplementedError
def _get_columnSpan(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_GridItemColumnSpanPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
return 1
def _get_columnHeaderText(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_TableItemColumnHeaderItemsPropertyId,True)
if val==UIAHandler.handler.reservedNotSupportedValue:
raise NotImplementedError
val=val.QueryInterface(UIAHandler.IUIAutomationElementArray)
textList=[]
for i in xrange(val.length):
e=val.getElement(i)
if UIAHandler.handler.clientObject.compareElements(e,self.UIAElement):
continue
obj=UIA(windowHandle=self.windowHandle,UIAElement=e.buildUpdatedCache(UIAHandler.handler.baseCacheRequest))
if not obj: continue
text=obj.makeTextInfo(textInfos.POSITION_ALL).text
textList.append(text)
return " ".join(textList)
def _get_rowCount(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_GridRowCountPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
raise NotImplementedError
def _get_columnCount(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_GridColumnCountPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
raise NotImplementedError
def _get_table(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_GridItemContainingGridPropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
e=val.QueryInterface(UIAHandler.IUIAutomationElement).buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
return UIA(UIAElement=e)
raise NotImplementedError
def _get_processID(self):
return self.UIAElement.cachedProcessId
def _get_location(self):
try:
r=self._getUIACacheablePropertyValue(UIAHandler.UIA_BoundingRectanglePropertyId)
except COMError:
return None
if r is None:
return
# r is a tuple of floats representing left, top, width and height.
return RectLTWH.fromFloatCollection(*r)
def _get_value(self):
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_RangeValueValuePropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
minVal=self._getUIACacheablePropertyValue(UIAHandler.UIA_RangeValueMinimumPropertyId,False)
maxVal=self._getUIACacheablePropertyValue(UIAHandler.UIA_RangeValueMaximumPropertyId,False)
if minVal==maxVal:
# There is no range.
return "0"
val=((val-minVal)/(maxVal-minVal))*100.0
return "%d"%round(val,4)
val=self._getUIACacheablePropertyValue(UIAHandler.UIA_ValueValuePropertyId,True)
if val!=UIAHandler.handler.reservedNotSupportedValue:
return val
def _get_actionCount(self):
if self.UIAInvokePattern:
return 1
return 0
def getActionName(self,index=None):
if not index:
index=self.defaultActionIndex
if index==0 and self.UIAInvokePattern:
return _("invoke")
raise NotImplementedError
def doAction(self,index=None):
if not index:
index=self.defaultActionIndex
if index==0:
if self.UIAInvokePattern:
self.UIAInvokePattern.Invoke()
elif self.UIATogglePattern:
self.UIATogglePattern.toggle()
elif self.UIASelectionItemPattern:
self.UIASelectionItemPattern.select()
return
raise NotImplementedError
def _get_hasFocus(self):
try:
return self._getUIACacheablePropertyValue(UIAHandler.UIA_HasKeyboardFocusPropertyId)
except COMError:
return False
def _get_hasIrrelevantLocation(self):
try:
isOffScreen = self._getUIACacheablePropertyValue(UIAHandler.UIA_IsOffscreenPropertyId)
except COMError:
isOffScreen = False
return isOffScreen or not self.location or not any(self.location)
def _get_positionInfo(self):
info=super(UIA,self).positionInfo or {}
itemIndex=0
try:
itemIndex=self._getUIACacheablePropertyValue(UIAHandler.UIA_PositionInSetPropertyId)
except COMError:
pass
if itemIndex>0:
info['indexInGroup']=itemIndex
itemCount=0
try:
itemCount=self._getUIACacheablePropertyValue(UIAHandler.UIA_SizeOfSetPropertyId)
except COMError:
pass
if itemCount>0:
info['similarItemsInGroup']=itemCount
try:
level=self._getUIACacheablePropertyValue(UIAHandler.UIA_LevelPropertyId)
except COMError:
level=None
if level is not None and level>0:
info["level"]=level
return info
def scrollIntoView(self):
pass
def _get_controllerFor(self):
e=self._getUIACacheablePropertyValue(UIAHandler.UIA_ControllerForPropertyId)
if UIAHandler.handler.clientObject.checkNotSupported(e):
return None
a=e.QueryInterface(UIAHandler.IUIAutomationElementArray)
objList=[]
for index in xrange(a.length):
e=a.getElement(index)
e=e.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)
obj=UIA(UIAElement=e)
if obj:
objList.append(obj)
return objList
def event_UIA_elementSelected(self):
self.event_stateChange()
def event_valueChange(self):
if isinstance(self, EditableTextWithoutAutoSelectDetection):
return
return super(UIA, self).event_valueChange()
def event_UIA_systemAlert(self):
"""
A base implementation for UI Automation's system Alert event.
This just reports the element that received the alert in speech and braille, similar to how focus is presented.
Skype for Business toast notifications are one example.
"""
speech.speakObject(self, reason=controlTypes.REASON_FOCUS)
# Ideally, we wouldn't use getBrailleTextForProperties directly.
braille.handler.message(braille.getBrailleTextForProperties(name=self.name, role=self.role))
def event_UIA_notification(self, notificationKind=None, notificationProcessing=UIAHandler.NotificationProcessing_CurrentThenMostRecent, displayString=None, activityId=None):
"""
Introduced in Windows 10 Fall Creators Update (build 16299).
This base implementation announces all notifications from the UIA element.
Unlike other events, the text to be announced is not the name of the object, and parameters control how the incoming notification should be processed.
Subclasses can override this event and can react to notification processing instructions.
"""
# Do not announce notifications from background apps.
if self.appModule != api.getFocusObject().appModule:
return
if displayString:
if notificationProcessing in (UIAHandler.NotificationProcessing_ImportantMostRecent, UIAHandler.NotificationProcessing_MostRecent):
# These notifications supersede earlier notifications.
# Note that no distinction is made between important and non-important.
speech.cancelSpeech()
ui.message(displayString)
class TreeviewItem(UIA):
def _get_value(self):
return ""
def _get__level(self):
level=0
obj=self
while obj:
level+=1
parent=obj.parent=obj.parent # assigning back to obj.parent caches the fetched parent on the object
if not parent or parent==obj or parent.role!=controlTypes.ROLE_TREEVIEWITEM:
return level
obj=parent
return level
def _get_positionInfo(self):
info=super(TreeviewItem,self).positionInfo or {}
info['level']=self._level
return info
class MenuItem(UIA):
def _get_description(self):
name=self.name
description=super(MenuItem,self)._get_description()
if description!=name:
return description
else:
return None
class UIColumnHeader(UIA):
def _get_description(self):
description=super(UIColumnHeader,self).description
try:
itemStatus=self._getUIACacheablePropertyValue(UIAHandler.UIA_ItemStatusPropertyId)
except COMError:
itemStatus=""
return " ".join([x for x in (description,itemStatus) if x and not x.isspace()])
class UIItem(UIA):
"""UIA list items in an Items View repeate the name as the value"""
def _get_positionInfo(self):
info={}
itemIndex=0
try:
itemIndex=self._getUIACacheablePropertyValue(UIAHandler.handler.ItemIndex_PropertyId)
except COMError:
pass
if itemIndex>0:
info['indexInGroup']=itemIndex
try:
e=self._getUIACacheablePropertyValue(UIAHandler.UIA_SelectionItemSelectionContainerPropertyId)
if e: e=e.QueryInterface(UIAHandler.IUIAutomationElement)
except COMError:
e=None
if e:
try:
itemCount=e.getCurrentPropertyValue(UIAHandler.handler.ItemCount_PropertyId)
except COMError:
itemCount=0
if itemCount>0:
info['similarItemsInGroup']=itemCount
return info
def _get_value(self):
return ""
class SensitiveSlider(UIA):
"""A slider that tends to give focus to its thumb control"""
def event_focusEntered(self):
self.reportFocus()
def event_valueChange(self):
focusParent=api.getFocusObject().parent
if self==focusParent:
speech.speakObjectProperties(self,value=True,reason=controlTypes.REASON_CHANGE)
else:
super(SensitiveSlider,self).event_valueChange()
class ControlPanelLink(UIA):
def _get_description(self):
desc=super(ControlPanelLink,self).description
try:
i=desc.find('\n')
except:
i=None
if i:
desc=desc[i+1:]
return desc
class ComboBoxWithoutValuePattern(UIA):
"""A combo box without the Value pattern.
UIA combo boxes don't necessarily support the Value pattern unless they take arbitrary text values.
However, NVDA expects combo boxes to have a value and to fire valueChange events.
The value is obtained by retrieving the selected item's name.
The valueChange event is fired on this object by L{ListItem.event_stateChange}.
"""
def _get_UIASelectionPattern(self):
punk = self.UIAElement.GetCurrentPattern(UIAHandler.UIA_SelectionPatternId)
if punk:
self.UIASelectionPattern = punk.QueryInterface(UIAHandler.IUIAutomationSelectionPattern)
else:
self.UIASelectionPattern = None
return self.UIASelectionPattern
def _get_value(self):
try:
return self.UIASelectionPattern.GetCurrentSelection().GetElement(0).CurrentName
except (COMError, AttributeError):
return None
class ListItem(UIA):
def event_stateChange(self):
if not self.hasFocus:
parent = self.parent
focus=api.getFocusObject()
if parent and parent==focus and (isinstance(parent, ComboBoxWithoutValuePattern)
or (parent._getUIACacheablePropertyValue(UIAHandler.UIA_IsValuePatternAvailablePropertyId) and parent.windowClassName.startswith("Windows.UI.Core"))):
# #6337: This is an item in a combo box that either lacks the Value pattern or does not raise a value change event.
# This item has been selected, so notify the combo box that its value has changed.
focus.event_valueChange()
super(ListItem, self).event_stateChange()
class Dialog(Dialog):
role=controlTypes.ROLE_DIALOG
class Toast_win8(Notification, UIA):
event_UIA_toolTipOpened=Notification.event_alert
class Toast_win10(Notification, UIA):
# #6096: Windows 10 build 14366 and later does not fire tooltip event when toasts appear.
if sys.getwindowsversion().build > 10586:
event_UIA_window_windowOpen=Notification.event_alert
else:
event_UIA_toolTipOpened=Notification.event_alert
# #7128: in Creators Update (build 15063 and later), due to a possible UIA Core problem, toasts are announced repeatedly if UWP apps were used for a while.
# Therefore, have a private toast message consultant (toast timestamp and UIA element runtime ID) handy.
_lastToastTimestamp = None
_lastToastRuntimeID = None
def event_UIA_window_windowOpen(self):
if sys.getwindowsversion().build >= 15063:
toastTimestamp = time.time()
toastRuntimeID = self.UIAElement.getRuntimeID()
if toastRuntimeID == self._lastToastRuntimeID and toastTimestamp-self._lastToastTimestamp < 1.0:
return
self.__class__._lastToastTimestamp = toastTimestamp
self.__class__._lastToastRuntimeID = toastRuntimeID
Notification.event_alert(self)
#WpfTextView fires name state changes once a second, plus when IUIAutomationTextRange::GetAttributeValue is called.
#This causes major lags when using this control with Braille in NVDA. (#2759)
#For now just ignore the events.
class WpfTextView(UIA):
def event_nameChange(self):
return
def event_stateChange(self):
return
class SearchField(EditableTextWithSuggestions, UIA):
"""An edit field that presents suggestions based on a search term.
"""
def event_UIA_controllerFor(self):
# Only useful if suggestions appear and disappear.
if self == api.getFocusObject() and len(self.controllerFor)>0:
self.event_suggestionsOpened()
else:
self.event_suggestionsClosed()
class SuggestionListItem(UIA):
"""Recent Windows releases use suggestions lists for various things, including Start menu suggestions, Store, Settings app and so on.
"""
role=controlTypes.ROLE_LISTITEM
def event_UIA_elementSelected(self):
focusControllerFor=api.getFocusObject().controllerFor
if len(focusControllerFor)>0 and focusControllerFor[0].appModule is self.appModule and self.name:
speech.cancelSpeech()
api.setNavigatorObject(self, isFocus=True)
self.reportFocus()
# Display results as flash messages.
braille.handler.message(braille.getBrailleTextForProperties(name=self.name, role=self.role, positionInfo=self.positionInfo))
# NetUIDropdownAnchor comboBoxes (such as in the MS Office Options dialog)
class NetUIDropdownAnchor(UIA):
def _get_name(self):
name=super(NetUIDropdownAnchor,self).name
# In MS Office 2010, these combo boxes had no name.
# However, the name can be found as the direct previous sibling label element.
if not name and self.previous and self.previous.role==controlTypes.ROLE_STATICTEXT:
name=self.previous.name
return name
class PlaceholderNetUITWMenuItem(UIA):
""" Bounces focus from a netUI dead placeholder menu item when no item is selected up to the menu itself."""
shouldAllowUIAFocusEvent=True
def _get_focusRedirect(self):
# Locate the containing menu and focus that instead.
parent=self.parent
for count in xrange(4):
if not parent:
return
if parent.role==controlTypes.ROLE_POPUPMENU:
return parent
parent=parent.parent
| 1 | 25,717 | This call should use element, not 0 I think. | nvaccess-nvda | py |
@@ -223,6 +223,8 @@ type Options struct {
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NoChunk bool `config:"no_chunk"`
Enc encoder.MultiEncoder `config:"encoding"`
+ FetchUntilEmptyPage bool `config:"fetch_until_empty_page"`
+ PartialPageFetchThreshold int `config:"partial_page_fetch_threshold"`
}
// Fs represents a remote swift server | 1 | // Package swift provides an interface to the Swift object storage system
package swift
import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"net/url"
"path"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/ncw/swift/v2"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/pacer"
"github.com/rclone/rclone/lib/readers"
)
// Constants
const (
directoryMarkerContentType = "application/directory" // content type of directory marker objects
listChunks = 1000 // chunk size to read directory listings
defaultChunkSize = 5 * fs.Gibi
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
// SharedOptions are shared between swift and hubic
var SharedOptions = []fs.Option{{
Name: "chunk_size",
Help: `Above this size files will be chunked into a _segments container.
Above this size files will be chunked into a _segments container. The
default for this is 5 GiB which is its maximum value.`,
Default: defaultChunkSize,
Advanced: true,
}, {
Name: "no_chunk",
Help: `Don't chunk files during streaming upload.
When doing streaming uploads (e.g. using rcat or mount) setting this
flag will cause the swift backend to not upload chunked files.
This will limit the maximum upload size to 5 GiB. However non chunked
files are easier to deal with and have an MD5SUM.
Rclone will still chunk files bigger than chunk_size when doing normal
copy operations.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.EncodeInvalidUtf8 |
encoder.EncodeSlash),
}}
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "swift",
Description: "OpenStack Swift (Rackspace Cloud Files, Memset Memstore, OVH)",
NewFs: NewFs,
Options: append([]fs.Option{{
Name: "env_auth",
Help: "Get swift credentials from environment variables in standard OpenStack form.",
Default: false,
Examples: []fs.OptionExample{
{
Value: "false",
Help: "Enter swift credentials in the next step.",
}, {
Value: "true",
Help: "Get swift credentials from environment vars.\nLeave other fields blank if using this.",
},
},
}, {
Name: "user",
Help: "User name to log in (OS_USERNAME).",
}, {
Name: "key",
Help: "API key or password (OS_PASSWORD).",
}, {
Name: "auth",
Help: "Authentication URL for server (OS_AUTH_URL).",
Examples: []fs.OptionExample{{
Value: "https://auth.api.rackspacecloud.com/v1.0",
Help: "Rackspace US",
}, {
Value: "https://lon.auth.api.rackspacecloud.com/v1.0",
Help: "Rackspace UK",
}, {
Value: "https://identity.api.rackspacecloud.com/v2.0",
Help: "Rackspace v2",
}, {
Value: "https://auth.storage.memset.com/v1.0",
Help: "Memset Memstore UK",
}, {
Value: "https://auth.storage.memset.com/v2.0",
Help: "Memset Memstore UK v2",
}, {
Value: "https://auth.cloud.ovh.net/v3",
Help: "OVH",
}},
}, {
Name: "user_id",
Help: "User ID to log in - optional - most swift systems use user and leave this blank (v3 auth) (OS_USER_ID).",
}, {
Name: "domain",
Help: "User domain - optional (v3 auth) (OS_USER_DOMAIN_NAME)",
}, {
Name: "tenant",
Help: "Tenant name - optional for v1 auth, this or tenant_id required otherwise (OS_TENANT_NAME or OS_PROJECT_NAME).",
}, {
Name: "tenant_id",
Help: "Tenant ID - optional for v1 auth, this or tenant required otherwise (OS_TENANT_ID).",
}, {
Name: "tenant_domain",
Help: "Tenant domain - optional (v3 auth) (OS_PROJECT_DOMAIN_NAME).",
}, {
Name: "region",
Help: "Region name - optional (OS_REGION_NAME).",
}, {
Name: "storage_url",
Help: "Storage URL - optional (OS_STORAGE_URL).",
}, {
Name: "auth_token",
Help: "Auth Token from alternate authentication - optional (OS_AUTH_TOKEN).",
}, {
Name: "application_credential_id",
Help: "Application Credential ID (OS_APPLICATION_CREDENTIAL_ID).",
}, {
Name: "application_credential_name",
Help: "Application Credential Name (OS_APPLICATION_CREDENTIAL_NAME).",
}, {
Name: "application_credential_secret",
Help: "Application Credential Secret (OS_APPLICATION_CREDENTIAL_SECRET).",
}, {
Name: "auth_version",
Help: "AuthVersion - optional - set to (1,2,3) if your auth URL has no version (ST_AUTH_VERSION).",
Default: 0,
}, {
Name: "endpoint_type",
Help: "Endpoint type to choose from the service catalogue (OS_ENDPOINT_TYPE).",
Default: "public",
Examples: []fs.OptionExample{{
Value: "public",
Help: "Public (default, choose this if not sure)",
}, {
Value: "internal",
Help: "Internal (use internal service net)",
}, {
Value: "admin",
Help: "Admin",
}},
}, {
Name: "leave_parts_on_error",
Help: `If true avoid calling abort upload on a failure.
It should be set to true for resuming uploads across different sessions.`,
Default: false,
Advanced: true,
}, {
Name: "storage_policy",
Help: `The storage policy to use when creating a new container.
This applies the specified storage policy when creating a new
container. The policy cannot be changed afterwards. The allowed
configuration values and their meaning depend on your Swift storage
provider.`,
Default: "",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "pcs",
Help: "OVH Public Cloud Storage",
}, {
Value: "pca",
Help: "OVH Public Cloud Archive",
}},
}}, SharedOptions...),
})
}
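// Illustrative sketch, not part of the original rclone source: the diff at the top of this row
// adds FetchUntilEmptyPage and PartialPageFetchThreshold fields to Options (tagged
// fetch_until_empty_page / partial_page_fetch_threshold), and the review comment at the end of
// the row notes that matching definitions are missing from the config section above. Following
// the pattern of the existing entries, such definitions might look roughly like the hypothetical
// options below; the names are taken from the diff, but the help text and defaults are assumptions.
var exampleFetchOptions = []fs.Option{{
	Name:     "fetch_until_empty_page",
	Help:     "Keep listing a container until an empty page is returned (hypothetical example).",
	Default:  false,
	Advanced: true,
}, {
	Name:     "partial_page_fetch_threshold",
	Help:     "Percentage of a full page below which another page is fetched (hypothetical example).",
	Default:  0,
	Advanced: true,
}}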
// Options defines the configuration for this backend
type Options struct {
EnvAuth bool `config:"env_auth"`
User string `config:"user"`
Key string `config:"key"`
Auth string `config:"auth"`
UserID string `config:"user_id"`
Domain string `config:"domain"`
Tenant string `config:"tenant"`
TenantID string `config:"tenant_id"`
TenantDomain string `config:"tenant_domain"`
Region string `config:"region"`
StorageURL string `config:"storage_url"`
AuthToken string `config:"auth_token"`
AuthVersion int `config:"auth_version"`
ApplicationCredentialID string `config:"application_credential_id"`
ApplicationCredentialName string `config:"application_credential_name"`
ApplicationCredentialSecret string `config:"application_credential_secret"`
LeavePartsOnError bool `config:"leave_parts_on_error"`
StoragePolicy string `config:"storage_policy"`
EndpointType string `config:"endpoint_type"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
NoChunk bool `config:"no_chunk"`
Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote swift server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
features *fs.Features // optional features
opt Options // options for this backend
ci *fs.ConfigInfo // global config
c *swift.Connection // the connection to the swift server
rootContainer string // container part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of container status
noCheckContainer bool // don't check the container before creating it
pacer *fs.Pacer // To pace the API calls
}
// Object describes a swift object
//
// Will definitely have info but maybe not meta
type Object struct {
fs *Fs // what this object is part of
remote string // The remote path
size int64
lastModified time.Time
contentType string
md5 string
headers swift.Headers // The object headers if known
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
return f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.rootContainer == "" {
return fmt.Sprintf("Swift root")
}
if f.rootDirectory == "" {
return fmt.Sprintf("Swift container %s", f.rootContainer)
}
return fmt.Sprintf("Swift container %s path %s", f.rootContainer, f.rootDirectory)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
401, // Unauthorized (e.g. "Token has expired")
408, // Request Timeout
409, // Conflict - various states that could be resolved on a retry
429, // Rate exceeded.
500, // Get occasional 500 Internal Server Error
503, // Service Unavailable/Slow Down - "Reduce your request rate"
504, // Gateway Time-out
}
// shouldRetry returns a boolean as to whether this err deserves to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
// If this is a swift.Error object extract the HTTP error code
if swiftError, ok := err.(*swift.Error); ok {
for _, e := range retryErrorCodes {
if swiftError.StatusCode == e {
return true, err
}
}
}
// Check for generic failure conditions
return fserrors.ShouldRetry(err), err
}
// shouldRetryHeaders returns a boolean as to whether this err
// deserves to be retried. It reads the headers passed in looking for
// `Retry-After`. It returns the err as a convenience
func shouldRetryHeaders(ctx context.Context, headers swift.Headers, err error) (bool, error) {
if swiftError, ok := err.(*swift.Error); ok && swiftError.StatusCode == 429 {
if value := headers["Retry-After"]; value != "" {
retryAfter, parseErr := strconv.Atoi(value)
if parseErr != nil {
fs.Errorf(nil, "Failed to parse Retry-After: %q: %v", value, parseErr)
} else {
duration := time.Second * time.Duration(retryAfter)
if duration <= 60*time.Second {
// Do a short sleep immediately
fs.Debugf(nil, "Sleeping for %v to obey Retry-After", duration)
time.Sleep(duration)
return true, err
}
// Delay a long sleep for a retry
return false, fserrors.NewErrorRetryAfter(duration)
}
}
}
return shouldRetry(ctx, err)
}
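// Worked example, not in the original source, tracing shouldRetryHeaders above: a 429 response
// with "Retry-After: 5" sleeps five seconds locally and returns (true, err) so the pacer retries
// straight away, while "Retry-After: 300" skips the local sleep and returns
// fserrors.NewErrorRetryAfter(5m0s) so the longer wait is handled further up the stack.
// Any other error falls through to the generic shouldRetry check.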
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}
// split returns container and containerPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (container, containerPath string) {
container, containerPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(container), f.opt.Enc.FromStandardPath(containerPath)
}
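// Worked example, not in the original source: with f.root set to "mycontainer/backup", a call
// like f.split("2021/data.bin") joins the parts into "mycontainer/backup/2021/data.bin" and
// bucket.Split then yields container "mycontainer" and containerPath "backup/2021/data.bin",
// before the encoder mapping is applied to each.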
// split returns container and containerPath from the object
func (o *Object) split() (container, containerPath string) {
return o.fs.split(o.remote)
}
// swiftConnection makes a connection to swift
func swiftConnection(ctx context.Context, opt *Options, name string) (*swift.Connection, error) {
ci := fs.GetConfig(ctx)
c := &swift.Connection{
// Keep these in the same order as the Config for ease of checking
UserName: opt.User,
ApiKey: opt.Key,
AuthUrl: opt.Auth,
UserId: opt.UserID,
Domain: opt.Domain,
Tenant: opt.Tenant,
TenantId: opt.TenantID,
TenantDomain: opt.TenantDomain,
Region: opt.Region,
StorageUrl: opt.StorageURL,
AuthToken: opt.AuthToken,
AuthVersion: opt.AuthVersion,
ApplicationCredentialId: opt.ApplicationCredentialID,
ApplicationCredentialName: opt.ApplicationCredentialName,
ApplicationCredentialSecret: opt.ApplicationCredentialSecret,
EndpointType: swift.EndpointType(opt.EndpointType),
ConnectTimeout: 10 * ci.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * ci.Timeout, // Use the timeouts in the transport
Transport: fshttp.NewTransport(ctx),
}
if opt.EnvAuth {
err := c.ApplyEnvironment()
if err != nil {
return nil, fmt.Errorf("failed to read environment variables: %w", err)
}
}
StorageUrl, AuthToken := c.StorageUrl, c.AuthToken // nolint
if !c.Authenticated() {
if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret == "" {
if c.UserName == "" && c.UserId == "" {
return nil, errors.New("user name or user id not found for authentication (and no storage_url+auth_token is provided)")
}
if c.ApiKey == "" {
return nil, errors.New("key not found")
}
}
if c.AuthUrl == "" {
return nil, errors.New("auth not found")
}
err := c.Authenticate(ctx) // fills in c.StorageUrl and c.AuthToken
if err != nil {
return nil, err
}
}
// Make sure we re-auth with the AuthToken and StorageUrl
// provided by wrapping the existing auth, so we can just
// override one or the other or both.
if StorageUrl != "" || AuthToken != "" {
// Re-write StorageURL and AuthToken if they are being
// overridden as c.Authenticate above will have
// overwritten them.
if StorageUrl != "" {
c.StorageUrl = StorageUrl
}
if AuthToken != "" {
c.AuthToken = AuthToken
}
c.Auth = newAuth(c.Auth, StorageUrl, AuthToken)
}
return c, nil
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
const minChunkSize = fs.SizeSuffixBase
if cs < minChunkSize {
return fmt.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootContainer, f.rootDirectory = bucket.Split(f.root)
}
// NewFsWithConnection constructs an Fs from the path, container:path
// and authenticated connection.
//
// if noCheckContainer is set then the Fs won't check the container
// exists before creating it.
func NewFsWithConnection(ctx context.Context, opt *Options, name, root string, c *swift.Connection, noCheckContainer bool) (fs.Fs, error) {
ci := fs.GetConfig(ctx)
f := &Fs{
name: name,
opt: *opt,
ci: ci,
c: c,
noCheckContainer: noCheckContainer,
pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
SlowModTime: true,
}).Fill(ctx, f)
if f.rootContainer != "" && f.rootDirectory != "" {
// Check to see if the object exists - ignoring directory markers
var info swift.Object
var err error
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
info, rxHeaders, err = f.c.Object(ctx, f.rootContainer, encodedDirectory)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
if err == nil && info.ContentType != directoryMarkerContentType {
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
return f, nil
}
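// Example of the root adjustment above, not in the original source: a remote of
// "swift:mycontainer/dir/file.dat" gives rootContainer "mycontainer" and rootDirectory
// "dir/file.dat"; if that key exists and is not a directory marker, the root is rewound to
// "mycontainer/dir" and fs.ErrorIsFile is returned so callers treat the path as a single file.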
// NewFs constructs an Fs from the path, container:path
func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, fmt.Errorf("swift: chunk size: %w", err)
}
c, err := swiftConnection(ctx, opt, name)
if err != nil {
return nil, err
}
return NewFsWithConnection(ctx, opt, name, root, c, false)
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *swift.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
// Note that due to a quirk of swift, dynamic large objects are
// returned as 0 bytes in the listing. Correct this here by
// making sure we read the full metadata for all 0 byte files.
// We don't read the metadata for directory marker objects.
if info != nil && info.Bytes == 0 && info.ContentType != "application/directory" {
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err == fs.ErrorObjectNotFound {
// We have a dangling large object here so just return the original metadata
fs.Errorf(o, "dangling large object with no contents")
} else if err != nil {
return nil, err
} else {
return o, nil
}
}
if info != nil {
// Set info but not headers
err := o.decodeMetaData(info)
if err != nil {
return nil, err
}
} else {
err := o.readMetaData(ctx) // reads info and headers, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found it
// returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}
// listFn is called from list and listContainerRoot to handle an object.
type listFn func(remote string, object *swift.Object, isDirectory bool) error
// listContainerRoot lists the objects into the function supplied from
// the container and directory supplied. The remote has prefix
// removed from it and if addContainer is set then it adds the
// container to the start.
//
// Set recurse to read sub directories
func (f *Fs) listContainerRoot(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn listFn) error {
if prefix != "" && !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
if directory != "" && !strings.HasSuffix(directory, "/") {
directory += "/"
}
// Options for ObjectsWalk
opts := swift.ObjectsOpts{
Prefix: directory,
Limit: listChunks,
}
if !recurse {
opts.Delimiter = '/'
}
return f.c.ObjectsWalk(ctx, container, &opts, func(ctx context.Context, opts *swift.ObjectsOpts) (interface{}, error) {
var objects []swift.Object
var err error
err = f.pacer.Call(func() (bool, error) {
objects, err = f.c.Objects(ctx, container, opts)
return shouldRetry(ctx, err)
})
if err == nil {
for i := range objects {
object := &objects[i]
isDirectory := false
if !recurse {
isDirectory = strings.HasSuffix(object.Name, "/")
}
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
if !includeDirMarkers && remote == prefix {
// If we have zero length directory markers ending in / then swift
// will return them in the listing for the directory which causes
// duplicate directories. Ignore them here.
continue
}
remote = remote[len(prefix):]
if addContainer {
remote = path.Join(container, remote)
}
err = fn(remote, object, isDirectory)
if err != nil {
break
}
}
}
return objects, err
})
}
type addEntryFn func(fs.DirEntry) error
// list the objects into the function supplied
func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, includeDirMarkers bool, fn addEntryFn) error {
err := f.listContainerRoot(ctx, container, directory, prefix, addContainer, recurse, includeDirMarkers, func(remote string, object *swift.Object, isDirectory bool) (err error) {
if isDirectory {
remote = strings.TrimRight(remote, "/")
d := fs.NewDir(remote, time.Time{}).SetSize(object.Bytes)
err = fn(d)
} else {
// newObjectWithInfo does a full metadata read on 0 size objects which might be dynamic large objects
var o fs.Object
o, err = f.newObjectWithInfo(ctx, remote, object)
if err != nil {
return err
}
if includeDirMarkers || o.Storable() {
err = fn(o)
}
}
return err
})
if err == swift.ContainerNotFound {
err = fs.ErrorDirNotFound
}
return err
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
if container == "" {
return nil, fs.ErrorListBucketRequired
}
// List the objects
err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
entries = append(entries, entry)
return nil
})
if err != nil {
return nil, err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
return entries, nil
}
// listContainers lists the containers
func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) {
var containers []swift.Container
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container listing failed: %w", err)
}
for _, container := range containers {
f.cache.MarkOK(container.Name)
d := fs.NewDir(f.opt.Enc.ToStandardName(container.Name), time.Time{}).SetSize(container.Bytes).SetItems(container.Count)
entries = append(entries, d)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
}
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
container, directory := f.split(dir)
list := walk.NewListRHelper(callback)
listR := func(container, directory, prefix string, addContainer bool) error {
return f.list(ctx, container, directory, prefix, addContainer, true, false, func(entry fs.DirEntry) error {
return list.Add(entry)
})
}
if container == "" {
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
container := entry.Remote()
err = listR(container, "", f.rootDirectory, true)
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
}
} else {
err = listR(container, directory, f.rootDirectory, f.rootContainer == "")
if err != nil {
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
}
return list.Flush()
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
var containers []swift.Container
var err error
err = f.pacer.Call(func() (bool, error) {
containers, err = f.c.ContainersAll(ctx, nil)
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("container listing failed: %w", err)
}
var total, objects int64
for _, c := range containers {
total += c.Bytes
objects += c.Count
}
usage := &fs.Usage{
Used: fs.NewUsageValue(total), // bytes in use
Objects: fs.NewUsageValue(objects), // objects in use
}
return usage, nil
}
// Put the object into the container
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
headers: swift.Headers{}, // Empty object headers to stop readMetaData being called
}
return fs, fs.Update(ctx, in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
container, _ := f.split(dir)
return f.makeContainer(ctx, container)
}
// makeContainer creates the container if it doesn't exist
func (f *Fs) makeContainer(ctx context.Context, container string) error {
return f.cache.Create(container, func() error {
// Check to see if container exists first
var err error = swift.ContainerNotFound
if !f.noCheckContainer {
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = f.c.Container(ctx, container)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
}
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if f.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = f.opt.StoragePolicy
}
err = f.pacer.Call(func() (bool, error) {
err = f.c.ContainerCreate(ctx, container, headers)
return shouldRetry(ctx, err)
})
if err == nil {
fs.Infof(f, "Container %q created", container)
}
}
return err
}, nil)
}
// Rmdir deletes the container if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" || directory != "" {
return nil
}
err := f.cache.Remove(container, func() error {
err := f.pacer.Call(func() (bool, error) {
err := f.c.ContainerDelete(ctx, container)
return shouldRetry(ctx, err)
})
if err == nil {
fs.Infof(f, "Container %q removed", container)
}
return err
})
return err
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// Purge deletes all the files in the directory
//
// Implemented here so we can make sure we delete directory markers
func (f *Fs) Purge(ctx context.Context, dir string) error {
container, directory := f.split(dir)
if container == "" {
return fs.ErrorListBucketRequired
}
// Delete all the files including the directory markers
toBeDeleted := make(chan fs.Object, f.ci.Transfers)
delErr := make(chan error, 1)
go func() {
delErr <- operations.DeleteFiles(ctx, toBeDeleted)
}()
err := f.list(ctx, container, directory, f.rootDirectory, false, true, true, func(entry fs.DirEntry) error {
if o, ok := entry.(*Object); ok {
toBeDeleted <- o
}
return nil
})
close(toBeDeleted)
delError := <-delErr
if err == nil {
err = delError
}
if err != nil {
return err
}
return f.Rmdir(ctx, dir)
}
// Copy src to this remote using server-side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstContainer, dstPath := f.split(remote)
err := f.makeContainer(ctx, dstContainer)
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
isLargeObject, err := srcObj.isLargeObject(ctx)
if err != nil {
return nil, err
}
if isLargeObject {
/*handle large object*/
err = copyLargeObject(ctx, f, srcObj, dstContainer, dstPath)
} else {
srcContainer, srcPath := srcObj.split()
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(ctx, srcContainer, srcPath, dstContainer, dstPath, nil)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
}
if err != nil {
return nil, err
}
return f.NewObject(ctx, remote)
}
func copyLargeObject(ctx context.Context, f *Fs, src *Object, dstContainer string, dstPath string) error {
segmentsContainer := dstContainer + "_segments"
err := f.makeContainer(ctx, segmentsContainer)
if err != nil {
return err
}
segments, err := src.getSegmentsLargeObject(ctx)
if err != nil {
return err
}
if len(segments) == 0 {
return errors.New("could not copy object, list segments are empty")
}
nanoSeconds := time.Now().Nanosecond()
prefixSegment := fmt.Sprintf("%v/%v/%s", nanoSeconds, src.size, strings.ReplaceAll(uuid.New().String(), "-", ""))
copiedSegmentsLen := 10
for _, value := range segments {
if len(value) <= 0 {
continue
}
fragment := value[0]
if len(fragment) <= 0 {
continue
}
copiedSegmentsLen = len(value)
firstIndex := strings.Index(fragment, "/")
if firstIndex < 0 {
firstIndex = 0
} else {
firstIndex = firstIndex + 1
}
lastIndex := strings.LastIndex(fragment, "/")
if lastIndex < 0 {
lastIndex = len(fragment)
} else {
lastIndex = lastIndex - 1
}
prefixSegment = fragment[firstIndex:lastIndex]
break
}
copiedSegments := make([]string, copiedSegmentsLen)
defer handleCopyFail(ctx, f, segmentsContainer, copiedSegments, err)
for c, ss := range segments {
if len(ss) <= 0 {
continue
}
for _, s := range ss {
lastIndex := strings.LastIndex(s, "/")
if lastIndex <= 0 {
lastIndex = 0
} else {
lastIndex = lastIndex + 1
}
segmentName := dstPath + "/" + prefixSegment + "/" + s[lastIndex:]
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectCopy(ctx, c, s, segmentsContainer, segmentName, nil)
copiedSegments = append(copiedSegments, segmentName)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
if err != nil {
return err
}
}
}
m := swift.Metadata{}
headers := m.ObjectHeaders()
headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s/%s", segmentsContainer, dstPath, prefixSegment))
headers["Content-Length"] = "0"
emptyReader := bytes.NewReader(nil)
err = f.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = f.c.ObjectPut(ctx, dstContainer, dstPath, emptyReader, true, "", src.contentType, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
return err
}
// remove copied segments when the copy process failed
func handleCopyFail(ctx context.Context, f *Fs, segmentsContainer string, segments []string, err error) {
fs.Debugf(f, "handle copy segment fail")
if err == nil {
return
}
if len(segmentsContainer) == 0 {
fs.Debugf(f, "invalid segments container")
return
}
if len(segments) == 0 {
fs.Debugf(f, "segments is empty")
return
}
fs.Debugf(f, "action delete segments what copied")
for _, v := range segments {
_ = f.c.ObjectDelete(ctx, segmentsContainer, v)
}
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
isDynamicLargeObject, err := o.isDynamicLargeObject(ctx)
if err != nil {
return "", err
}
isStaticLargeObject, err := o.isStaticLargeObject(ctx)
if err != nil {
return "", err
}
if isDynamicLargeObject || isStaticLargeObject {
fs.Debugf(o, "Returning empty Md5sum for swift large object")
return "", nil
}
return strings.ToLower(o.md5), nil
}
// hasHeader checks for the header passed in returning false if the
// object isn't found.
func (o *Object) hasHeader(ctx context.Context, header string) (bool, error) {
err := o.readMetaData(ctx)
if err != nil {
if err == fs.ErrorObjectNotFound {
return false, nil
}
return false, err
}
_, isDynamicLargeObject := o.headers[header]
return isDynamicLargeObject, nil
}
// isDynamicLargeObject checks for X-Object-Manifest header
func (o *Object) isDynamicLargeObject(ctx context.Context) (bool, error) {
return o.hasHeader(ctx, "X-Object-Manifest")
}
// isStaticLargeObjectFile checks for the X-Static-Large-Object header
func (o *Object) isStaticLargeObject(ctx context.Context) (bool, error) {
return o.hasHeader(ctx, "X-Static-Large-Object")
}
func (o *Object) isLargeObject(ctx context.Context) (result bool, err error) {
result, err = o.hasHeader(ctx, "X-Static-Large-Object")
if result {
return
}
result, err = o.hasHeader(ctx, "X-Object-Manifest")
if result {
return
}
return false, nil
}
func (o *Object) isInContainerVersioning(ctx context.Context, container string) (bool, error) {
_, headers, err := o.fs.c.Container(ctx, container)
if err != nil {
return false, err
}
xHistoryLocation := headers["X-History-Location"]
if len(xHistoryLocation) > 0 {
return true, nil
}
return false, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.size
}
// decodeMetaData sets the metadata in the object from a swift.Object
//
// Sets
// o.lastModified
// o.size
// o.md5
// o.contentType
func (o *Object) decodeMetaData(info *swift.Object) (err error) {
o.lastModified = info.LastModified
o.size = info.Bytes
o.md5 = info.Hash
o.contentType = info.ContentType
return nil
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
//
// it returns fs.ErrorObjectNotFound if the object isn't found
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.headers != nil {
return nil
}
var info swift.Object
var h swift.Headers
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
info, h, err = o.fs.c.Object(ctx, container, containerPath)
return shouldRetryHeaders(ctx, h, err)
})
if err != nil {
if err == swift.ObjectNotFound {
return fs.ErrorObjectNotFound
}
return err
}
o.headers = h
err = o.decodeMetaData(&info)
if err != nil {
return err
}
return nil
}
// ModTime returns the modification time of the object
//
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime(ctx context.Context) time.Time {
if o.fs.ci.UseServerModTime {
return o.lastModified
}
err := o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "Failed to read metadata: %s", err)
return o.lastModified
}
modTime, err := o.headers.ObjectMetadata().GetModTime()
if err != nil {
// fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.lastModified
}
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
err := o.readMetaData(ctx)
if err != nil {
return err
}
meta := o.headers.ObjectMetadata()
meta.SetModTime(modTime)
newHeaders := meta.ObjectHeaders()
for k, v := range newHeaders {
o.headers[k] = v
}
// Include any other metadata from request
for k, v := range o.headers {
if strings.HasPrefix(k, "X-Object-") {
newHeaders[k] = v
}
}
container, containerPath := o.split()
return o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectUpdate(ctx, container, containerPath, newHeaders)
return shouldRetry(ctx, err)
})
}
// Storable returns if this object is storable
//
// It compares the Content-Type to directoryMarkerContentType - that
// makes it a directory marker which is not storable.
func (o *Object) Storable() bool {
return o.contentType != directoryMarkerContentType
}
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
fs.FixRangeOption(options, o.size)
headers := fs.OpenOptionHeaders(options)
_, isRanging := headers["Range"]
container, containerPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
in, rxHeaders, err = o.fs.c.ObjectOpen(ctx, container, containerPath, !isRanging, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
return
}
// min returns the smallest of x, y
func min(x, y int64) int64 {
if x < y {
return x
}
return y
}
func (o *Object) getSegmentsLargeObject(ctx context.Context) (map[string][]string, error) {
container, objectName := o.split()
segmentContainer, segmentObjects, err := o.fs.c.LargeObjectGetSegments(ctx, container, objectName)
if err != nil {
fs.Debugf(o, "Failed to get list segments of object: %v", err)
return nil, err
}
var containerSegments = make(map[string][]string)
for _, segment := range segmentObjects {
if _, ok := containerSegments[segmentContainer]; !ok {
containerSegments[segmentContainer] = make([]string, 0, len(segmentObjects))
}
segments, _ := containerSegments[segmentContainer]
segments = append(segments, segment.Name)
containerSegments[segmentContainer] = segments
}
return containerSegments, nil
}
func (o *Object) removeSegmentsLargeObject(ctx context.Context, containerSegments map[string][]string) error {
if containerSegments == nil || len(containerSegments) <= 0 {
return nil
}
for container, segments := range containerSegments {
_, err := o.fs.c.BulkDelete(ctx, container, segments)
if err != nil {
fs.Debugf(o, "Failed to delete bulk segments %v", err)
return err
}
}
return nil
}
func (o *Object) getSegmentsDlo(ctx context.Context) (segmentsContainer string, prefix string, err error) {
if err = o.readMetaData(ctx); err != nil {
return
}
dirManifest := o.headers["X-Object-Manifest"]
dirManifest, err = url.PathUnescape(dirManifest)
if err != nil {
return
}
delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
return
}
return dirManifest[:delimiter], dirManifest[delimiter+1:], nil
}
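// Worked example, not in the original source: an unescaped X-Object-Manifest value of
// "mycontainer_segments/dir/file.dat/1234567890.12345/1024" is split at the first "/" into
// segmentsContainer "mycontainer_segments" and prefix "dir/file.dat/1234567890.12345/1024".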
// urlEncode encodes a string so that it is a valid URL
//
// We don't use any of Go's standard methods as we need `/` not
// encoded but we need '&' encoded.
func urlEncode(str string) string {
var buf bytes.Buffer
for i := 0; i < len(str); i++ {
c := str[i]
if (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '/' || c == '.' || c == '_' || c == '-' {
_ = buf.WriteByte(c)
} else {
_, _ = buf.WriteString(fmt.Sprintf("%%%02X", c))
}
}
return buf.String()
}
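// Worked example, not in the original source:
// urlEncode("my container/dir/file&1.txt") returns "my%20container/dir/file%261.txt" -
// the space and "&" are percent-encoded while "/", ".", digits and letters pass through unchanged.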
// updateChunks updates the existing object using chunks to a separate
// container. It returns a string which prefixes current segments.
func (o *Object) updateChunks(ctx context.Context, in0 io.Reader, headers swift.Headers, size int64, contentType string) (string, error) {
container, containerPath := o.split()
segmentsContainer := container + "_segments"
// Create the segmentsContainer if it doesn't exist
var err error
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
_, rxHeaders, err = o.fs.c.Container(ctx, segmentsContainer)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
if err == swift.ContainerNotFound {
headers := swift.Headers{}
if o.fs.opt.StoragePolicy != "" {
headers["X-Storage-Policy"] = o.fs.opt.StoragePolicy
}
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ContainerCreate(ctx, segmentsContainer, headers)
return shouldRetry(ctx, err)
})
}
if err != nil {
return "", err
}
// Upload the chunks
left := size
i := 0
uniquePrefix := fmt.Sprintf("%s/%d", swift.TimeToFloatString(time.Now()), size)
segmentsPath := path.Join(containerPath, uniquePrefix)
in := bufio.NewReader(in0)
segmentInfos := make([]string, 0, (size/int64(o.fs.opt.ChunkSize))+1)
defer atexit.OnError(&err, func() {
if o.fs.opt.LeavePartsOnError {
return
}
fs.Debugf(o, "Delete segments when err raise %v", err)
if segmentInfos == nil || len(segmentInfos) == 0 {
return
}
_ctx := context.Background()
deleteChunks(_ctx, o, segmentsContainer, segmentInfos)
})()
for {
// can we read at least one byte?
if _, err = in.Peek(1); err != nil {
if left > 0 {
return "", err // read less than expected
}
fs.Debugf(o, "Uploading segments into %q seems done (%v)", segmentsContainer, err)
break
}
n := int64(o.fs.opt.ChunkSize)
if size != -1 {
n = min(left, n)
headers["Content-Length"] = strconv.FormatInt(n, 10) // set Content-Length as we know it
left -= n
}
segmentReader := io.LimitReader(in, n)
segmentPath := fmt.Sprintf("%s/%08d", segmentsPath, i)
fs.Debugf(o, "Uploading segment file %q into %q", segmentPath, segmentsContainer)
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(ctx, segmentsContainer, segmentPath, segmentReader, true, "", "", headers)
if err == nil {
segmentInfos = append(segmentInfos, segmentPath)
}
return shouldRetryHeaders(ctx, rxHeaders, err)
})
if err != nil {
return "", err
}
i++
}
// Upload the manifest
headers["X-Object-Manifest"] = urlEncode(fmt.Sprintf("%s/%s", segmentsContainer, segmentsPath))
headers["Content-Length"] = "0" // set Content-Length as we know it
emptyReader := bytes.NewReader(nil)
err = o.fs.pacer.Call(func() (bool, error) {
var rxHeaders swift.Headers
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, emptyReader, true, "", contentType, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
if err == nil {
//reset data
segmentInfos = nil
}
return uniquePrefix + "/", err
}
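// Example layout produced by updateChunks, not in the original source: uploading "big.bin" into
// container "media" stores the pieces as "media_segments/big.bin/<timestamp>/<size>/00000000",
// ".../00000001" and so on, then writes a zero-byte "media/big.bin" whose X-Object-Manifest
// header points at "media_segments/big.bin/<timestamp>/<size>" so swift serves the segments as
// one concatenated object.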
func deleteChunks(ctx context.Context, o *Object, segmentsContainer string, segmentInfos []string) {
if segmentInfos == nil || len(segmentInfos) == 0 {
return
}
for _, v := range segmentInfos {
fs.Debugf(o, "Delete segment file %q on %q", v, segmentsContainer)
e := o.fs.c.ObjectDelete(ctx, segmentsContainer, v)
if e != nil {
fs.Errorf(o, "Error occurred in delete segment file %q on %q, error: %q", v, segmentsContainer, e)
}
}
}
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
container, containerPath := o.split()
if container == "" {
return fserrors.FatalError(errors.New("can't upload files to the root"))
}
err := o.fs.makeContainer(ctx, container)
if err != nil {
return err
}
size := src.Size()
modTime := src.ModTime(ctx)
// Note whether this is a dynamic large object before starting
isLargeObject, err := o.isLargeObject(ctx)
if err != nil {
return err
}
//capture segments before upload
var segmentsContainer map[string][]string
if isLargeObject {
segmentsContainer, _ = o.getSegmentsLargeObject(ctx)
}
// Set the mtime
m := swift.Metadata{}
m.SetModTime(modTime)
contentType := fs.MimeType(ctx, src)
headers := m.ObjectHeaders()
fs.OpenOptionAddHeaders(options, headers)
if size > int64(o.fs.opt.ChunkSize) || (size == -1 && !o.fs.opt.NoChunk) {
_, err = o.updateChunks(ctx, in, headers, size, contentType)
if err != nil {
return err
}
o.headers = nil // wipe old metadata
} else {
var inCount *readers.CountingReader
if size >= 0 {
headers["Content-Length"] = strconv.FormatInt(size, 10) // set Content-Length if we know it
} else {
// otherwise count the size for later
inCount = readers.NewCountingReader(in)
in = inCount
}
var rxHeaders swift.Headers
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
rxHeaders, err = o.fs.c.ObjectPut(ctx, container, containerPath, in, true, "", contentType, headers)
return shouldRetryHeaders(ctx, rxHeaders, err)
})
if err != nil {
return err
}
// set Metadata since ObjectPut checked the hash and length so we know the
// object has been safely uploaded
o.lastModified = modTime
o.size = size
o.md5 = rxHeaders["Etag"]
o.contentType = contentType
o.headers = headers
if inCount != nil {
// update the size if streaming from the reader
o.size = int64(inCount.BytesRead())
}
}
isInContainerVersioning, _ := o.isInContainerVersioning(ctx, container)
// If the file was a large object and the container does not have versioning enabled, remove the old segments
if isLargeObject && len(segmentsContainer) > 0 && !isInContainerVersioning {
err := o.removeSegmentsLargeObject(ctx, segmentsContainer)
if err != nil {
fs.Logf(o, "Failed to remove old segments - carrying on with upload: %v", err)
}
}
// Read the metadata from the newly created object if necessary
return o.readMetaData(ctx)
}
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
container, containerPath := o.split()
// check whether the object is a large object
isLargeObject, err := o.isLargeObject(ctx)
if err != nil {
return err
}
// check whether the container has versioning enabled - if so, old segments are kept when deleting
isInContainerVersioning := false
if isLargeObject {
isInContainerVersioning, err = o.isInContainerVersioning(ctx, container)
if err != nil {
return err
}
}
// capture the segment objects if this object is a large object
var containerSegments map[string][]string
if isLargeObject {
containerSegments, err = o.getSegmentsLargeObject(ctx)
if err != nil {
return err
}
}
// Remove file/manifest first
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.c.ObjectDelete(ctx, container, containerPath)
return shouldRetry(ctx, err)
})
if err != nil {
return err
}
if !isLargeObject || isInContainerVersioning {
return nil
}
if isLargeObject {
return o.removeSegmentsLargeObject(ctx, containerSegments)
}
return nil
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
return o.contentType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Copier = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)
| 1 | 13,455 | This is missing the definition of `fetch_until_empty_page` - it needs to go in the config section above. | rclone-rclone | go |
@@ -170,8 +170,8 @@ module Selenium
def macosx_path
path = "/Applications/Firefox.app/Contents/MacOS/firefox-bin"
- path = "~/Applications/Firefox.app/Contents/MacOS/firefox-bin" unless File.exist?(path)
- path = Platform.find_binary("firefox-bin") unless File.exist?(path)
+ path = "~/Applications/Firefox.app/Contents/MacOS/firefox-bin" unless File.exist?(File.expand_path(path))
+ path = Platform.find_binary("firefox-bin") unless File.exist?(File.expand_path(path))
path
end | 1 | # encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Firefox
# @api private
class Binary
NO_FOCUS_LIBRARY_NAME = "x_ignore_nofocus.so"
NO_FOCUS_LIBRARIES = [
["#{WebDriver.root}/selenium/webdriver/firefox/native/linux/amd64/#{NO_FOCUS_LIBRARY_NAME}", "amd64/#{NO_FOCUS_LIBRARY_NAME}"],
["#{WebDriver.root}/selenium/webdriver/firefox/native/linux/x86/#{NO_FOCUS_LIBRARY_NAME}", "x86/#{NO_FOCUS_LIBRARY_NAME}"],
]
WAIT_TIMEOUT = 90
QUIT_TIMEOUT = 5
def start_with(profile, profile_path, *args)
if Platform.cygwin?
profile_path = Platform.cygwin_path(profile_path, :windows => true)
elsif Platform.windows?
profile_path = profile_path.gsub("/", "\\")
end
ENV['XRE_CONSOLE_LOG'] = profile.log_file if profile.log_file
ENV['XRE_PROFILE_PATH'] = profile_path
ENV['MOZ_NO_REMOTE'] = '1' # able to launch multiple instances
ENV['MOZ_CRASHREPORTER_DISABLE'] = '1' # disable breakpad
ENV['NO_EM_RESTART'] = '1' # prevent the binary from detaching from the console
if Platform.linux? && (profile.native_events? || profile.load_no_focus_lib?)
modify_link_library_path profile_path
end
execute(*args)
cope_with_mac_strangeness(args) if Platform.mac?
end
def quit
return unless @process
@process.poll_for_exit QUIT_TIMEOUT
rescue ChildProcess::TimeoutError
# ok, force quit
@process.stop QUIT_TIMEOUT
end
def wait
return unless @process
begin
@process.poll_for_exit(WAIT_TIMEOUT)
rescue ChildProcess::TimeoutError => e
@process.stop
raise e
end
end
private
def execute(*extra_args)
args = [self.class.path, "-no-remote"] + extra_args
@process = ChildProcess.build(*args)
@process.io.inherit! if $DEBUG
@process.start
end
def cope_with_mac_strangeness(args)
sleep 0.3
if @process.crashed?
# ok, trying a restart
sleep 7
execute(*args)
end
# ensure we're ok
sleep 0.3
if @process.crashed?
raise Error::WebDriverError, "unable to start Firefox cleanly, args: #{args.inspect}"
end
end
def modify_link_library_path(profile_path)
paths = []
NO_FOCUS_LIBRARIES.each do |from, to|
dest = File.join(profile_path, to)
FileUtils.mkdir_p File.dirname(dest)
FileUtils.cp from, dest
paths << File.expand_path(File.dirname(dest))
end
paths += ENV['LD_LIBRARY_PATH'].to_s.split(File::PATH_SEPARATOR)
ENV['LD_LIBRARY_PATH'] = paths.uniq.join(File::PATH_SEPARATOR)
ENV['LD_PRELOAD'] = NO_FOCUS_LIBRARY_NAME
end
class << self
#
# @api private
#
# @see Firefox.path=
#
def path=(path)
Platform.assert_executable(path)
@path = path
end
def path
@path ||= case Platform.os
when :macosx
macosx_path
when :windows
windows_path
when :linux, :unix
Platform.find_binary("firefox3", "firefox2", "firefox") || "/usr/bin/firefox"
else
raise Error::WebDriverError, "unknown platform: #{Platform.os}"
end
@path = Platform.cygwin_path(@path) if Platform.cygwin?
unless File.file?(@path.to_s)
raise Error::WebDriverError, "Could not find Firefox binary (os=#{Platform.os}). Make sure Firefox is installed or set the path manually with #{self}.path="
end
@path
end
def version
@version = case Platform.os
when :macosx
`#{path} -v`.strip[/[^\s]*$/][/^\d+/].to_i
when :windows
`\"#{path}\" -v | more`.strip[/[^\s]*$/][/^\d+/].to_i
when :linux
`#{path} -v`.strip[/[^\s]*$/][/^\d+/].to_i
else
0
end
end
private
def windows_path
windows_registry_path || Platform.find_in_program_files("\\Mozilla Firefox\\firefox.exe") || Platform.find_binary("firefox")
end
def macosx_path
path = "/Applications/Firefox.app/Contents/MacOS/firefox-bin"
path = "~/Applications/Firefox.app/Contents/MacOS/firefox-bin" unless File.exist?(path)
path = Platform.find_binary("firefox-bin") unless File.exist?(path)
path
end
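# Illustrative note, not in the original source: the diff at the top of this row wraps these
# File.exist? checks in File.expand_path, and the review comment at the end of the row argues
# the first change is unnecessary because expand_path only rewrites relative and "~" paths, e.g.
#   File.expand_path("/Applications/Firefox.app/Contents/MacOS/firefox-bin")
#   # => "/Applications/Firefox.app/Contents/MacOS/firefox-bin"  (already absolute, unchanged)
#   File.expand_path("~/Applications/Firefox.app/Contents/MacOS/firefox-bin")
#   # => "/Users/<user>/Applications/Firefox.app/Contents/MacOS/firefox-bin"
# so only the "~" variant actually needs expanding before File.exist? can find it.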
def windows_registry_path
require 'win32/registry'
lm = Win32::Registry::HKEY_LOCAL_MACHINE
lm.open("SOFTWARE\\Mozilla\\Mozilla Firefox") do |reg|
main = lm.open("SOFTWARE\\Mozilla\\Mozilla Firefox\\#{reg.keys[0]}\\Main")
if entry = main.find { |key, type, data| key =~ /pathtoexe/i }
return entry.last
end
end
rescue LoadError
# older JRuby or IronRuby does not have win32/registry
rescue Win32::Registry::Error
end
end # class << self
end # Binary
end # Firefox
end # WebDriver
end # Selenium
| 1 | 12,881 | I don't think this one needs to be changed since the only previous path is already an absolute one. | SeleniumHQ-selenium | rb |
@@ -200,7 +200,7 @@ public class BaseExpireSnapshotsSparkAction
}
private Dataset<Row> buildValidFileDF(TableMetadata metadata) {
- Table staticTable = newStaticTable(metadata, this.table.io());
+ Table staticTable = newStaticTable(metadata, this.table.io(), table.locationProvider());
return appendTypeString(buildValidDataFileDF(staticTable), DATA_FILE)
.union(appendTypeString(buildManifestFileDF(staticTable), MANIFEST))
.union(appendTypeString(buildManifestListDF(staticTable), MANIFEST_LIST)); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.actions;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
import org.apache.iceberg.HasTableOperations;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.actions.BaseExpireSnapshotsActionResult;
import org.apache.iceberg.actions.BaseSparkAction;
import org.apache.iceberg.actions.ExpireSnapshots;
import org.apache.iceberg.exceptions.NotFoundException;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Sets;
import org.apache.iceberg.spark.JobGroupInfo;
import org.apache.iceberg.util.PropertyUtil;
import org.apache.iceberg.util.Tasks;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.functions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.TableProperties.GC_ENABLED;
import static org.apache.iceberg.TableProperties.GC_ENABLED_DEFAULT;
/**
* An action that performs the same operation as {@link org.apache.iceberg.ExpireSnapshots} but uses Spark
* to determine the delta in files between the pre and post-expiration table metadata. All of the same
* restrictions of {@link org.apache.iceberg.ExpireSnapshots} also apply to this action.
* <p>
* This action first leverages {@link org.apache.iceberg.ExpireSnapshots} to expire snapshots and then
* uses metadata tables to find files that can be safely deleted. This is done by anti-joining two Datasets
* that contain all manifest and data files before and after the expiration. The snapshot expiration
* will be fully committed before any deletes are issued.
* <p>
* This operation performs a shuffle so the parallelism can be controlled through 'spark.sql.shuffle.partitions'.
* <p>
* Deletes are still performed locally after retrieving the results from the Spark executors.
*/
@SuppressWarnings("UnnecessaryAnonymousClass")
public class BaseExpireSnapshotsSparkAction
extends BaseSparkAction<ExpireSnapshots, ExpireSnapshots.Result> implements ExpireSnapshots {
private static final Logger LOG = LoggerFactory.getLogger(BaseExpireSnapshotsSparkAction.class);
private static final String DATA_FILE = "Data File";
private static final String MANIFEST = "Manifest";
private static final String MANIFEST_LIST = "Manifest List";
private static final String STREAM_RESULTS = "stream-results";
// Creates an executor service that runs each task in the thread that invokes execute/submit.
private static final ExecutorService DEFAULT_DELETE_EXECUTOR_SERVICE = null;
private final Table table;
private final TableOperations ops;
private final Consumer<String> defaultDelete = new Consumer<String>() {
@Override
public void accept(String file) {
ops.io().deleteFile(file);
}
};
private final Set<Long> expiredSnapshotIds = Sets.newHashSet();
private Long expireOlderThanValue = null;
private Integer retainLastValue = null;
private Consumer<String> deleteFunc = defaultDelete;
private ExecutorService deleteExecutorService = DEFAULT_DELETE_EXECUTOR_SERVICE;
private Dataset<Row> expiredFiles = null;
public BaseExpireSnapshotsSparkAction(SparkSession spark, Table table) {
super(spark);
this.table = table;
this.ops = ((HasTableOperations) table).operations();
ValidationException.check(
PropertyUtil.propertyAsBoolean(table.properties(), GC_ENABLED, GC_ENABLED_DEFAULT),
"Cannot expire snapshots: GC is disabled (deleting files may corrupt other tables)");
}
@Override
protected ExpireSnapshots self() {
return this;
}
@Override
public BaseExpireSnapshotsSparkAction executeDeleteWith(ExecutorService executorService) {
this.deleteExecutorService = executorService;
return this;
}
@Override
public BaseExpireSnapshotsSparkAction expireSnapshotId(long snapshotId) {
expiredSnapshotIds.add(snapshotId);
return this;
}
@Override
public BaseExpireSnapshotsSparkAction expireOlderThan(long timestampMillis) {
this.expireOlderThanValue = timestampMillis;
return this;
}
@Override
public BaseExpireSnapshotsSparkAction retainLast(int numSnapshots) {
Preconditions.checkArgument(1 <= numSnapshots,
"Number of snapshots to retain must be at least 1, cannot be: %s", numSnapshots);
this.retainLastValue = numSnapshots;
return this;
}
@Override
public BaseExpireSnapshotsSparkAction deleteWith(Consumer<String> newDeleteFunc) {
this.deleteFunc = newDeleteFunc;
return this;
}
/**
* Expires snapshots and commits the changes to the table, returning a Dataset of files to delete.
* <p>
* This does not delete data files. To delete data files, run {@link #execute()}.
* <p>
* This may be called before or after {@link #execute()} is called to return the expired file list.
*
* @return a Dataset of files that are no longer referenced by the table
*/
public Dataset<Row> expire() {
if (expiredFiles == null) {
// fetch metadata before expiration
Dataset<Row> originalFiles = buildValidFileDF(ops.current());
// perform expiration
org.apache.iceberg.ExpireSnapshots expireSnapshots = table.expireSnapshots().cleanExpiredFiles(false);
for (long id : expiredSnapshotIds) {
expireSnapshots = expireSnapshots.expireSnapshotId(id);
}
if (expireOlderThanValue != null) {
expireSnapshots = expireSnapshots.expireOlderThan(expireOlderThanValue);
}
if (retainLastValue != null) {
expireSnapshots = expireSnapshots.retainLast(retainLastValue);
}
expireSnapshots.commit();
// fetch metadata after expiration
Dataset<Row> validFiles = buildValidFileDF(ops.refresh());
// determine expired files
this.expiredFiles = originalFiles.except(validFiles);
}
return expiredFiles;
}
@Override
public ExpireSnapshots.Result execute() {
JobGroupInfo info = newJobGroupInfo("EXPIRE-SNAPSHOTS", "EXPIRE-SNAPSHOTS");
return withJobGroupInfo(info, this::doExecute);
}
private ExpireSnapshots.Result doExecute() {
boolean streamResults = PropertyUtil.propertyAsBoolean(options(), STREAM_RESULTS, false);
if (streamResults) {
return deleteFiles(expire().toLocalIterator());
} else {
return deleteFiles(expire().collectAsList().iterator());
}
}
private Dataset<Row> appendTypeString(Dataset<Row> ds, String type) {
return ds.select(new Column("file_path"), functions.lit(type).as("file_type"));
}
private Dataset<Row> buildValidFileDF(TableMetadata metadata) {
Table staticTable = newStaticTable(metadata, this.table.io());
return appendTypeString(buildValidDataFileDF(staticTable), DATA_FILE)
.union(appendTypeString(buildManifestFileDF(staticTable), MANIFEST))
.union(appendTypeString(buildManifestListDF(staticTable), MANIFEST_LIST));
}
/**
* Deletes files passed to it based on their type.
*
* @param expired an Iterator of Spark Rows of the structure (path: String, type: String)
* @return Statistics on which files were deleted
*/
private BaseExpireSnapshotsActionResult deleteFiles(Iterator<Row> expired) {
AtomicLong dataFileCount = new AtomicLong(0L);
AtomicLong manifestCount = new AtomicLong(0L);
AtomicLong manifestListCount = new AtomicLong(0L);
Tasks.foreach(expired)
.retry(3).stopRetryOn(NotFoundException.class).suppressFailureWhenFinished()
.executeWith(deleteExecutorService)
.onFailure((fileInfo, exc) -> {
String file = fileInfo.getString(0);
String type = fileInfo.getString(1);
LOG.warn("Delete failed for {}: {}", type, file, exc);
})
.run(fileInfo -> {
String file = fileInfo.getString(0);
String type = fileInfo.getString(1);
deleteFunc.accept(file);
switch (type) {
case DATA_FILE:
dataFileCount.incrementAndGet();
LOG.trace("Deleted Data File: {}", file);
break;
case MANIFEST:
manifestCount.incrementAndGet();
LOG.debug("Deleted Manifest: {}", file);
break;
case MANIFEST_LIST:
manifestListCount.incrementAndGet();
LOG.debug("Deleted Manifest List: {}", file);
break;
}
});
LOG.info("Deleted {} total files", dataFileCount.get() + manifestCount.get() + manifestListCount.get());
return new BaseExpireSnapshotsActionResult(dataFileCount.get(), manifestCount.get(), manifestListCount.get());
}
}
| 1 | 35,098 | Since we kept the original constructor we might not need this change now. | apache-iceberg | java |
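For orientation, the class javadoc in the record above describes the approach: list the valid files before and after snapshot expiration, anti-join the two Datasets, and delete the difference locally. The following is a minimal, hedged usage sketch rather than code from the record; the SparkSession/table wiring and the org.apache.iceberg.actions location of ExpireSnapshots.Result are assumptions, and only builder methods visible in the file (expireOlderThan, retainLast, execute) are used.

import java.util.concurrent.TimeUnit;
import org.apache.iceberg.Table;
import org.apache.iceberg.actions.ExpireSnapshots;
import org.apache.spark.sql.SparkSession;

class ExpireSnapshotsUsageSketch {
  // Expire snapshots older than a week while always keeping the five most recent ones.
  static ExpireSnapshots.Result expireWeekOldSnapshots(SparkSession spark, Table table) {
    long cutoff = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(7);
    return new BaseExpireSnapshotsSparkAction(spark, table)
        .expireOlderThan(cutoff) // snapshot expiration is committed before any deletes are issued
        .retainLast(5)           // lower bound on retained history
        .execute();              // anti-joins pre/post metadata, then deletes the orphaned files locally
  }
}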
@@ -130,6 +130,7 @@ public class HiveTableOperations extends BaseMetastoreTableOperations {
}
refreshFromMetadataLocation(metadataLocation);
+ LOG.debug("Refreshed [{}]", fullName);
}
@Override | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.hive;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockLevel;
import org.apache.hadoop.hive.metastore.api.LockRequest;
import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;
import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.iceberg.BaseMetastoreTableOperations;
import org.apache.iceberg.Schema;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.common.DynMethods;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.NoSuchIcebergTableException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.hadoop.HadoopFileIO;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.thrift.TException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* TODO we should be able to extract some more commonalities to BaseMetastoreTableOperations to
* avoid code duplication between this class and Metacat Tables.
*/
public class HiveTableOperations extends BaseMetastoreTableOperations {
private static final Logger LOG = LoggerFactory.getLogger(HiveTableOperations.class);
private static final String HIVE_ACQUIRE_LOCK_STATE_TIMEOUT_MS = "iceberg.hive.lock-timeout-ms";
private static final long HIVE_ACQUIRE_LOCK_STATE_TIMEOUT_MS_DEFAULT = 3 * 60 * 1000; // 3 minutes
private static final DynMethods.UnboundMethod ALTER_TABLE = DynMethods.builder("alter_table")
.impl(HiveMetaStoreClient.class, "alter_table_with_environmentContext",
String.class, String.class, Table.class, EnvironmentContext.class)
.impl(HiveMetaStoreClient.class, "alter_table",
String.class, String.class, Table.class, EnvironmentContext.class)
.build();
private final HiveClientPool metaClients;
private final String fullName;
private final String database;
private final String tableName;
private final Configuration conf;
private final long lockAcquireTimeout;
private FileIO fileIO;
protected HiveTableOperations(Configuration conf, HiveClientPool metaClients,
String catalogName, String database, String table) {
this.conf = conf;
this.metaClients = metaClients;
this.fullName = catalogName + "." + database + "." + table;
this.database = database;
this.tableName = table;
this.lockAcquireTimeout =
conf.getLong(HIVE_ACQUIRE_LOCK_STATE_TIMEOUT_MS, HIVE_ACQUIRE_LOCK_STATE_TIMEOUT_MS_DEFAULT);
}
@Override
public FileIO io() {
if (fileIO == null) {
fileIO = new HadoopFileIO(conf);
}
return fileIO;
}
@Override
protected void doRefresh() {
String metadataLocation = null;
try {
Table table = metaClients.run(client -> client.getTable(database, tableName));
validateTableIsIceberg(table, fullName);
metadataLocation = table.getParameters().get(METADATA_LOCATION_PROP);
} catch (NoSuchObjectException e) {
if (currentMetadataLocation() != null) {
throw new NoSuchTableException(String.format("No such table: %s.%s", database, tableName));
}
} catch (TException e) {
String errMsg = String.format("Failed to get table info from metastore %s.%s", database, tableName);
throw new RuntimeException(errMsg, e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted during refresh", e);
}
refreshFromMetadataLocation(metadataLocation);
}
@Override
protected void doCommit(TableMetadata base, TableMetadata metadata) {
String newMetadataLocation = writeNewMetadata(metadata, currentVersion() + 1);
boolean threw = true;
Optional<Long> lockId = Optional.empty();
try {
lockId = Optional.of(acquireLock());
// TODO add lock heart beating for cases where default lock timeout is too low.
Table tbl;
if (base != null) {
tbl = metaClients.run(client -> client.getTable(database, tableName));
tbl.setSd(storageDescriptor(metadata)); // set to pickup any schema changes
} else {
final long currentTimeMillis = System.currentTimeMillis();
tbl = new Table(tableName,
database,
System.getProperty("user.name"),
(int) currentTimeMillis / 1000,
(int) currentTimeMillis / 1000,
Integer.MAX_VALUE,
storageDescriptor(metadata),
Collections.emptyList(),
new HashMap<>(),
null,
null,
TableType.EXTERNAL_TABLE.toString());
tbl.getParameters().put("EXTERNAL", "TRUE"); // using the external table type also requires this
}
String metadataLocation = tbl.getParameters().get(METADATA_LOCATION_PROP);
String baseMetadataLocation = base != null ? base.metadataFileLocation() : null;
if (!Objects.equals(baseMetadataLocation, metadataLocation)) {
throw new CommitFailedException(
"Base metadata location '%s' is not same as the current table metadata location '%s' for %s.%s",
baseMetadataLocation, metadataLocation, database, tableName);
}
setParameters(newMetadataLocation, tbl);
if (base != null) {
metaClients.run(client -> {
EnvironmentContext envContext = new EnvironmentContext(
ImmutableMap.of(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE)
);
ALTER_TABLE.invoke(client, database, tableName, tbl, envContext);
return null;
});
} else {
metaClients.run(client -> {
client.createTable(tbl);
return null;
});
}
threw = false;
} catch (org.apache.hadoop.hive.metastore.api.AlreadyExistsException e) {
throw new AlreadyExistsException("Table already exists: %s.%s", database, tableName);
} catch (TException | UnknownHostException e) {
if (e.getMessage() != null && e.getMessage().contains("Table/View 'HIVE_LOCKS' does not exist")) {
throw new RuntimeException("Failed to acquire locks from metastore because 'HIVE_LOCKS' doesn't " +
"exist, this probably happened when using embedded metastore or doesn't create a " +
"transactional meta table. To fix this, use an alternative metastore", e);
}
throw new RuntimeException(String.format("Metastore operation failed for %s.%s", database, tableName), e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new RuntimeException("Interrupted during commit", e);
} finally {
if (threw) {
// if anything went wrong, clean up the uncommitted metadata file
io().deleteFile(newMetadataLocation);
}
unlock(lockId);
}
}
private void setParameters(String newMetadataLocation, Table tbl) {
Map<String, String> parameters = tbl.getParameters();
if (parameters == null) {
parameters = new HashMap<>();
}
parameters.put(TABLE_TYPE_PROP, ICEBERG_TABLE_TYPE_VALUE.toUpperCase(Locale.ENGLISH));
parameters.put(METADATA_LOCATION_PROP, newMetadataLocation);
if (currentMetadataLocation() != null && !currentMetadataLocation().isEmpty()) {
parameters.put(PREVIOUS_METADATA_LOCATION_PROP, currentMetadataLocation());
}
tbl.setParameters(parameters);
}
private StorageDescriptor storageDescriptor(TableMetadata metadata) {
final StorageDescriptor storageDescriptor = new StorageDescriptor();
storageDescriptor.setCols(columns(metadata.schema()));
storageDescriptor.setLocation(metadata.location());
storageDescriptor.setOutputFormat("org.apache.hadoop.mapred.FileOutputFormat");
storageDescriptor.setInputFormat("org.apache.hadoop.mapred.FileInputFormat");
SerDeInfo serDeInfo = new SerDeInfo();
serDeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
storageDescriptor.setSerdeInfo(serDeInfo);
return storageDescriptor;
}
private List<FieldSchema> columns(Schema schema) {
return schema.columns().stream()
.map(col -> new FieldSchema(col.name(), HiveTypeConverter.convert(col.type()), ""))
.collect(Collectors.toList());
}
private long acquireLock() throws UnknownHostException, TException, InterruptedException {
final LockComponent lockComponent = new LockComponent(LockType.EXCLUSIVE, LockLevel.TABLE, database);
lockComponent.setTablename(tableName);
final LockRequest lockRequest = new LockRequest(Lists.newArrayList(lockComponent),
System.getProperty("user.name"),
InetAddress.getLocalHost().getHostName());
LockResponse lockResponse = metaClients.run(client -> client.lock(lockRequest));
LockState state = lockResponse.getState();
long lockId = lockResponse.getLockid();
final long start = System.currentTimeMillis();
long duration = 0;
boolean timeout = false;
while (!timeout && state.equals(LockState.WAITING)) {
lockResponse = metaClients.run(client -> client.checkLock(lockId));
state = lockResponse.getState();
// check timeout
duration = System.currentTimeMillis() - start;
if (duration > lockAcquireTimeout) {
timeout = true;
} else {
Thread.sleep(50);
}
}
// timeout and do not have lock acquired
if (timeout && !state.equals(LockState.ACQUIRED)) {
throw new CommitFailedException(String.format("Timed out after %s ms waiting for lock on %s.%s",
duration, database, tableName));
}
if (!state.equals(LockState.ACQUIRED)) {
throw new CommitFailedException(String.format("Could not acquire the lock on %s.%s, " +
"lock request ended in state %s", database, tableName, state));
}
return lockId;
}
private void unlock(Optional<Long> lockId) {
if (lockId.isPresent()) {
try {
doUnlock(lockId.get());
} catch (Exception e) {
LOG.warn("Failed to unlock {}.{}", database, tableName, e);
}
}
}
// visible for testing
protected void doUnlock(long lockId) throws TException, InterruptedException {
metaClients.run(client -> {
client.unlock(lockId);
return null;
});
}
static void validateTableIsIceberg(Table table, String fullName) {
String tableType = table.getParameters().get(TABLE_TYPE_PROP);
NoSuchIcebergTableException.check(tableType != null && tableType.equalsIgnoreCase(ICEBERG_TABLE_TYPE_VALUE),
"Not an iceberg table: %s (type=%s)", fullName, tableType);
NoSuchIcebergTableException.check(table.getParameters().get(METADATA_LOCATION_PROP) != null,
"Not an iceberg table: %s missing %s", fullName, METADATA_LOCATION_PROP);
}
}
| 1 | 23,824 | There is already a log in `BaseMetastoreTableOperations` for this. It has the location, but not the table name. Maybe just add table name to that one. | apache-iceberg | java |
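A sketch of the reviewer's suggestion above, i.e. enriching the existing refresh log instead of adding a new one. The exact shape of the current log statement in BaseMetastoreTableOperations and the way the table name would be made available to it are assumptions, not code from either file.

// Hypothetical: assumed shape of the existing log line around refreshFromMetadataLocation(...)
LOG.info("Refreshing table metadata from new version: {}", newMetadataLocation);

// Reviewer's suggestion, sketched: carry the fully qualified table name into that same call
LOG.info("Refreshing table {} metadata from new version: {}", fullTableName, newMetadataLocation);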
@@ -21,6 +21,8 @@ import (
"net/http"
"time"
+ "github.com/pipe-cd/pipe/pkg/filestore/minio"
+
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/spf13/cobra"
"go.uber.org/zap" | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"errors"
"fmt"
"net/http"
"time"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/spf13/cobra"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"github.com/pipe-cd/pipe/pkg/admin"
"github.com/pipe-cd/pipe/pkg/app/api/api"
"github.com/pipe-cd/pipe/pkg/app/api/applicationlivestatestore"
"github.com/pipe-cd/pipe/pkg/app/api/authhandler"
"github.com/pipe-cd/pipe/pkg/app/api/commandstore"
"github.com/pipe-cd/pipe/pkg/app/api/pipedtokenverifier"
"github.com/pipe-cd/pipe/pkg/app/api/service/webservice"
"github.com/pipe-cd/pipe/pkg/app/api/stagelogstore"
"github.com/pipe-cd/pipe/pkg/cache/rediscache"
"github.com/pipe-cd/pipe/pkg/cli"
"github.com/pipe-cd/pipe/pkg/config"
"github.com/pipe-cd/pipe/pkg/datastore"
"github.com/pipe-cd/pipe/pkg/datastore/firestore"
"github.com/pipe-cd/pipe/pkg/datastore/mongodb"
"github.com/pipe-cd/pipe/pkg/filestore"
"github.com/pipe-cd/pipe/pkg/filestore/gcs"
"github.com/pipe-cd/pipe/pkg/jwt"
"github.com/pipe-cd/pipe/pkg/model"
"github.com/pipe-cd/pipe/pkg/redis"
"github.com/pipe-cd/pipe/pkg/rpc"
"github.com/pipe-cd/pipe/pkg/version"
)
var (
defaultSigningMethod = jwtgo.SigningMethodHS256
)
type httpHandler interface {
Register(func(pattern string, handler func(http.ResponseWriter, *http.Request)))
}
type server struct {
pipedAPIPort int
webAPIPort int
httpPort int
adminPort int
cacheAddress string
gracePeriod time.Duration
tls bool
certFile string
keyFile string
tokenSigningKeyFile string
configFile string
useFakeResponse bool
enableGRPCReflection bool
}
// NewCommand creates a new cobra command for executing api server.
func NewCommand() *cobra.Command {
s := &server{
pipedAPIPort: 9080,
webAPIPort: 9081,
httpPort: 9082,
adminPort: 9085,
cacheAddress: "cache:6379",
gracePeriod: 30 * time.Second,
}
cmd := &cobra.Command{
Use: "server",
Short: "Start running API server.",
RunE: cli.WithContext(s.run),
}
cmd.Flags().IntVar(&s.pipedAPIPort, "piped-api-port", s.pipedAPIPort, "The port number used to run a grpc server that serving serves incoming piped requests.")
cmd.Flags().IntVar(&s.webAPIPort, "web-api-port", s.webAPIPort, "The port number used to run a grpc server that serves incoming web requests.")
cmd.Flags().IntVar(&s.httpPort, "http-port", s.httpPort, "The port number used to run a http server that serves incoming http requests such as auth callbacks or webhook events.")
cmd.Flags().IntVar(&s.adminPort, "admin-port", s.adminPort, "The port number used to run a HTTP server for admin tasks such as metrics, healthz.")
cmd.Flags().StringVar(&s.cacheAddress, "cache-address", s.cacheAddress, "The address to cache service.")
cmd.Flags().DurationVar(&s.gracePeriod, "grace-period", s.gracePeriod, "How long to wait for graceful shutdown.")
cmd.Flags().BoolVar(&s.tls, "tls", s.tls, "Whether running the gRPC server with TLS or not.")
cmd.Flags().StringVar(&s.certFile, "cert-file", s.certFile, "The path to the TLS certificate file.")
cmd.Flags().StringVar(&s.keyFile, "key-file", s.keyFile, "The path to the TLS key file.")
cmd.Flags().StringVar(&s.tokenSigningKeyFile, "token-signing-key-file", s.tokenSigningKeyFile, "The path to key file used to sign ID token.")
cmd.Flags().StringVar(&s.configFile, "config-file", s.configFile, "The path to the configuration file.")
// For debugging early in development
cmd.Flags().BoolVar(&s.useFakeResponse, "use-fake-response", s.useFakeResponse, "Whether the server responds fake response or not.")
cmd.Flags().BoolVar(&s.enableGRPCReflection, "enable-grpc-reflection", s.enableGRPCReflection, "Whether to enable the reflection service or not.")
return cmd
}
func (s *server) run(ctx context.Context, t cli.Telemetry) error {
group, ctx := errgroup.WithContext(ctx)
// Load control plane configuration from the specified file.
cfg, err := s.loadConfig()
if err != nil {
t.Logger.Error("failed to load control-plane configuration",
zap.String("config-file", s.configFile),
zap.Error(err),
)
return err
}
var (
pipedAPIServer *rpc.Server
webAPIServer *rpc.Server
)
ds, err := s.createDatastore(ctx, cfg, t.Logger)
if err != nil {
t.Logger.Error("failed creating datastore", zap.Error(err))
return err
}
defer func() {
if err := ds.Close(); err != nil {
t.Logger.Error("failed closing datastore client", zap.Error(err))
}
}()
fs, err := s.createFilestore(ctx, cfg, t.Logger)
if err != nil {
t.Logger.Error("failed creating firestore", zap.Error(err))
return err
}
defer func() {
if err := fs.Close(); err != nil {
t.Logger.Error("failed closing firestore client", zap.Error(err))
}
}()
rd := redis.NewRedis(s.cacheAddress, "")
defer func() {
if err := rd.Close(); err != nil {
t.Logger.Error("failed closing redis client", zap.Error(err))
}
}()
cache := rediscache.NewTTLCache(rd, cfg.Cache.TTL.Duration())
sls := stagelogstore.NewStore(fs, cache, t.Logger)
alss := applicationlivestatestore.NewStore(fs, cache, t.Logger)
cmds := commandstore.NewStore(ds, cache, t.Logger)
// Start a gRPC server for handling PipedAPI requests.
{
var (
verifier = pipedtokenverifier.NewVerifier(ctx, cfg, ds)
service = api.NewPipedAPI(ds, sls, alss, cmds, t.Logger)
opts = []rpc.Option{
rpc.WithPort(s.pipedAPIPort),
rpc.WithGracePeriod(s.gracePeriod),
rpc.WithLogger(t.Logger),
rpc.WithLogUnaryInterceptor(t.Logger),
rpc.WithPipedTokenAuthUnaryInterceptor(verifier, t.Logger),
rpc.WithRequestValidationUnaryInterceptor(),
}
)
if s.tls {
opts = append(opts, rpc.WithTLS(s.certFile, s.keyFile))
}
if s.enableGRPCReflection {
opts = append(opts, rpc.WithGRPCReflection())
}
pipedAPIServer = rpc.NewServer(service, opts...)
group.Go(func() error {
return pipedAPIServer.Run(ctx)
})
}
// Start a gRPC server for handling WebAPI requests.
{
verifier, err := jwt.NewVerifier(defaultSigningMethod, s.tokenSigningKeyFile)
if err != nil {
t.Logger.Error("failed to create a new JWT verifier", zap.Error(err))
return err
}
var service rpc.Service
if s.useFakeResponse {
service = api.NewFakeWebAPI()
} else {
service = api.NewWebAPI(ds, sls, alss, cmds, t.Logger)
}
opts := []rpc.Option{
rpc.WithPort(s.webAPIPort),
rpc.WithGracePeriod(s.gracePeriod),
rpc.WithLogger(t.Logger),
rpc.WithJWTAuthUnaryInterceptor(verifier, webservice.NewRBACAuthorizer(), t.Logger),
rpc.WithRequestValidationUnaryInterceptor(),
}
if s.tls {
opts = append(opts, rpc.WithTLS(s.certFile, s.keyFile))
}
if s.enableGRPCReflection {
opts = append(opts, rpc.WithGRPCReflection())
}
webAPIServer = rpc.NewServer(service, opts...)
group.Go(func() error {
return webAPIServer.Run(ctx)
})
}
// Start an http server for handling incoming http requests such as auth callbacks or webhook events.
{
signer, err := jwt.NewSigner(defaultSigningMethod, s.tokenSigningKeyFile)
if err != nil {
t.Logger.Error("failed to create a new signer", zap.Error(err))
return err
}
mux := http.NewServeMux()
httpServer := &http.Server{
Addr: fmt.Sprintf(":%d", s.httpPort),
Handler: mux,
}
handlers := []httpHandler{
authhandler.NewHandler(signer, cfg.APIURL, cfg.StateKey, datastore.NewProjectStore(ds), t.Logger),
}
for _, h := range handlers {
h.Register(mux.HandleFunc)
}
group.Go(func() error {
return runHTTPServer(ctx, httpServer, s.gracePeriod, t.Logger)
})
}
// Start running admin server.
{
var (
ver = []byte(version.Get().Version)
admin = admin.NewAdmin(s.adminPort, s.gracePeriod, t.Logger)
)
admin.HandleFunc("/version", func(w http.ResponseWriter, r *http.Request) {
w.Write(ver)
})
admin.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("ok"))
})
admin.Handle("/metrics", t.PrometheusMetricsHandler())
group.Go(func() error {
return admin.Run(ctx)
})
}
// Wait until all components have finished.
// A terminating signal or a finish of any components
// could trigger the finish of server.
// This ensures that all components are good or no one.
if err := group.Wait(); err != nil {
t.Logger.Error("failed while running", zap.Error(err))
return err
}
return nil
}
func runHTTPServer(ctx context.Context, httpServer *http.Server, gracePeriod time.Duration, logger *zap.Logger) error {
doneCh := make(chan error, 1)
ctx, cancel := context.WithCancel(ctx)
go func() {
defer cancel()
logger.Info("start running http server")
if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
logger.Error("failed to listen and http server", zap.Error(err))
doneCh <- err
}
doneCh <- nil
}()
<-ctx.Done()
ctx, _ = context.WithTimeout(context.Background(), gracePeriod)
logger.Info("stopping http server")
if err := httpServer.Shutdown(ctx); err != nil {
logger.Error("failed to shutdown http server", zap.Error(err))
}
return <-doneCh
}
func (s *server) loadConfig() (*config.ControlPlaneSpec, error) {
cfg, err := config.LoadFromYAML(s.configFile)
if err != nil {
return nil, err
}
if cfg.Kind != config.KindControlPlane {
return nil, fmt.Errorf("wrong configuration kind for control-plane: %v", cfg.Kind)
}
return cfg.ControlPlaneSpec, nil
}
func (s *server) createDatastore(ctx context.Context, cfg *config.ControlPlaneSpec, logger *zap.Logger) (datastore.DataStore, error) {
switch cfg.Datastore.Type {
case model.DataStoreFirestore:
fsConfig := cfg.Datastore.FirestoreConfig
options := []firestore.Option{
firestore.WithCredentialsFile(fsConfig.CredentialsFile),
firestore.WithLogger(logger),
}
return firestore.NewFireStore(ctx, fsConfig.Project, fsConfig.Namespace, fsConfig.Environment, options...)
case model.DataStoreDynamoDB:
return nil, errors.New("dynamodb is unimplemented yet")
case model.DataStoreMongoDB:
mdConfig := cfg.Datastore.MongoDBConfig
options := []mongodb.Option{
mongodb.WithLogger(logger),
}
return mongodb.NewMongoDB(ctx, mdConfig.URL, mdConfig.Database, options...)
default:
return nil, fmt.Errorf("unknown datastore type %q", cfg.Datastore.Type)
}
}
func (s *server) createFilestore(ctx context.Context, cfg *config.ControlPlaneSpec, logger *zap.Logger) (filestore.Store, error) {
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
if cfg.Filestore.GCSConfig != nil {
gcsConfig := cfg.Filestore.GCSConfig
options := []gcs.Option{
gcs.WithLogger(logger),
}
if gcsConfig.CredentialsFile != "" {
options = append(options, gcs.WithCredentialsFile(gcsConfig.CredentialsFile))
}
client, err := gcs.NewStore(ctx, gcsConfig.Bucket, options...)
if err != nil {
return nil, err
}
return client, nil
}
if cfg.Filestore.S3Config != nil {
return nil, errors.New("s3 is unimplemented now")
}
//return nil, errors.New("filestore configuration is invalid")
return nil, nil
}
| 1 | 9,533 | This should be in the last group. | pipe-cd-pipe | go |
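"This should be in the last group" refers to Go import grouping: the new minio import was added as a standalone block instead of joining the project-local group. A sketch of the grouping being asked for, abbreviated from the import list in the file above:

import (
	"context"
	"net/http"
	"time"

	jwtgo "github.com/dgrijalva/jwt-go"
	"github.com/spf13/cobra"
	"go.uber.org/zap"

	"github.com/pipe-cd/pipe/pkg/filestore"
	"github.com/pipe-cd/pipe/pkg/filestore/gcs"
	"github.com/pipe-cd/pipe/pkg/filestore/minio" // moved into the last, project-local group
)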
@@ -106,6 +106,7 @@ namespace Datadog.Trace.Agent
{
// stop retrying
Log.Error(exception, "An error occurred while sending traces to the agent at {0}", _tracesEndpoint);
+ _statsd?.Send();
return false;
}
| 1 | using System;
using System.Collections.Generic;
using System.Net.Sockets;
using System.Threading.Tasks;
using Datadog.Trace.Agent.MessagePack;
using Datadog.Trace.DogStatsd;
using Datadog.Trace.Logging;
using Datadog.Trace.PlatformHelpers;
using Datadog.Trace.Vendors.Newtonsoft.Json;
using Datadog.Trace.Vendors.StatsdClient;
namespace Datadog.Trace.Agent
{
internal class Api : IApi
{
private const string TracesPath = "/v0.4/traces";
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.For<Api>();
private readonly IApiRequestFactory _apiRequestFactory;
private readonly IStatsd _statsd;
private readonly FormatterResolverWrapper _formatterResolver = new FormatterResolverWrapper(SpanFormatterResolver.Instance);
private readonly string _containerId;
private readonly FrameworkDescription _frameworkDescription;
private Uri _tracesEndpoint; // The Uri may be reassigned dynamically so that retry attempts may attempt updated Agent ports
public Api(Uri baseEndpoint, IApiRequestFactory apiRequestFactory, IStatsd statsd)
{
Log.Debug("Creating new Api");
_tracesEndpoint = new Uri(baseEndpoint, TracesPath);
_statsd = statsd;
_containerId = ContainerMetadata.GetContainerId();
_apiRequestFactory = apiRequestFactory ?? new ApiWebRequestFactory();
// report runtime details
try
{
_frameworkDescription = FrameworkDescription.Create();
if (_frameworkDescription != null)
{
Log.Information(_frameworkDescription.ToString());
}
}
catch (Exception e)
{
Log.SafeLogError(e, "Error getting framework description");
}
}
public void SetBaseEndpoint(Uri baseEndpoint)
{
_tracesEndpoint = new Uri(baseEndpoint, TracesPath);
}
public async Task<bool> SendTracesAsync(Span[][] traces)
{
// retry up to 5 times with exponential back-off
var retryLimit = 5;
var retryCount = 1;
var sleepDuration = 100; // in milliseconds
var traceIds = GetUniqueTraceIds(traces);
while (true)
{
var request = _apiRequestFactory.Create(_tracesEndpoint);
// Set additional headers
request.AddHeader(AgentHttpHeaderNames.TraceCount, traceIds.Count.ToString());
if (_frameworkDescription != null)
{
request.AddHeader(AgentHttpHeaderNames.LanguageInterpreter, _frameworkDescription.Name);
request.AddHeader(AgentHttpHeaderNames.LanguageVersion, _frameworkDescription.ProductVersion);
}
if (_containerId != null)
{
request.AddHeader(AgentHttpHeaderNames.ContainerId, _containerId);
}
bool success = false;
Exception exception = null;
try
{
success = await SendTracesAsync(traces, request).ConfigureAwait(false);
}
catch (Exception ex)
{
#if DEBUG
if (ex.InnerException is InvalidOperationException ioe)
{
Log.Error(ex, "An error occurred while sending traces to the agent at {0}", _tracesEndpoint);
return false;
}
#endif
exception = ex;
}
// Error handling block
if (!success)
{
// Exit if we've hit our retry limit
if (retryCount >= retryLimit)
{
// stop retrying
Log.Error(exception, "An error occurred while sending traces to the agent at {0}", _tracesEndpoint);
return false;
}
// Before retry delay
bool isSocketException = false;
Exception innerException = exception;
while (innerException != null)
{
if (innerException is SocketException)
{
isSocketException = true;
break;
}
innerException = innerException.InnerException;
}
if (isSocketException)
{
Log.Error(exception, "Unable to communicate with the trace agent at {0}", _tracesEndpoint);
TracingProcessManager.TryForceTraceAgentRefresh();
}
// Execute retry delay
await Task.Delay(sleepDuration).ConfigureAwait(false);
retryCount++;
sleepDuration *= 2;
// After retry delay
if (isSocketException)
{
// Ensure we have the most recent port before trying again
TracingProcessManager.TraceAgentMetadata.ForcePortFileRead();
}
continue;
}
_statsd?.Send();
return true;
}
}
private static HashSet<ulong> GetUniqueTraceIds(Span[][] traces)
{
var uniqueTraceIds = new HashSet<ulong>();
foreach (var trace in traces)
{
foreach (var span in trace)
{
uniqueTraceIds.Add(span.TraceId);
}
}
return uniqueTraceIds;
}
private async Task<bool> SendTracesAsync(Span[][] traces, IApiRequest request)
{
IApiResponse response = null;
try
{
try
{
_statsd?.AppendIncrementCount(TracerMetricNames.Api.Requests);
response = await request.PostAsync(traces, _formatterResolver).ConfigureAwait(false);
}
catch
{
// count only network/infrastructure errors, not valid responses with error status codes
// (which are handled below)
_statsd?.AppendIncrementCount(TracerMetricNames.Api.Errors);
throw;
}
if (_statsd != null)
{
// don't bother creating the tags array if trace metrics are disabled
string[] tags = { $"status:{response.StatusCode}" };
// count every response, grouped by status code
_statsd.AppendIncrementCount(TracerMetricNames.Api.Responses, tags: tags);
}
// Attempt a retry if the status code is not SUCCESS
if (response.StatusCode < 200 || response.StatusCode >= 300)
{
return false;
}
try
{
if (response.ContentLength > 0 && Tracer.Instance.Sampler != null)
{
var responseContent = await response.ReadAsStringAsync().ConfigureAwait(false);
var apiResponse = JsonConvert.DeserializeObject<ApiResponse>(responseContent);
Tracer.Instance.Sampler.SetDefaultSampleRates(apiResponse?.RateByService);
}
}
catch (Exception ex)
{
Log.Error(ex, "Traces sent successfully to the Agent at {0}, but an error occurred deserializing the response.", _tracesEndpoint);
}
}
finally
{
response?.Dispose();
}
return true;
}
internal class ApiResponse
{
[JsonProperty("rate_by_service")]
public Dictionary<string, float> RateByService { get; set; }
}
}
}
| 1 | 17,592 | I believe you should either remove this or rename the PR. You can't change the behavior of the tracer (even to fix a bug) in a PR named "unit test improvements" | DataDog-dd-trace-dotnet | .cs |
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/munnerz/cert-manager/pkg/apis/certmanager
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// Package v1alpha1 is the v1alpha1 version of the API.
+// +groupName=certmanager.k8s.io
+package v1alpha1 | 1 | 1 | 10,629 | I've gone with this group name, however I'm open to suggestions on alternatives! | jetstack-cert-manager | go |
|
@@ -32,6 +32,7 @@ public class RpcApis {
public static final RpcApi TX_POOL = new RpcApi("TXPOOL");
public static final RpcApi TRACE = new RpcApi("TRACE");
public static final RpcApi PLUGINS = new RpcApi("PLUGINS");
+ public static final RpcApi QUORUM = new RpcApi("QUORUM");
public static final List<RpcApi> DEFAULT_JSON_RPC_APIS = Arrays.asList(ETH, NET, WEB3);
| 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.api.jsonrpc;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
public class RpcApis {
public static final RpcApi ETH = new RpcApi("ETH");
public static final RpcApi DEBUG = new RpcApi("DEBUG");
public static final RpcApi MINER = new RpcApi("MINER");
public static final RpcApi NET = new RpcApi("NET");
public static final RpcApi PERM = new RpcApi("PERM");
public static final RpcApi WEB3 = new RpcApi("WEB3");
public static final RpcApi ADMIN = new RpcApi("ADMIN");
public static final RpcApi EEA = new RpcApi("EEA");
public static final RpcApi PRIV = new RpcApi("PRIV");
public static final RpcApi TX_POOL = new RpcApi("TXPOOL");
public static final RpcApi TRACE = new RpcApi("TRACE");
public static final RpcApi PLUGINS = new RpcApi("PLUGINS");
public static final List<RpcApi> DEFAULT_JSON_RPC_APIS = Arrays.asList(ETH, NET, WEB3);
public static Optional<RpcApi> valueOf(final String name) {
if (name.equals(ETH.getCliValue())) {
return Optional.of(ETH);
} else if (name.equals(DEBUG.getCliValue())) {
return Optional.of(DEBUG);
} else if (name.equals(MINER.getCliValue())) {
return Optional.of(MINER);
} else if (name.equals(NET.getCliValue())) {
return Optional.of(NET);
} else if (name.equals(PERM.getCliValue())) {
return Optional.of(PERM);
} else if (name.equals(WEB3.getCliValue())) {
return Optional.of(WEB3);
} else if (name.equals(ADMIN.getCliValue())) {
return Optional.of(ADMIN);
} else if (name.equals(EEA.getCliValue())) {
return Optional.of(EEA);
} else if (name.equals(PRIV.getCliValue())) {
return Optional.of(PRIV);
} else if (name.equals(TX_POOL.getCliValue())) {
return Optional.of(TX_POOL);
} else if (name.equals(TRACE.getCliValue())) {
return Optional.of(TRACE);
} else if (name.equals(PLUGINS.getCliValue())) {
return Optional.of(PLUGINS);
} else {
return Optional.empty();
}
}
public static String getValue(final RpcApi rpcapi) {
return rpcapi.getCliValue();
}
}
| 1 | 24,284 | Should this be GOQUORUM? | hyperledger-besu | java |
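If the constant were renamed as the question suggests, the hand-rolled valueOf(String) chain earlier in the same file would also need a matching branch. A hedged sketch of both pieces (not a committed change):

// Hypothetical rename of the new constant
public static final RpcApi GOQUORUM = new RpcApi("GOQUORUM");

// ...and the corresponding branch inside valueOf(final String name):
//   } else if (name.equals(GOQUORUM.getCliValue())) {
//     return Optional.of(GOQUORUM);
//   }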
@@ -49,7 +49,7 @@ type (
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
// Validate for the byzantine node uses the actual block validator and returns the opposite
-func (v *byzVal) Validate(blk *blockchain.Block, tipHeight uint64, tipHash hash.Hash32B) error {
+func (v *byzVal) Validate(blk *blockchain.Block, tipHeight uint64, tipHash hash.Hash32B, checkCoinbase bool) error {
//err := v.val.Validate(blk, tipHeight, tipHash)
//if err != nil {
// return nil | 1 | // Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package sim
import (
"context"
"encoding/hex"
"flag"
"fmt"
"net"
"os"
"runtime/pprof"
"strconv"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"github.com/iotexproject/iotex-core/actpool"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
pb "github.com/iotexproject/iotex-core/consensus/sim/proto"
"github.com/iotexproject/iotex-core/iotxaddress"
"github.com/iotexproject/iotex-core/logger"
"github.com/iotexproject/iotex-core/network"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/iotexproject/iotex-core/pkg/keypair"
)
const (
port = ":50051"
dummyMsgType = 1999
)
// server is used to implement message.SimulatorServer.
type (
server struct {
nodes []Sim // slice of Consensus objects
}
byzVal struct {
val blockchain.Validator
}
)
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
// Validate for the byzantine node uses the actual block validator and returns the opposite
func (v *byzVal) Validate(blk *blockchain.Block, tipHeight uint64, tipHash hash.Hash32B) error {
//err := v.val.Validate(blk, tipHeight, tipHash)
//if err != nil {
// return nil
//}
//return errors.New("")
return nil
}
// Ping implements simulator.SimulatorServer
func (s *server) Init(in *pb.InitRequest, stream pb.Simulator_InitServer) error {
nPlayers := in.NBF + in.NFS + in.NHonest
ctx := context.Background()
for i := 0; i < int(nPlayers); i++ {
cfg := config.Default
// s.nodes = make([]consensus.Sim, in.NPlayers)
// allocate all the necessary space now because otherwise nodes will get copied and create pointer issues
cfg.Consensus.Scheme = config.RollDPoSScheme
cfg.Consensus.RollDPoS.DelegateInterval = time.Millisecond
cfg.Consensus.RollDPoS.ProposerInterval = 0
cfg.Consensus.RollDPoS.UnmatchedEventTTL = 1000 * time.Second
cfg.Consensus.RollDPoS.RoundStartTTL = 1000 * time.Second
cfg.Consensus.RollDPoS.AcceptProposeTTL = 1000 * time.Second
cfg.Consensus.RollDPoS.AcceptPrevoteTTL = 1000 * time.Second
cfg.Consensus.RollDPoS.AcceptVoteTTL = 1000 * time.Second
// handle node address, delegate addresses, etc.
cfg.Network.Host = "127.0.0.1"
cfg.Network.Port = 10000
cfg.Network.NumPeersLowerBound = 6
cfg.Network.NumPeersUpperBound = 12
// create public/private key pair and address
addr, err := iotxaddress.NewAddress(iotxaddress.IsTestnet, iotxaddress.ChainID)
if err != nil {
logger.Error().Err(err).Msg("failed to create public/private key pair together with the address derived.")
}
cfg.Chain.ProducerPrivKey = keypair.EncodePrivateKey(addr.PrivateKey)
cfg.Chain.ProducerPubKey = keypair.EncodePublicKey(addr.PublicKey)
// set chain database path
cfg.Chain.ChainDBPath = "./chain" + strconv.Itoa(i) + ".db"
cfg.Chain.TrieDBPath = "./trie" + strconv.Itoa(i) + ".db"
bc := blockchain.NewBlockchain(&cfg, blockchain.DefaultStateFactoryOption(), blockchain.BoltDBDaoOption())
if err := bc.Start(ctx); err != nil {
logger.Panic().Err(err).Msg("error when starting blockchain")
}
if i >= int(in.NFS+in.NHonest) { // is byzantine node
val := bc.Validator()
byzVal := &byzVal{val: val}
bc.SetValidator(byzVal)
}
overlay := network.NewOverlay(&cfg.Network)
ap, err := actpool.NewActPool(bc, cfg.ActPool)
if err != nil {
logger.Fatal().Err(err).Msg("Fail to create actpool")
}
var node Sim
if i < int(in.NHonest) {
node = NewSim(&cfg, bc, ap, overlay)
} else if i < int(in.NHonest+in.NFS) {
s.nodes = append(s.nodes, nil)
continue
} else {
node = NewSimByzantine(&cfg, bc, ap, overlay)
}
s.nodes = append(s.nodes, node)
done := make(chan bool)
node.SetDoneStream(done)
err = node.Start(ctx)
if err != nil {
logger.Fatal().Err(err).Msg("Fail to start node")
}
fmt.Printf("Node %d initialized and consensus engine started\n", i)
time.Sleep(2 * time.Millisecond)
<-done
fmt.Printf("Node %d initialization ended\n", i)
//s.nodes = append(s.nodes, node)
}
for i := 0; i < int(in.NFS); i++ {
s.nodes = append(s.nodes, nil)
}
fmt.Printf("Simulator initialized with %d players\n", nPlayers)
return nil
}
// Ping implements simulator.SimulatorServer
func (s *server) Ping(in *pb.Request, stream pb.Simulator_PingServer) error {
fmt.Println()
fmt.Printf("Node %d pinged; opened message stream\n", in.PlayerID)
msgValue, err := hex.DecodeString(in.Value)
if err != nil {
logger.Error().Msg("Could not decode message value into byte array")
}
done := make(chan bool)
s.nodes[in.PlayerID].SetStream(&stream)
s.nodes[in.PlayerID].SendUnsent()
// message type of 1999 means that it's a dummy message to allow the engine to pass back proposed blocks
if in.InternalMsgType != dummyMsgType {
msg := CombineMsg(in.InternalMsgType, msgValue)
err = s.nodes[in.PlayerID].HandleViewChange(msg, done)
if err != nil {
logger.Error().Err(err).Msg("failed to handle view change")
}
time.Sleep(2 * time.Millisecond)
<-done // wait until done
}
fmt.Println("closed message stream")
return nil
}
func (s *server) Exit(context context.Context, in *pb.Empty) (*pb.Empty, error) {
defer os.Exit(0)
defer pprof.StopCPUProfile()
return &pb.Empty{}, nil
}
func main() {
flag.Parse()
if *cpuprofile != "" {
f, err := os.Create(*cpuprofile)
if err != nil {
logger.Fatal().Err(err).Msg("failed to create file")
}
err = pprof.StartCPUProfile(f)
if err != nil {
logger.Fatal().Err(err).Msg("failed to start CPU profile")
}
}
lis, err := net.Listen("tcp", port)
if err != nil {
logger.Fatal().Err(err).Msg("failed to listen")
}
s := grpc.NewServer()
pb.RegisterSimulatorServer(s, &server{})
// Register reflection service on gRPC server.
reflection.Register(s)
if err := s.Serve(lis); err != nil {
logger.Fatal().Err(err).Msg("failed to serve")
}
}
| 1 | 12,154 | I think checkCoinbase does not skip the coinbase check. Instead, true means checking that num(coinbase tx) = 1, and false means checking that num(coinbase tx) = 0. | iotexproject-iotex-core | go |
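A sketch of the flag semantics described in the comment above: the boolean is assumed to toggle the expected number of coinbase transfers rather than to skip the check entirely. countCoinbaseTransfers is a hypothetical helper, and the fmt and blockchain imports are assumed to be in place.

// Hedged sketch only; not code from the record above.
func validateCoinbaseCount(blk *blockchain.Block, checkCoinbase bool) error {
	expected := 0
	if checkCoinbase {
		expected = 1 // true: the block must contain exactly one coinbase transfer
	}
	if got := countCoinbaseTransfers(blk); got != expected {
		return fmt.Errorf("wrong number of coinbase transfers: got %d, want %d", got, expected)
	}
	return nil
}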
@@ -24,14 +24,13 @@ import (
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/state"
- "github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_chainmanager"
"github.com/iotexproject/iotex-election/test/mock/mock_committee"
"github.com/iotexproject/iotex-election/types"
)
-func initConstruct(t *testing.T) (Protocol, context.Context, factory.WorkingSet, *types.ElectionResult) {
+func initConstruct(t *testing.T) (Protocol, context.Context, protocol.StateManager, *types.ElectionResult) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish() | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package poll
import (
"context"
"math/big"
"strings"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/require"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/vote/candidatesutil"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/test/mock/mock_chainmanager"
"github.com/iotexproject/iotex-election/test/mock/mock_committee"
"github.com/iotexproject/iotex-election/types"
)
func initConstruct(t *testing.T) (Protocol, context.Context, factory.WorkingSet, *types.ElectionResult) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
cfg := config.Default
ctx := protocol.WithRunActionsCtx(
context.Background(),
protocol.RunActionsCtx{
BlockHeight: 123456,
},
)
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
require.NoError(err)
require.NoError(sf.Start(ctx))
defer func() {
require.NoError(sf.Stop(ctx))
}()
ws, err := sf.NewWorkingSet()
require.NoError(err)
committee := mock_committee.NewMockCommittee(ctrl)
r := types.NewElectionResultForTest(time.Now())
committee.EXPECT().ResultByHeight(uint64(123456)).Return(r, nil).AnyTimes()
committee.EXPECT().HeightByTime(gomock.Any()).Return(uint64(123456), nil).AnyTimes()
p, err := NewGovernanceChainCommitteeProtocol(
nil,
committee,
uint64(123456),
func(uint64) (time.Time, error) { return time.Now(), nil },
func(uint64) uint64 { return 1 },
func(uint64) uint64 { return 1 },
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Chain.PollInitialCandidatesInterval,
)
require.NoError(err)
return p, ctx, ws, r
}
func TestInitialize(t *testing.T) {
require := require.New(t)
p, ctx, ws, r := initConstruct(t)
require.NoError(p.Initialize(ctx, ws))
var sc state.CandidateList
require.NoError(ws.State(candidatesutil.ConstructKey(1), &sc))
candidates, err := state.CandidatesToMap(sc)
require.NoError(err)
require.Equal(2, len(candidates))
for _, d := range r.Delegates() {
operator := string(d.OperatorAddress())
addr, err := address.FromString(operator)
require.NoError(err)
c, ok := candidates[hash.BytesToHash160(addr.Bytes())]
require.True(ok)
require.Equal(addr.String(), c.Address)
}
}
func TestHandle(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
sm := mock_chainmanager.NewMockStateManager(ctrl)
cb := db.NewCachedBatch()
sm.EXPECT().GetCachedBatch().Return(cb).AnyTimes()
sm.EXPECT().State(gomock.Any(), gomock.Any()).Return(state.ErrStateNotExist).AnyTimes()
sm.EXPECT().PutState(gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
sm.EXPECT().Snapshot().Return(1).AnyTimes()
p, ctx, ws, _ := initConstruct(t)
require.NoError(p.Initialize(ctx, ws))
// wrong action
recipientAddr := identityset.Address(28)
senderKey := identityset.PrivateKey(27)
tsf, err := action.NewTransfer(0, big.NewInt(10), recipientAddr.String(), []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetGasLimit(uint64(100000)).
SetGasPrice(big.NewInt(10)).
SetAction(tsf).Build()
selp, err := action.Sign(elp, senderKey)
require.NoError(err)
require.NotNil(selp)
// Case 1: wrong action type
receipt, err := p.Handle(ctx, selp.Action(), nil)
require.NoError(err)
require.Nil(receipt)
// Case 2: right action type,setCandidates error
var sc state.CandidateList
require.NoError(ws.State(candidatesutil.ConstructKey(1), &sc))
act := action.NewPutPollResult(1, 123456, sc)
elp = bd.SetGasLimit(uint64(100000)).
SetGasPrice(big.NewInt(10)).
SetAction(act).Build()
selp, err = action.Sign(elp, senderKey)
require.NoError(err)
require.NotNil(selp)
receipt, err = p.Handle(ctx, selp.Action(), sm)
require.Error(err)
require.Nil(receipt)
// Case 3: all right
p3, ctx3, ws3, _ := initConstruct(t)
require.NoError(p3.Initialize(ctx3, ws3))
var sc3 state.CandidateList
require.NoError(ws3.State(candidatesutil.ConstructKey(1), &sc3))
act3 := action.NewPutPollResult(1, 1, sc3)
elp = bd.SetGasLimit(uint64(100000)).
SetGasPrice(big.NewInt(10)).
SetAction(act3).Build()
selp3, err := action.Sign(elp, senderKey)
require.NoError(err)
require.NotNil(selp3)
receipt, err = p.Handle(ctx3, selp3.Action(), sm)
require.NoError(err)
require.NotNil(receipt)
}
func TestProtocol_Validate(t *testing.T) {
require := require.New(t)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
p, ctx, ws, _ := initConstruct(t)
require.NoError(p.Initialize(ctx, ws))
// wrong action
recipientAddr := identityset.Address(28)
senderKey := identityset.PrivateKey(27)
tsf, err := action.NewTransfer(0, big.NewInt(10), recipientAddr.String(), []byte{}, uint64(100000), big.NewInt(10))
require.NoError(err)
bd := &action.EnvelopeBuilder{}
elp := bd.SetGasLimit(uint64(100000)).
SetGasPrice(big.NewInt(10)).
SetAction(tsf).Build()
selp, err := action.Sign(elp, senderKey)
require.NoError(err)
require.NotNil(selp)
// Case 1: wrong action type
require.NoError(p.Validate(ctx, selp.Action()))
// Case 2: Only producer could create this protocol
p2, ctx2, ws2, _ := initConstruct(t)
require.NoError(p2.Initialize(ctx2, ws2))
var sc2 state.CandidateList
require.NoError(ws2.State(candidatesutil.ConstructKey(1), &sc2))
act2 := action.NewPutPollResult(1, 1, sc2)
elp = bd.SetGasLimit(uint64(100000)).
SetGasPrice(big.NewInt(10)).
SetAction(act2).Build()
selp2, err := action.Sign(elp, senderKey)
require.NoError(err)
require.NotNil(selp2)
caller, err := address.FromBytes(selp.SrcPubkey().Hash())
require.NoError(err)
ctx2 = protocol.WithValidateActionsCtx(
context.Background(),
protocol.ValidateActionsCtx{
BlockHeight: 1,
ProducerAddr: recipientAddr.String(),
Caller: caller,
},
)
err = p.Validate(ctx2, selp2.Action())
require.True(strings.Contains(err.Error(), "Only producer could create this protocol"))
// Case 3: duplicate candidate
p3, ctx3, ws3, _ := initConstruct(t)
require.NoError(p3.Initialize(ctx3, ws3))
var sc3 state.CandidateList
require.NoError(ws3.State(candidatesutil.ConstructKey(1), &sc3))
sc3 = append(sc3, &state.Candidate{"1", big.NewInt(10), "2", nil})
sc3 = append(sc3, &state.Candidate{"1", big.NewInt(10), "2", nil})
act3 := action.NewPutPollResult(1, 1, sc3)
elp = bd.SetGasLimit(uint64(100000)).
SetGasPrice(big.NewInt(10)).
SetAction(act3).Build()
selp3, err := action.Sign(elp, senderKey)
require.NoError(err)
require.NotNil(selp3)
ctx3 = protocol.WithValidateActionsCtx(
context.Background(),
protocol.ValidateActionsCtx{
BlockHeight: 1,
ProducerAddr: identityset.Address(27).String(),
Caller: caller,
},
)
err = p.Validate(ctx3, selp3.Action())
require.True(strings.Contains(err.Error(), "duplicate candidate"))
// Case 4: delegate's length is not equal
p4, ctx4, ws4, _ := initConstruct(t)
require.NoError(p4.Initialize(ctx4, ws4))
var sc4 state.CandidateList
require.NoError(ws4.State(candidatesutil.ConstructKey(1), &sc4))
sc4 = append(sc4, &state.Candidate{"1", big.NewInt(10), "2", nil})
act4 := action.NewPutPollResult(1, 1, sc4)
bd4 := &action.EnvelopeBuilder{}
elp4 := bd4.SetGasLimit(uint64(100000)).
SetGasPrice(big.NewInt(10)).
SetAction(act4).Build()
selp4, err := action.Sign(elp4, senderKey)
require.NoError(err)
require.NotNil(selp4)
ctx4 = protocol.WithValidateActionsCtx(
context.Background(),
protocol.ValidateActionsCtx{
BlockHeight: 1,
ProducerAddr: identityset.Address(27).String(),
Caller: caller,
},
)
err = p4.Validate(ctx4, selp4.Action())
require.True(strings.Contains(err.Error(), "the proposed delegate list length"))
// Case 5: candidate's vote is not equal
p5, ctx5, ws5, _ := initConstruct(t)
require.NoError(p5.Initialize(ctx5, ws5))
var sc5 state.CandidateList
require.NoError(ws5.State(candidatesutil.ConstructKey(1), &sc5))
sc5[0].Votes = big.NewInt(10)
act5 := action.NewPutPollResult(1, 1, sc5)
bd5 := &action.EnvelopeBuilder{}
elp5 := bd5.SetGasLimit(uint64(100000)).
SetGasPrice(big.NewInt(10)).
SetAction(act5).Build()
selp5, err := action.Sign(elp5, senderKey)
require.NoError(err)
require.NotNil(selp5)
ctx5 = protocol.WithValidateActionsCtx(
context.Background(),
protocol.ValidateActionsCtx{
BlockHeight: 1,
ProducerAddr: identityset.Address(27).String(),
Caller: caller,
},
)
err = p5.Validate(ctx5, selp5.Action())
require.True(strings.Contains(err.Error(), "delegates are not as expected"))
// Case 6: all good
p6, ctx6, ws6, _ := initConstruct(t)
require.NoError(p6.Initialize(ctx6, ws6))
var sc6 state.CandidateList
require.NoError(ws6.State(candidatesutil.ConstructKey(1), &sc6))
act6 := action.NewPutPollResult(1, 1, sc6)
bd6 := &action.EnvelopeBuilder{}
elp6 := bd6.SetGasLimit(uint64(100000)).
SetGasPrice(big.NewInt(10)).
SetAction(act6).Build()
selp6, err := action.Sign(elp6, senderKey)
require.NoError(err)
require.NotNil(selp6)
caller6, err := address.FromBytes(selp6.SrcPubkey().Hash())
require.NoError(err)
ctx6 = protocol.WithValidateActionsCtx(
context.Background(),
protocol.ValidateActionsCtx{
BlockHeight: 1,
ProducerAddr: identityset.Address(27).String(),
Caller: caller6,
},
)
err = p6.Validate(ctx6, selp6.Action())
require.NoError(err)
}
| 1 | 19,623 | same here, move it out | iotexproject-iotex-core | go |
@@ -42,11 +42,17 @@
//@HEADER
*/
+#ifndef KOKKOS_TOOLS_INDEPENDENT_BUILD
#include <Kokkos_Macros.hpp>
#include <Kokkos_Tuners.hpp>
+#endif
+
#include <impl/Kokkos_Profiling.hpp>
-#if defined(KOKKOS_ENABLE_LIBDL)
+#include <impl/Kokkos_Profiling_Interface.hpp>
+
+#if defined(KOKKOS_ENABLE_LIBDL) || defined(KOKKOS_TOOLS_INDEPENDENT_BUILD)
#include <dlfcn.h>
+#define KOKKOS_TOOLS_ENABLE_LIBDL
#endif
#include <algorithm> | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Macros.hpp>
#include <Kokkos_Tuners.hpp>
#include <impl/Kokkos_Profiling.hpp>
#if defined(KOKKOS_ENABLE_LIBDL)
#include <dlfcn.h>
#endif
#include <algorithm>
#include <array>
#include <cstring>
#include <iostream>
#include <memory>
#include <stack>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace Kokkos {
namespace Tools {
namespace Experimental {
namespace Impl {
void tool_invoked_fence(const uint32_t /* devID */) {
/**
* Currently the function ignores the device ID,
* Eventually we want to support fencing only
* a given stream/resource
*/
Kokkos::fence(
"Kokkos::Tools::Experimental::Impl::tool_invoked_fence: Tool Requested "
"Fence");
}
} // namespace Impl
#ifdef KOKKOS_ENABLE_TUNING
static size_t kernel_name_context_variable_id;
static size_t kernel_type_context_variable_id;
static std::unordered_map<size_t, std::unordered_set<size_t>>
features_per_context;
static std::unordered_set<size_t> active_features;
static std::unordered_map<size_t, VariableValue> feature_values;
static std::unordered_map<size_t, VariableInfo> variable_metadata;
#endif
static EventSet current_callbacks;
static EventSet backup_callbacks;
static EventSet no_profiling;
static Kokkos::Tools::Experimental::ToolSettings tool_requirements;
bool eventSetsEqual(const EventSet& l, const EventSet& r) {
return l.init == r.init && l.finalize == r.finalize &&
l.parse_args == r.parse_args && l.print_help == r.print_help &&
l.begin_parallel_for == r.begin_parallel_for &&
l.end_parallel_for == r.end_parallel_for &&
l.begin_parallel_reduce == r.begin_parallel_reduce &&
l.end_parallel_reduce == r.end_parallel_reduce &&
l.begin_parallel_scan == r.begin_parallel_scan &&
l.end_parallel_scan == r.end_parallel_scan &&
l.push_region == r.push_region && l.pop_region == r.pop_region &&
l.allocate_data == r.allocate_data &&
l.deallocate_data == r.deallocate_data &&
l.create_profile_section == r.create_profile_section &&
l.start_profile_section == r.start_profile_section &&
l.stop_profile_section == r.stop_profile_section &&
l.destroy_profile_section == r.destroy_profile_section &&
l.profile_event == r.profile_event &&
l.begin_deep_copy == r.begin_deep_copy &&
l.end_deep_copy == r.end_deep_copy && l.begin_fence == r.begin_fence &&
l.end_fence == r.end_fence && l.sync_dual_view == r.sync_dual_view &&
l.modify_dual_view == r.modify_dual_view &&
l.declare_metadata == r.declare_metadata &&
l.request_tool_settings == r.request_tool_settings &&
l.provide_tool_programming_interface ==
r.provide_tool_programming_interface &&
l.declare_input_type == r.declare_input_type &&
l.declare_output_type == r.declare_output_type &&
l.end_tuning_context == r.end_tuning_context &&
l.begin_tuning_context == r.begin_tuning_context &&
l.request_output_values == r.request_output_values &&
l.declare_optimization_goal == r.declare_optimization_goal;
}
enum class MayRequireGlobalFencing : bool { No, Yes };
template <typename Callback, typename... Args>
inline void invoke_kokkosp_callback(
MayRequireGlobalFencing may_require_global_fencing,
const Callback& callback, Args&&... args) {
if (callback != nullptr) {
// two clause if statement
// may_require_global_fencing: "if this callback ever needs a fence", AND
// if the tool requires global fencing (default true, but tools can
// overwrite)
if (may_require_global_fencing == MayRequireGlobalFencing::Yes &&
(Kokkos::Tools::Experimental::tool_requirements
.requires_global_fencing)) {
Kokkos::fence(
"Kokkos::Tools::invoke_kokkosp_callback: Kokkos Profile Tool Fence");
}
(*callback)(std::forward<Args>(args)...);
}
}
} // namespace Experimental
bool profileLibraryLoaded() {
return !Experimental::eventSetsEqual(Experimental::current_callbacks,
Experimental::no_profiling);
}
void beginParallelFor(const std::string& kernelPrefix, const uint32_t devID,
uint64_t* kernelID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::Yes,
Experimental::current_callbacks.begin_parallel_for, kernelPrefix.c_str(),
devID, kernelID);
#ifdef KOKKOS_ENABLE_TUNING
if (Kokkos::tune_internals()) {
auto context_id = Experimental::get_new_context_id();
Experimental::begin_context(context_id);
Experimental::VariableValue contextValues[] = {
Experimental::make_variable_value(
Experimental::kernel_name_context_variable_id, kernelPrefix),
Experimental::make_variable_value(
Experimental::kernel_type_context_variable_id, "parallel_for")};
Experimental::set_input_values(context_id, 2, contextValues);
}
#endif
}
void endParallelFor(const uint64_t kernelID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::Yes,
Experimental::current_callbacks.end_parallel_for, kernelID);
#ifdef KOKKOS_ENABLE_TUNING
if (Kokkos::tune_internals()) {
Experimental::end_context(Experimental::get_current_context_id());
}
#endif
}
void beginParallelScan(const std::string& kernelPrefix, const uint32_t devID,
uint64_t* kernelID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::Yes,
Experimental::current_callbacks.begin_parallel_scan, kernelPrefix.c_str(),
devID, kernelID);
#ifdef KOKKOS_ENABLE_TUNING
if (Kokkos::tune_internals()) {
auto context_id = Experimental::get_new_context_id();
Experimental::begin_context(context_id);
Experimental::VariableValue contextValues[] = {
Experimental::make_variable_value(
Experimental::kernel_name_context_variable_id, kernelPrefix),
Experimental::make_variable_value(
            Experimental::kernel_type_context_variable_id, "parallel_scan")};
Experimental::set_input_values(context_id, 2, contextValues);
}
#endif
}
void endParallelScan(const uint64_t kernelID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::Yes,
Experimental::current_callbacks.end_parallel_scan, kernelID);
#ifdef KOKKOS_ENABLE_TUNING
if (Kokkos::tune_internals()) {
Experimental::end_context(Experimental::get_current_context_id());
}
#endif
}
void beginParallelReduce(const std::string& kernelPrefix, const uint32_t devID,
uint64_t* kernelID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::Yes,
Experimental::current_callbacks.begin_parallel_reduce,
kernelPrefix.c_str(), devID, kernelID);
#ifdef KOKKOS_ENABLE_TUNING
if (Kokkos::tune_internals()) {
auto context_id = Experimental::get_new_context_id();
Experimental::begin_context(context_id);
Experimental::VariableValue contextValues[] = {
Experimental::make_variable_value(
Experimental::kernel_name_context_variable_id, kernelPrefix),
Experimental::make_variable_value(
            Experimental::kernel_type_context_variable_id, "parallel_reduce")};
Experimental::set_input_values(context_id, 2, contextValues);
}
#endif
}
void endParallelReduce(const uint64_t kernelID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::Yes,
Experimental::current_callbacks.end_parallel_reduce, kernelID);
#ifdef KOKKOS_ENABLE_TUNING
if (Kokkos::tune_internals()) {
Experimental::end_context(Experimental::get_current_context_id());
}
#endif
}
void pushRegion(const std::string& kName) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::Yes,
Experimental::current_callbacks.push_region, kName.c_str());
}
void popRegion() {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::Yes,
Experimental::current_callbacks.pop_region);
}
void allocateData(const SpaceHandle space, const std::string label,
const void* ptr, const uint64_t size) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.allocate_data, space, label.c_str(), ptr,
size);
}
void deallocateData(const SpaceHandle space, const std::string label,
const void* ptr, const uint64_t size) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.deallocate_data, space, label.c_str(),
ptr, size);
}
void beginDeepCopy(const SpaceHandle dst_space, const std::string dst_label,
const void* dst_ptr, const SpaceHandle src_space,
const std::string src_label, const void* src_ptr,
const uint64_t size) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.begin_deep_copy, dst_space,
dst_label.c_str(), dst_ptr, src_space, src_label.c_str(), src_ptr, size);
#ifdef KOKKOS_ENABLE_TUNING
if (Experimental::current_callbacks.begin_deep_copy != nullptr) {
if (Kokkos::tune_internals()) {
auto context_id = Experimental::get_new_context_id();
Experimental::begin_context(context_id);
Experimental::VariableValue contextValues[] = {
Experimental::make_variable_value(
Experimental::kernel_name_context_variable_id,
"deep_copy_kernel"),
Experimental::make_variable_value(
Experimental::kernel_type_context_variable_id, "deep_copy")};
Experimental::set_input_values(context_id, 2, contextValues);
}
}
#endif
}
void endDeepCopy() {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.end_deep_copy);
#ifdef KOKKOS_ENABLE_TUNING
if (Experimental::current_callbacks.end_deep_copy != nullptr) {
if (Kokkos::tune_internals()) {
Experimental::end_context(Experimental::get_current_context_id());
}
}
#endif
}
void beginFence(const std::string name, const uint32_t deviceId,
uint64_t* handle) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.begin_fence, name.c_str(), deviceId,
handle);
}
void endFence(const uint64_t handle) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.end_fence, handle);
}
void createProfileSection(const std::string& sectionName, uint32_t* secID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.create_profile_section,
sectionName.c_str(), secID);
}
void startSection(const uint32_t secID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.start_profile_section, secID);
}
void stopSection(const uint32_t secID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.stop_profile_section, secID);
}
void destroyProfileSection(const uint32_t secID) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.destroy_profile_section, secID);
}
void markEvent(const std::string& eventName) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.profile_event, eventName.c_str());
}
bool printHelp(const std::string& args) {
if (Experimental::current_callbacks.print_help == nullptr) {
return false;
}
std::string arg0 = args.substr(0, args.find_first_of(' '));
const char* carg0 = arg0.c_str();
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.print_help, const_cast<char*>(carg0));
return true;
}
void parseArgs(int _argc, char** _argv) {
if (Experimental::current_callbacks.parse_args != nullptr && _argc > 0) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.parse_args, _argc, _argv);
}
}
void parseArgs(const std::string& args) {
if (Experimental::current_callbacks.parse_args == nullptr) {
return;
}
using strvec_t = std::vector<std::string>;
auto tokenize = [](const std::string& line, const std::string& delimiters) {
strvec_t _result{};
std::size_t _bidx = 0; // position that is the beginning of the new string
std::size_t _didx = 0; // position of the delimiter in the string
while (_bidx < line.length() && _didx < line.length()) {
// find the first character (starting at _didx) that is not a delimiter
_bidx = line.find_first_not_of(delimiters, _didx);
// if no more non-delimiter chars, done
if (_bidx == std::string::npos) break;
// starting at the position of the new string, find the next delimiter
_didx = line.find_first_of(delimiters, _bidx);
// starting at the position of the new string, get the characters
// between this position and the next delimiter
std::string _tmp = line.substr(_bidx, _didx - _bidx);
// don't add empty strings
if (!_tmp.empty()) _result.emplace_back(_tmp);
}
return _result;
};
auto vargs = tokenize(args, " \t");
if (vargs.size() == 0) return;
auto _argc = static_cast<int>(vargs.size());
char** _argv = new char*[_argc + 1];
_argv[vargs.size()] = nullptr;
for (int i = 0; i < _argc; ++i) {
auto& _str = vargs.at(i);
_argv[i] = new char[_str.length() + 1];
std::memcpy(_argv[i], _str.c_str(), _str.length() * sizeof(char));
_argv[i][_str.length()] = '\0';
}
parseArgs(_argc, _argv);
for (int i = 0; i < _argc; ++i) {
delete[] _argv[i];
}
delete[] _argv;
}
SpaceHandle make_space_handle(const char* space_name) {
SpaceHandle handle;
strncpy(handle.name, space_name, 63);
return handle;
}
template <typename Callback>
void lookup_function(void* dlopen_handle, const std::string& basename,
Callback& callback) {
#ifdef KOKKOS_ENABLE_LIBDL
  // dlsym returns a pointer to an object, while we want to assign to a
  // pointer to function. A direct cast will give warnings, hence we have to
  // work around the issue by casting pointer to pointers.
void* p = dlsym(dlopen_handle, basename.c_str());
callback = *reinterpret_cast<Callback*>(&p);
#endif
}
void initialize(const std::string& profileLibrary) {
// Make sure initialize calls happens only once
static int is_initialized = 0;
if (is_initialized) return;
is_initialized = 1;
auto invoke_init_callbacks = []() {
Experimental::invoke_kokkosp_callback(
Kokkos::Tools::Experimental::MayRequireGlobalFencing::No,
Kokkos::Tools::Experimental::current_callbacks.init, 0,
(uint64_t)KOKKOSP_INTERFACE_VERSION, (uint32_t)0, nullptr);
Experimental::tool_requirements.requires_global_fencing = true;
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.request_tool_settings, 1,
&Experimental::tool_requirements);
Experimental::ToolProgrammingInterface actions;
actions.fence = &Experimental::Impl::tool_invoked_fence;
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.provide_tool_programming_interface, 1,
actions);
};
#ifdef KOKKOS_ENABLE_LIBDL
void* firstProfileLibrary = nullptr;
if (profileLibrary.empty()) {
invoke_init_callbacks();
return;
}
char* envProfileLibrary = const_cast<char*>(profileLibrary.c_str());
const auto envProfileCopy =
std::make_unique<char[]>(strlen(envProfileLibrary) + 1);
sprintf(envProfileCopy.get(), "%s", envProfileLibrary);
char* profileLibraryName = strtok(envProfileCopy.get(), ";");
if ((profileLibraryName != nullptr) &&
(strcmp(profileLibraryName, "") != 0)) {
firstProfileLibrary = dlopen(profileLibraryName, RTLD_NOW | RTLD_GLOBAL);
if (firstProfileLibrary == nullptr) {
std::cerr << "Error: Unable to load KokkosP library: "
<< profileLibraryName << std::endl;
std::cerr << "dlopen(" << profileLibraryName
<< ", RTLD_NOW | RTLD_GLOBAL) failed with " << dlerror()
<< '\n';
} else {
#ifdef KOKKOS_ENABLE_PROFILING_LOAD_PRINT
std::cout << "KokkosP: Library Loaded: " << profileLibraryName
<< std::endl;
#endif
lookup_function(
firstProfileLibrary, "kokkosp_begin_parallel_scan",
Kokkos::Tools::Experimental::current_callbacks.begin_parallel_scan);
lookup_function(
firstProfileLibrary, "kokkosp_begin_parallel_for",
Kokkos::Tools::Experimental::current_callbacks.begin_parallel_for);
lookup_function(
firstProfileLibrary, "kokkosp_begin_parallel_reduce",
Kokkos::Tools::Experimental::current_callbacks.begin_parallel_reduce);
lookup_function(
firstProfileLibrary, "kokkosp_end_parallel_scan",
Kokkos::Tools::Experimental::current_callbacks.end_parallel_scan);
lookup_function(
firstProfileLibrary, "kokkosp_end_parallel_for",
Kokkos::Tools::Experimental::current_callbacks.end_parallel_for);
lookup_function(
firstProfileLibrary, "kokkosp_end_parallel_reduce",
Kokkos::Tools::Experimental::current_callbacks.end_parallel_reduce);
lookup_function(firstProfileLibrary, "kokkosp_init_library",
Kokkos::Tools::Experimental::current_callbacks.init);
lookup_function(firstProfileLibrary, "kokkosp_finalize_library",
Kokkos::Tools::Experimental::current_callbacks.finalize);
lookup_function(
firstProfileLibrary, "kokkosp_push_profile_region",
Kokkos::Tools::Experimental::current_callbacks.push_region);
lookup_function(
firstProfileLibrary, "kokkosp_pop_profile_region",
Kokkos::Tools::Experimental::current_callbacks.pop_region);
lookup_function(
firstProfileLibrary, "kokkosp_allocate_data",
Kokkos::Tools::Experimental::current_callbacks.allocate_data);
lookup_function(
firstProfileLibrary, "kokkosp_deallocate_data",
Kokkos::Tools::Experimental::current_callbacks.deallocate_data);
lookup_function(
firstProfileLibrary, "kokkosp_begin_deep_copy",
Kokkos::Tools::Experimental::current_callbacks.begin_deep_copy);
lookup_function(
firstProfileLibrary, "kokkosp_end_deep_copy",
Kokkos::Tools::Experimental::current_callbacks.end_deep_copy);
lookup_function(
firstProfileLibrary, "kokkosp_begin_fence",
Kokkos::Tools::Experimental::current_callbacks.begin_fence);
lookup_function(firstProfileLibrary, "kokkosp_end_fence",
Kokkos::Tools::Experimental::current_callbacks.end_fence);
lookup_function(
firstProfileLibrary, "kokkosp_dual_view_sync",
Kokkos::Tools::Experimental::current_callbacks.sync_dual_view);
lookup_function(
firstProfileLibrary, "kokkosp_dual_view_modify",
Kokkos::Tools::Experimental::current_callbacks.modify_dual_view);
lookup_function(
firstProfileLibrary, "kokkosp_declare_metadata",
Kokkos::Tools::Experimental::current_callbacks.declare_metadata);
lookup_function(firstProfileLibrary, "kokkosp_create_profile_section",
Kokkos::Tools::Experimental::current_callbacks
.create_profile_section);
lookup_function(
firstProfileLibrary, "kokkosp_start_profile_section",
Kokkos::Tools::Experimental::current_callbacks.start_profile_section);
lookup_function(
firstProfileLibrary, "kokkosp_stop_profile_section",
Kokkos::Tools::Experimental::current_callbacks.stop_profile_section);
lookup_function(firstProfileLibrary, "kokkosp_destroy_profile_section",
Kokkos::Tools::Experimental::current_callbacks
.destroy_profile_section);
lookup_function(
firstProfileLibrary, "kokkosp_profile_event",
Kokkos::Tools::Experimental::current_callbacks.profile_event);
#ifdef KOKKOS_ENABLE_TUNING
lookup_function(
firstProfileLibrary, "kokkosp_declare_output_type",
Kokkos::Tools::Experimental::current_callbacks.declare_output_type);
lookup_function(
firstProfileLibrary, "kokkosp_declare_input_type",
Kokkos::Tools::Experimental::current_callbacks.declare_input_type);
lookup_function(
firstProfileLibrary, "kokkosp_request_values",
Kokkos::Tools::Experimental::current_callbacks.request_output_values);
lookup_function(
firstProfileLibrary, "kokkosp_end_context",
Kokkos::Tools::Experimental::current_callbacks.end_tuning_context);
lookup_function(
firstProfileLibrary, "kokkosp_begin_context",
Kokkos::Tools::Experimental::current_callbacks.begin_tuning_context);
lookup_function(firstProfileLibrary, "kokkosp_declare_optimization_goal",
Kokkos::Tools::Experimental::current_callbacks
.declare_optimization_goal);
#endif // KOKKOS_ENABLE_TUNING
lookup_function(
firstProfileLibrary, "kokkosp_print_help",
Kokkos::Tools::Experimental::current_callbacks.print_help);
lookup_function(
firstProfileLibrary, "kokkosp_parse_args",
Kokkos::Tools::Experimental::current_callbacks.parse_args);
lookup_function(firstProfileLibrary,
"kokkosp_provide_tool_programming_interface",
Kokkos::Tools::Experimental::current_callbacks
.provide_tool_programming_interface);
lookup_function(
firstProfileLibrary, "kokkosp_request_tool_settings",
Kokkos::Tools::Experimental::current_callbacks.request_tool_settings);
}
}
#else
(void)profileLibrary;
#endif // KOKKOS_ENABLE_LIBDL
invoke_init_callbacks();
#ifdef KOKKOS_ENABLE_TUNING
Experimental::VariableInfo kernel_name;
kernel_name.type = Experimental::ValueType::kokkos_value_string;
kernel_name.category =
Experimental::StatisticalCategory::kokkos_value_categorical;
kernel_name.valueQuantity =
Experimental::CandidateValueType::kokkos_value_unbounded;
std::array<std::string, 4> candidate_values = {
"parallel_for",
"parallel_reduce",
"parallel_scan",
"parallel_copy",
};
Experimental::SetOrRange kernel_type_variable_candidates =
Experimental::make_candidate_set(4, candidate_values.data());
Experimental::kernel_name_context_variable_id =
Experimental::declare_input_type("kokkos.kernel_name", kernel_name);
Experimental::VariableInfo kernel_type;
kernel_type.type = Experimental::ValueType::kokkos_value_string;
kernel_type.category =
Experimental::StatisticalCategory::kokkos_value_categorical;
kernel_type.valueQuantity =
Experimental::CandidateValueType::kokkos_value_set;
kernel_type.candidates = kernel_type_variable_candidates;
Experimental::kernel_type_context_variable_id =
Experimental::declare_input_type("kokkos.kernel_type", kernel_type);
#endif
Experimental::no_profiling.init = nullptr;
Experimental::no_profiling.finalize = nullptr;
Experimental::no_profiling.begin_parallel_for = nullptr;
Experimental::no_profiling.begin_parallel_scan = nullptr;
Experimental::no_profiling.begin_parallel_reduce = nullptr;
Experimental::no_profiling.end_parallel_scan = nullptr;
Experimental::no_profiling.end_parallel_for = nullptr;
Experimental::no_profiling.end_parallel_reduce = nullptr;
Experimental::no_profiling.push_region = nullptr;
Experimental::no_profiling.pop_region = nullptr;
Experimental::no_profiling.allocate_data = nullptr;
Experimental::no_profiling.deallocate_data = nullptr;
Experimental::no_profiling.begin_deep_copy = nullptr;
Experimental::no_profiling.end_deep_copy = nullptr;
Experimental::no_profiling.create_profile_section = nullptr;
Experimental::no_profiling.start_profile_section = nullptr;
Experimental::no_profiling.stop_profile_section = nullptr;
Experimental::no_profiling.destroy_profile_section = nullptr;
Experimental::no_profiling.profile_event = nullptr;
Experimental::no_profiling.declare_input_type = nullptr;
Experimental::no_profiling.declare_output_type = nullptr;
Experimental::no_profiling.request_output_values = nullptr;
Experimental::no_profiling.end_tuning_context = nullptr;
}
void finalize() {
// Make sure finalize calls happens only once
static int is_finalized = 0;
if (is_finalized) return;
is_finalized = 1;
if (Experimental::current_callbacks.finalize != nullptr) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.finalize);
Experimental::pause_tools();
}
#ifdef KOKKOS_ENABLE_TUNING
// clean up string candidate set
for (auto& metadata_pair : Experimental::variable_metadata) {
auto metadata = metadata_pair.second;
if ((metadata.type == Experimental::ValueType::kokkos_value_string) &&
(metadata.valueQuantity ==
Experimental::CandidateValueType::kokkos_value_set)) {
auto candidate_set = metadata.candidates.set;
delete[] candidate_set.values.string_value;
}
}
#endif
}
void syncDualView(const std::string& label, const void* const ptr,
bool to_device) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.sync_dual_view, label.c_str(), ptr,
to_device);
}
void modifyDualView(const std::string& label, const void* const ptr,
bool on_device) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.modify_dual_view, label.c_str(), ptr,
on_device);
}
void declareMetadata(const std::string& key, const std::string& value) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.declare_metadata, key.c_str(),
value.c_str());
}
} // namespace Tools
namespace Tools {
namespace Experimental {
void set_init_callback(initFunction callback) {
current_callbacks.init = callback;
}
void set_finalize_callback(finalizeFunction callback) {
current_callbacks.finalize = callback;
}
void set_parse_args_callback(parseArgsFunction callback) {
current_callbacks.parse_args = callback;
}
void set_print_help_callback(printHelpFunction callback) {
current_callbacks.print_help = callback;
}
void set_begin_parallel_for_callback(beginFunction callback) {
current_callbacks.begin_parallel_for = callback;
}
void set_end_parallel_for_callback(endFunction callback) {
current_callbacks.end_parallel_for = callback;
}
void set_begin_parallel_reduce_callback(beginFunction callback) {
current_callbacks.begin_parallel_reduce = callback;
}
void set_end_parallel_reduce_callback(endFunction callback) {
current_callbacks.end_parallel_reduce = callback;
}
void set_begin_parallel_scan_callback(beginFunction callback) {
current_callbacks.begin_parallel_scan = callback;
}
void set_end_parallel_scan_callback(endFunction callback) {
current_callbacks.end_parallel_scan = callback;
}
void set_push_region_callback(pushFunction callback) {
current_callbacks.push_region = callback;
}
void set_pop_region_callback(popFunction callback) {
current_callbacks.pop_region = callback;
}
void set_allocate_data_callback(allocateDataFunction callback) {
current_callbacks.allocate_data = callback;
}
void set_deallocate_data_callback(deallocateDataFunction callback) {
current_callbacks.deallocate_data = callback;
}
void set_create_profile_section_callback(
createProfileSectionFunction callback) {
current_callbacks.create_profile_section = callback;
}
void set_start_profile_section_callback(startProfileSectionFunction callback) {
current_callbacks.start_profile_section = callback;
}
void set_stop_profile_section_callback(stopProfileSectionFunction callback) {
current_callbacks.stop_profile_section = callback;
}
void set_destroy_profile_section_callback(
destroyProfileSectionFunction callback) {
current_callbacks.destroy_profile_section = callback;
}
void set_profile_event_callback(profileEventFunction callback) {
current_callbacks.profile_event = callback;
}
void set_begin_deep_copy_callback(beginDeepCopyFunction callback) {
current_callbacks.begin_deep_copy = callback;
}
void set_end_deep_copy_callback(endDeepCopyFunction callback) {
current_callbacks.end_deep_copy = callback;
}
void set_begin_fence_callback(beginFenceFunction callback) {
current_callbacks.begin_fence = callback;
}
void set_end_fence_callback(endFenceFunction callback) {
current_callbacks.end_fence = callback;
}
void set_dual_view_sync_callback(dualViewSyncFunction callback) {
current_callbacks.sync_dual_view = callback;
}
void set_dual_view_modify_callback(dualViewModifyFunction callback) {
current_callbacks.modify_dual_view = callback;
}
void set_declare_metadata_callback(declareMetadataFunction callback) {
current_callbacks.declare_metadata = callback;
}
void set_declare_output_type_callback(outputTypeDeclarationFunction callback) {
current_callbacks.declare_output_type = callback;
}
void set_declare_input_type_callback(inputTypeDeclarationFunction callback) {
current_callbacks.declare_input_type = callback;
}
void set_request_output_values_callback(requestValueFunction callback) {
current_callbacks.request_output_values = callback;
}
void set_end_context_callback(contextEndFunction callback) {
current_callbacks.end_tuning_context = callback;
}
void set_begin_context_callback(contextBeginFunction callback) {
current_callbacks.begin_tuning_context = callback;
}
void set_declare_optimization_goal_callback(
optimizationGoalDeclarationFunction callback) {
current_callbacks.declare_optimization_goal = callback;
}
void pause_tools() {
backup_callbacks = current_callbacks;
current_callbacks = no_profiling;
}
void resume_tools() { current_callbacks = backup_callbacks; }
EventSet get_callbacks() { return current_callbacks; }
void set_callbacks(EventSet new_events) { current_callbacks = new_events; }
} // namespace Experimental
} // namespace Tools
namespace Profiling {
bool profileLibraryLoaded() { return Kokkos::Tools::profileLibraryLoaded(); }
void beginParallelFor(const std::string& kernelPrefix, const uint32_t devID,
uint64_t* kernelID) {
Kokkos::Tools::beginParallelFor(kernelPrefix, devID, kernelID);
}
void beginParallelReduce(const std::string& kernelPrefix, const uint32_t devID,
uint64_t* kernelID) {
Kokkos::Tools::beginParallelReduce(kernelPrefix, devID, kernelID);
}
void beginParallelScan(const std::string& kernelPrefix, const uint32_t devID,
uint64_t* kernelID) {
Kokkos::Tools::beginParallelScan(kernelPrefix, devID, kernelID);
}
void endParallelFor(const uint64_t kernelID) {
Kokkos::Tools::endParallelFor(kernelID);
}
void endParallelReduce(const uint64_t kernelID) {
Kokkos::Tools::endParallelReduce(kernelID);
}
void endParallelScan(const uint64_t kernelID) {
Kokkos::Tools::endParallelScan(kernelID);
}
void pushRegion(const std::string& kName) { Kokkos::Tools::pushRegion(kName); }
void popRegion() { Kokkos::Tools::popRegion(); }
void createProfileSection(const std::string& sectionName, uint32_t* secID) {
Kokkos::Tools::createProfileSection(sectionName, secID);
}
void destroyProfileSection(const uint32_t secID) {
Kokkos::Tools::destroyProfileSection(secID);
}
void startSection(const uint32_t secID) { Kokkos::Tools::startSection(secID); }
void stopSection(const uint32_t secID) { Kokkos::Tools::stopSection(secID); }
void markEvent(const std::string& eventName) {
Kokkos::Tools::markEvent(eventName);
}
void allocateData(const SpaceHandle handle, const std::string name,
const void* data, const uint64_t size) {
Kokkos::Tools::allocateData(handle, name, data, size);
}
void deallocateData(const SpaceHandle space, const std::string label,
const void* ptr, const uint64_t size) {
Kokkos::Tools::deallocateData(space, label, ptr, size);
}
void beginDeepCopy(const SpaceHandle dst_space, const std::string dst_label,
const void* dst_ptr, const SpaceHandle src_space,
const std::string src_label, const void* src_ptr,
const uint64_t size) {
Kokkos::Tools::beginDeepCopy(dst_space, dst_label, dst_ptr, src_space,
src_label, src_ptr, size);
}
void endDeepCopy() { Kokkos::Tools::endDeepCopy(); }
void finalize() { Kokkos::Tools::finalize(); }
void initialize(const std::string& profileLibrary) {
Kokkos::Tools::initialize(profileLibrary);
}
bool printHelp(const std::string& args) {
return Kokkos::Tools::printHelp(args);
}
void parseArgs(const std::string& args) { Kokkos::Tools::parseArgs(args); }
void parseArgs(int _argc, char** _argv) {
Kokkos::Tools::parseArgs(_argc, _argv);
}
SpaceHandle make_space_handle(const char* space_name) {
return Kokkos::Tools::make_space_handle(space_name);
}
} // namespace Profiling
} // namespace Kokkos
// Tuning
namespace Kokkos {
namespace Tools {
namespace Experimental {
static size_t& get_context_counter() {
static size_t x;
return x;
}
static size_t& get_variable_counter() {
static size_t x;
return ++x;
}
size_t get_new_context_id() { return ++get_context_counter(); }
size_t get_current_context_id() { return get_context_counter(); }
void decrement_current_context_id() { --get_context_counter(); }
size_t get_new_variable_id() { return get_variable_counter(); }
size_t declare_output_type(const std::string& variableName, VariableInfo info) {
size_t variableId = get_new_variable_id();
#ifdef KOKKOS_ENABLE_TUNING
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.declare_output_type, variableName.c_str(),
variableId, &info);
variable_metadata[variableId] = info;
#else
(void)variableName;
(void)info;
#endif
return variableId;
}
size_t declare_input_type(const std::string& variableName, VariableInfo info) {
size_t variableId = get_new_variable_id();
#ifdef KOKKOS_ENABLE_TUNING
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.declare_input_type, variableName.c_str(),
variableId, &info);
variable_metadata[variableId] = info;
#else
(void)variableName;
(void)info;
#endif
return variableId;
}
void set_input_values(size_t contextId, size_t count, VariableValue* values) {
#ifdef KOKKOS_ENABLE_TUNING
if (features_per_context.find(contextId) == features_per_context.end()) {
features_per_context[contextId] = std::unordered_set<size_t>();
}
for (size_t x = 0; x < count; ++x) {
values[x].metadata = &variable_metadata[values[x].type_id];
features_per_context[contextId].insert(values[x].type_id);
active_features.insert(values[x].type_id);
feature_values[values[x].type_id] = values[x];
}
#else
(void)contextId;
(void)count;
(void)values;
#endif
}
#include <iostream>
void request_output_values(size_t contextId, size_t count,
VariableValue* values) {
#ifdef KOKKOS_ENABLE_TUNING
std::vector<size_t> context_ids;
std::vector<VariableValue> context_values;
for (auto id : active_features) {
context_values.push_back(feature_values[id]);
}
if (Experimental::current_callbacks.request_output_values != nullptr) {
for (size_t x = 0; x < count; ++x) {
values[x].metadata = &variable_metadata[values[x].type_id];
}
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.request_output_values, contextId,
context_values.size(), context_values.data(), count, values);
}
#else
(void)contextId;
(void)count;
(void)values;
#endif
}
#ifdef KOKKOS_ENABLE_TUNING
static std::unordered_map<size_t, size_t> optimization_goals;
#endif
void begin_context(size_t contextId) {
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.begin_tuning_context, contextId);
}
void end_context(size_t contextId) {
#ifdef KOKKOS_ENABLE_TUNING
for (auto id : features_per_context[contextId]) {
active_features.erase(id);
}
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.end_tuning_context, contextId,
feature_values[optimization_goals[contextId]]);
optimization_goals.erase(contextId);
decrement_current_context_id();
#else
(void)contextId;
#endif
}
bool have_tuning_tool() {
#ifdef KOKKOS_ENABLE_TUNING
return (Experimental::current_callbacks.request_output_values != nullptr);
#else
return false;
#endif
}
VariableValue make_variable_value(size_t id, int64_t val) {
VariableValue variable_value;
variable_value.type_id = id;
variable_value.value.int_value = val;
return variable_value;
}
VariableValue make_variable_value(size_t id, double val) {
VariableValue variable_value;
variable_value.type_id = id;
variable_value.value.double_value = val;
return variable_value;
}
VariableValue make_variable_value(size_t id, const std::string& val) {
VariableValue variable_value;
variable_value.type_id = id;
strncpy(variable_value.value.string_value, val.c_str(),
KOKKOS_TOOLS_TUNING_STRING_LENGTH - 1);
return variable_value;
}
SetOrRange make_candidate_set(size_t size, std::string* data) {
SetOrRange value_set;
value_set.set.values.string_value = new TuningString[size];
for (size_t x = 0; x < size; ++x) {
strncpy(value_set.set.values.string_value[x], data[x].c_str(),
KOKKOS_TOOLS_TUNING_STRING_LENGTH - 1);
}
value_set.set.size = size;
return value_set;
}
SetOrRange make_candidate_set(size_t size, int64_t* data) {
SetOrRange value_set;
value_set.set.size = size;
value_set.set.values.int_value = data;
return value_set;
}
SetOrRange make_candidate_set(size_t size, double* data) {
SetOrRange value_set;
value_set.set.size = size;
value_set.set.values.double_value = data;
return value_set;
}
SetOrRange make_candidate_range(double lower, double upper, double step,
bool openLower = false,
bool openUpper = false) {
SetOrRange value_range;
value_range.range.lower.double_value = lower;
value_range.range.upper.double_value = upper;
value_range.range.step.double_value = step;
value_range.range.openLower = openLower;
value_range.range.openUpper = openUpper;
return value_range;
}
SetOrRange make_candidate_range(int64_t lower, int64_t upper, int64_t step,
bool openLower = false,
bool openUpper = false) {
SetOrRange value_range;
value_range.range.lower.int_value = lower;
value_range.range.upper.int_value = upper;
value_range.range.step.int_value = step;
value_range.range.openLower = openLower;
value_range.range.openUpper = openUpper;
return value_range;
}
size_t get_new_context_id();
size_t get_current_context_id();
void decrement_current_context_id();
size_t get_new_variable_id();
void declare_optimization_goal(const size_t context,
const OptimizationGoal& goal) {
#ifdef KOKKOS_ENABLE_TUNING
Experimental::invoke_kokkosp_callback(
Experimental::MayRequireGlobalFencing::No,
Experimental::current_callbacks.declare_optimization_goal, context, goal);
optimization_goals[context] = goal.type_id;
#else
(void)context;
(void)goal;
#endif
}
} // end namespace Experimental
} // end namespace Tools
} // end namespace Kokkos
| 1 | 31,347 | Can you elaborate on why you need different behavior depending on Tools being built independently or not? | kokkos-kokkos | cpp |
@@ -18,7 +18,8 @@ class AnnotationsController < ApplicationController
guid_save = guidance.present? ? guidance.save : true
if ex_save && guid_save
- redirect_to admin_show_phase_path(id: @question.section.phase_id, section_id: @question.section_id, question_id: @question.id, edit: 'true'), notice: _('Information was successfully created.')
+ typ = (!ex_save && !guid_save ? 'example answer and guidance' : (!guid_save ? 'guidance' : 'example answer'))
+ redirect_to admin_show_phase_path(id: @question.section.phase_id, section_id: @question.section_id, question_id: @question.id, edit: 'true'), notice: success_message(typ, _('created'))
else
@section = @question.section
@phase = @section.phase | 1 | class AnnotationsController < ApplicationController
respond_to :html
after_action :verify_authorized
#create annotations
def admin_create
# authorize the question (includes to reduce queries)
@question = Question.includes(section: { phase: :template}).find(params[:question_id])
authorize @question
if params[:example_answer_text].present?
example_answer = init_annotation(params[:example_answer_text], @question, current_user.org, Annotation.types[:example_answer])
end
if params[:guidance_text].present?
guidance = init_annotation(params[:guidance_text], @question, current_user.org, Annotation.types[:guidance])
end
# if they dont exist, no requirement for them to be saved
ex_save = example_answer.present? ? example_answer.save : true
guid_save = guidance.present? ? guidance.save : true
if ex_save && guid_save
redirect_to admin_show_phase_path(id: @question.section.phase_id, section_id: @question.section_id, question_id: @question.id, edit: 'true'), notice: _('Information was successfully created.')
else
@section = @question.section
@phase = @section.phase
@open = true
@sections = @phase.sections
@section_id = @section.id
@question_id = @example_answer.question
if !ex_save && !guid_save
flash[:notice] = failed_create_error(example_answer, _('example answer')) + '\n' +
                         failed_create_error(guidance, _('guidance'))
elsif !guid_save
        flash[:notice] = failed_create_error(guidance, _('guidance'))
elsif !ex_save
flash[:notice] = failed_create_error(example_answer, _('example answer'))
end
render "phases/admin_show"
end
end
#update a example answer of a template
def admin_update
@question = Question.includes(section: { phase: :template}).find(params[:question_id])
if params[:guidance_id].present?
guidance = Annotation.includes(question: {section: {phase: :template}}).find(params[:guidance_id])
authorize guidance
end
if params[:example_answer_id].present?
example_answer = Annotation.includes(question: {section: {phase: :template}}).find(params[:example_answer_id])
authorize example_answer
end
verify_authorized
# if guidance present, update
if params[:guidance_text].present?
if guidance.present?
guidance.text = params[:guidance_text]
else
guidance = init_annotation(params[:guidance_text], @question, current_user.org, Annotation.types[:guidance])
end
end
# if example answer present, update
if params[:example_answer_text].present?
if example_answer.present?
example_answer.text = params[:example_answer_text]
else
example_answer = init_annotation(params[:example_answer_text], @question, current_user.org, Annotation.types[:example_answer])
end
end
# only required to save if we updated/created one
ex_save = example_answer.present? ? example_answer.save : true
guid_save = guidance.present? ? guidance.save : true
@section = @question.section
@phase = @section.phase
if ex_save && guid_save
redirect_to admin_show_phase_path(id: @phase.id, section_id: @section.id, question_id: @question.id, edit: 'true'), notice: _('Information was successfully updated.')
else
if !ex_save && !guid_save
flash[:notice] = failed_create_error(example_answer, _('example answer')) + '\n' +
                         failed_create_error(guidance, _('guidance'))
elsif !guid_save
        flash[:notice] = failed_create_error(guidance, _('guidance'))
elsif !ex_save
flash[:notice] = failed_create_error(example_answer, _('example answer'))
end
render action: "phases/admin_show"
end
end
#delete an annotation
def admin_destroy
@example_answer = Annotation.includes(question: { section: {phase: :template}}).find(params[:id])
authorize @example_answer
@question = @example_answer.question
@section = @question.section
@phase = @section.phase
if @example_answer.destroy
redirect_to admin_show_phase_path(id: @phase.id, section_id: @section.id, edit: 'true'), notice: _('Information was successfully deleted.')
else
redirect_to admin_show_phase_path(id: @phase.id, section_id: @section.id, edit: 'true'), notice: flash[:notice] = failed_destroy_error(@example_answer, _('example answer'))
end
end
private
def init_annotation(text, question, org, type)
annotation = Annotation.new
annotation.org = org
annotation.question = question
annotation.text = text
annotation.type = type
return annotation
end
end | 1 | 16,750 | because the above if statement requires both ex_save and guid_save to be true, this code will always return 'example answer'. This should be revised with `example_answer.present?` and `guidance.present?` | DMPRoadmap-roadmap | rb
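A standalone sketch of the reviewer's suggestion, in plain Ruby so it runs on its own — the helper name and the boolean arguments are hypothetical stand-ins for `example_answer.present?` and `guidance.present?`, not code from the controller:

```ruby
# Hypothetical helper (not part of AnnotationsController): the two booleans
# stand in for example_answer.present? and guidance.present?.
def annotation_kind(example_present, guidance_present)
  if example_present && guidance_present
    'example answer and guidance'
  elsif guidance_present
    'guidance'
  else
    'example answer'
  end
end

puts annotation_kind(true, true)   # => example answer and guidance
puts annotation_kind(false, true)  # => guidance
puts annotation_kind(true, false)  # => example answer
```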
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "golang.org/x/net/context"
"github.com/sonm-io/core/insonmnia/structs"
pb "github.com/sonm-io/core/proto" | 1 | package marketplace
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/sonm-io/core/insonmnia/structs"
pb "github.com/sonm-io/core/proto"
)
func makeOrder() *pb.Order {
return &pb.Order{
Price: 1,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
}
}
func TestInMemOrderStorage_CreateOrder(t *testing.T) {
s := NewInMemoryStorage()
order := makeOrder()
o, _ := structs.NewOrder(order)
created, err := s.CreateOrder(o)
assert.NoError(t, err)
assert.NotEmpty(t, created.GetID(), "order must have id after creation")
}
func TestNewInMemoryStorage_CreateOrder_Errors(t *testing.T) {
cases := []struct {
fn func() *pb.Order
err error
}{
{
fn: func() *pb.Order {
order := makeOrder()
order.Price = 0
return order
},
err: errPriceIsZero,
},
{
fn: func() *pb.Order {
order := makeOrder()
order.Slot = nil
return order
},
err: errSlotIsNil,
},
{
fn: func() *pb.Order {
order := makeOrder()
order.Slot.Resources = nil
return order
},
err: errResourcesIsNil,
},
{
fn: func() *pb.Order {
return nil
},
err: errOrderIsNil,
},
}
for i, cc := range cases {
_, err := structs.NewOrder(cc.fn())
assert.EqualError(t, err, cc.err.Error(), fmt.Sprintf("%d", i))
}
}
func TestInMemOrderStorage_DeleteOrder(t *testing.T) {
s := NewInMemoryStorage()
o, err := structs.NewOrder(makeOrder())
assert.NoError(t, err)
order, err := s.CreateOrder(o)
assert.NoError(t, err)
err = s.DeleteOrder(order.GetID())
assert.NoError(t, err)
}
func TestInMemOrderStorage_DeleteOrder_NotExists(t *testing.T) {
s := NewInMemoryStorage()
err := s.DeleteOrder("1234")
assert.EqualError(t, err, errOrderNotFound.Error())
}
func TestInMemOrderStorage_GetOrderByID(t *testing.T) {
s := NewInMemoryStorage()
order, err := structs.NewOrder(makeOrder())
assert.NoError(t, err)
created, err := s.CreateOrder(order)
assert.NoError(t, err)
assert.NotEmpty(t, created.GetID())
found, err := s.GetOrderByID(created.GetID())
assert.NoError(t, err)
assert.Equal(t, created.GetID(), found.GetID())
assert.Equal(t, created.GetPrice(), found.GetPrice())
}
func TestInMemOrderStorage_GetOrderByID_NotExists(t *testing.T) {
s := NewInMemoryStorage()
order, err := s.GetOrderByID("1234")
assert.Nil(t, order)
assert.EqualError(t, err, errOrderNotFound.Error())
}
func TestInMemOrderStorage_GetOrders_NilParams(t *testing.T) {
s := NewInMemoryStorage()
_, err := s.GetOrders(nil)
assert.EqualError(t, err, errSearchParamsIsNil.Error())
}
func TestInMemOrderStorage_GetOrders_NilSlot(t *testing.T) {
s := NewInMemoryStorage()
_, err := s.GetOrders(&searchParams{})
assert.EqualError(t, err, errSlotIsNil.Error())
}
func TestNewOrder(t *testing.T) {
cases := []struct {
ord *pb.Order
err error
}{
{
ord: nil,
err: errOrderIsNil,
},
{
ord: &pb.Order{
Price: 0,
Slot: &pb.Slot{},
},
err: errPriceIsZero,
},
{
ord: &pb.Order{
Price: 1,
},
err: errSlotIsNil,
},
{
ord: &pb.Order{
Price: 1,
Slot: &pb.Slot{},
},
err: errResourcesIsNil,
},
}
for i, cc := range cases {
_, err := structs.NewOrder(cc.ord)
assert.EqualError(t, err, cc.err.Error(), fmt.Sprintf("%d", i))
}
}
func TestCompareWithType(t *testing.T) {
cases := []struct {
slotT pb.OrderType
slot *pb.Slot
order *pb.Order
mustMatch bool
}{
{
slotT: pb.OrderType_ANY,
slot: &pb.Slot{
Resources: &pb.Resources{},
},
order: &pb.Order{
OrderType: pb.OrderType_BID,
Price: 1,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
},
mustMatch: true,
},
{
slotT: pb.OrderType_ANY,
slot: &pb.Slot{
Resources: &pb.Resources{},
},
order: &pb.Order{
OrderType: pb.OrderType_ASK,
Price: 1,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
},
mustMatch: true,
},
{
slotT: pb.OrderType_ASK,
slot: &pb.Slot{
Resources: &pb.Resources{},
},
order: &pb.Order{
OrderType: pb.OrderType_ASK,
Price: 1,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
},
mustMatch: true,
},
{
slotT: pb.OrderType_ASK,
slot: &pb.Slot{
Resources: &pb.Resources{},
},
order: &pb.Order{
OrderType: pb.OrderType_BID,
Price: 1,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
},
mustMatch: false,
},
}
for i, cc := range cases {
ord, err := structs.NewOrder(cc.order)
assert.NoError(t, err)
sl, err := structs.NewSlot(cc.slot)
assert.NoError(t, err)
isMatch := compareOrderAndSlot(sl, ord, cc.slotT)
assert.Equal(t, cc.mustMatch, isMatch, fmt.Sprintf("%d", i))
}
}
func TestInMemOrderStorage_GetOrders_Count(t *testing.T) {
stor := NewInMemoryStorage()
for i := 0; i < 100; i++ {
ord, err := structs.NewOrder(&pb.Order{
Price: 1,
OrderType: pb.OrderType_BID,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
})
assert.NoError(t, err)
_, err = stor.CreateOrder(ord)
assert.NoError(t, err)
}
sl, err := structs.NewSlot(&pb.Slot{
Resources: &pb.Resources{},
})
assert.NoError(t, err)
search := &searchParams{
slot: sl,
count: 3,
orderType: pb.OrderType_BID,
}
found, err := stor.GetOrders(search)
assert.NoError(t, err)
assert.Equal(t, int(search.count), len(found))
}
func TestInMemOrderStorage_GetOrders_Count2(t *testing.T) {
stor := NewInMemoryStorage()
for i := 0; i < 100; i++ {
bid, err := structs.NewOrder(&pb.Order{
Price: 1,
OrderType: pb.OrderType_BID,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
})
assert.NoError(t, err)
_, err = stor.CreateOrder(bid)
assert.NoError(t, err)
}
ask, err := structs.NewOrder(&pb.Order{
Price: 1,
OrderType: pb.OrderType_ASK,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
})
assert.NoError(t, err)
_, err = stor.CreateOrder(ask)
assert.NoError(t, err)
sl, err := structs.NewSlot(&pb.Slot{
Resources: &pb.Resources{},
})
assert.NoError(t, err)
search := &searchParams{
slot: sl,
count: 10,
orderType: pb.OrderType_ASK,
}
found, err := stor.GetOrders(search)
assert.NoError(t, err)
assert.Equal(t, 1, len(found))
}
func TestInMemOrderStorage_GetOrders_Count3(t *testing.T) {
stor := NewInMemoryStorage()
for i := 0; i < 100; i++ {
var ot pb.OrderType
if i%2 == 0 {
ot = pb.OrderType_ASK
} else {
ot = pb.OrderType_BID
}
bid, err := structs.NewOrder(&pb.Order{
Price: 1,
OrderType: ot,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
})
assert.NoError(t, err)
_, err = stor.CreateOrder(bid)
assert.NoError(t, err)
}
sl, err := structs.NewSlot(&pb.Slot{
Resources: &pb.Resources{},
})
assert.NoError(t, err)
search := []*searchParams{
{
slot: sl,
count: 5,
orderType: pb.OrderType_ANY,
},
{
slot: sl,
count: 10,
orderType: pb.OrderType_ASK,
},
{
slot: sl,
count: 50,
orderType: pb.OrderType_BID,
},
}
for _, ss := range search {
found, err := stor.GetOrders(ss)
assert.NoError(t, err)
assert.Equal(t, int(ss.count), len(found))
}
}
func TestMarketplace_GetOrders(t *testing.T) {
mp := NewMarketplace("")
req := &pb.GetOrdersRequest{
Slot: nil,
Count: 0,
OrderType: pb.OrderType_ANY,
}
_, err := mp.GetOrders(nil, req)
assert.EqualError(t, err, errSlotIsNil.Error())
}
func TestInMemOrderStorage_GetOrders_Ordering(t *testing.T) {
// check if order is sorted
stor := NewInMemoryStorage()
for i := 100; i > 0; i-- {
bid, err := structs.NewOrder(&pb.Order{
Price: int64(i + 1),
OrderType: pb.OrderType_BID,
Slot: &pb.Slot{
Resources: &pb.Resources{},
},
})
assert.NoError(t, err)
_, err = stor.CreateOrder(bid)
assert.NoError(t, err)
}
sl, err := structs.NewSlot(&pb.Slot{
Resources: &pb.Resources{},
})
assert.NoError(t, err)
search := &searchParams{
slot: sl,
count: 10,
orderType: pb.OrderType_BID,
}
found, err := stor.GetOrders(search)
assert.NoError(t, err)
assert.Equal(t, int(search.count), len(found))
for i := 1; i < len(found); i++ {
p1 := found[i-1].GetPrice()
p2 := found[i].GetPrice()
ok := p1 > p2
assert.True(t, ok, fmt.Sprintf("iter %d :: %d should be gt %d", i, p1, p2))
}
}
| 1 | 5,962 | why not context from stdlib? | sonm-io-core | go |
@@ -48,7 +48,7 @@ func GenerateCRC32(
return Checksum{
Value: checksum,
Version: payloadVersion,
- Flavor: FlavorIEEECRC32OverThriftBinary,
+ Flavor: FlavorIEEECRC32OverProto3Binary,
}, nil
}
| 1 | // Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package checksum
import (
"bytes"
"encoding/binary"
"fmt"
"hash/crc32"
"github.com/gogo/protobuf/proto"
)
// GenerateCRC32 generates an IEEE crc32 checksum on the
// serialized byte array of the given proto object. The
// payload is serialized using its proto (gogo/protobuf) Marshal method
func GenerateCRC32(
payload proto.Marshaler,
payloadVersion int,
) (Checksum, error) {
payloadBytes, err := payload.Marshal()
if err != nil {
return Checksum{}, err
}
crc := crc32.ChecksumIEEE(payloadBytes)
checksum := make([]byte, 4)
binary.BigEndian.PutUint32(checksum, crc)
return Checksum{
Value: checksum,
Version: payloadVersion,
Flavor: FlavorIEEECRC32OverThriftBinary,
}, nil
}
// Verify verifies that the checksum generated from the
// given thrift object matches the specified expected checksum
// Return ErrMismatch when checksums mismatch
func Verify(
payload proto.Marshaler,
checksum Checksum,
) error {
if !checksum.Flavor.IsValid() || checksum.Flavor != FlavorIEEECRC32OverThriftBinary {
return fmt.Errorf("unknown checksum flavor %v", checksum.Flavor)
}
expected, err := GenerateCRC32(payload, checksum.Version)
if err != nil {
return err
}
if !bytes.Equal(expected.Value, checksum.Value) {
return ErrMismatch
}
return nil
}
| 1 | 9,454 | Just saw this and wasn't able to hold myself from renaming :-). | temporalio-temporal | go |
@@ -278,9 +278,7 @@ describe('src/Core', () => {
plugins: {},
totalProgress: 0
})
- expect(core.plugins.acquirer[0].mocks.uninstall.mock.calls.length).toEqual(
- 1
- )
+ expect(core.plugins[Object.keys(core.plugins)[0]].length).toEqual(0)
})
describe('upload hooks', () => { | 1 | const fs = require('fs')
const path = require('path')
const Core = require('./Core')
const utils = require('./Utils')
const Plugin = require('./Plugin')
const AcquirerPlugin1 = require('../../test/mocks/acquirerPlugin1')
const AcquirerPlugin2 = require('../../test/mocks/acquirerPlugin2')
const InvalidPlugin = require('../../test/mocks/invalidPlugin')
const InvalidPluginWithoutId = require('../../test/mocks/invalidPluginWithoutId')
const InvalidPluginWithoutType = require('../../test/mocks/invalidPluginWithoutType')
jest.mock('cuid', () => {
return () => 'cjd09qwxb000dlql4tp4doz8h'
})
const sampleImage = fs.readFileSync(path.join(__dirname, '../../test/resources/image.jpg'))
describe('src/Core', () => {
const RealCreateObjectUrl = global.URL.createObjectURL
beforeEach(() => {
jest.spyOn(utils, 'findDOMElement').mockImplementation(path => {
return 'some config...'
})
global.URL.createObjectURL = jest.fn().mockReturnValue('newUrl')
})
afterEach(() => {
global.URL.createObjectURL = RealCreateObjectUrl
})
it('should expose a class', () => {
const core = Core()
expect(core.constructor.name).toEqual('Uppy')
})
it('should have a string `id` option that defaults to "uppy"', () => {
const core = Core()
expect(core.getID()).toEqual('uppy')
const core2 = Core({ id: 'profile' })
expect(core2.getID()).toEqual('profile')
})
describe('plugins', () => {
it('should add a plugin to the plugin stack', () => {
const core = Core()
core.use(AcquirerPlugin1)
expect(Object.keys(core.plugins.acquirer).length).toEqual(1)
})
it('should prevent the same plugin from being added more than once', () => {
const core = Core()
core.use(AcquirerPlugin1)
expect(() => {
core.use(AcquirerPlugin1)
}).toThrowErrorMatchingSnapshot()
})
it('should not be able to add an invalid plugin', () => {
const core = Core()
expect(() => {
core.use(InvalidPlugin)
}).toThrowErrorMatchingSnapshot()
})
it('should not be able to add a plugin that has no id', () => {
const core = Core()
expect(() =>
core.use(InvalidPluginWithoutId)
).toThrowErrorMatchingSnapshot()
})
it('should not be able to add a plugin that has no type', () => {
const core = Core()
expect(() =>
core.use(InvalidPluginWithoutType)
).toThrowErrorMatchingSnapshot()
})
it('should return the plugin that matches the specified name', () => {
const core = new Core()
expect(core.getPlugin('foo')).toEqual(null)
core.use(AcquirerPlugin1)
const plugin = core.getPlugin('TestSelector1')
expect(plugin.id).toEqual('TestSelector1')
expect(plugin instanceof Plugin)
})
it('should call the specified method on all the plugins', () => {
const core = new Core()
core.use(AcquirerPlugin1)
core.use(AcquirerPlugin2)
core.iteratePlugins(plugin => {
plugin.run('hello')
})
expect(core.plugins.acquirer[0].mocks.run.mock.calls.length).toEqual(1)
expect(core.plugins.acquirer[0].mocks.run.mock.calls[0]).toEqual([
'hello'
])
expect(core.plugins.acquirer[1].mocks.run.mock.calls.length).toEqual(1)
expect(core.plugins.acquirer[1].mocks.run.mock.calls[0]).toEqual([
'hello'
])
})
it('should uninstall and the remove the specified plugin', () => {
const core = new Core()
core.use(AcquirerPlugin1)
core.use(AcquirerPlugin2)
expect(Object.keys(core.plugins.acquirer).length).toEqual(2)
const plugin = core.getPlugin('TestSelector1')
core.removePlugin(plugin)
expect(Object.keys(core.plugins.acquirer).length).toEqual(1)
expect(plugin.mocks.uninstall.mock.calls.length).toEqual(1)
expect(core.plugins.acquirer[0].mocks.run.mock.calls.length).toEqual(0)
})
})
describe('state', () => {
it('should update all the plugins with the new state when the updateAll method is called', () => {
const core = new Core()
core.use(AcquirerPlugin1)
core.use(AcquirerPlugin2)
core.updateAll({ foo: 'bar' })
expect(core.plugins.acquirer[0].mocks.update.mock.calls.length).toEqual(1)
expect(core.plugins.acquirer[0].mocks.update.mock.calls[0]).toEqual([
{ foo: 'bar' }
])
expect(core.plugins.acquirer[1].mocks.update.mock.calls.length).toEqual(1)
expect(core.plugins.acquirer[1].mocks.update.mock.calls[0]).toEqual([
{ foo: 'bar' }
])
})
it('should update the state', () => {
const core = new Core()
const stateUpdateEventMock = jest.fn()
core.on('state-update', stateUpdateEventMock)
core.use(AcquirerPlugin1)
core.use(AcquirerPlugin2)
core.setState({ foo: 'bar', bee: 'boo' })
core.setState({ foo: 'baar' })
const newState = {
bee: 'boo',
capabilities: { resumableUploads: false },
files: {},
currentUploads: {},
foo: 'baar',
info: { isHidden: true, message: '', type: 'info' },
meta: {},
plugins: {},
totalProgress: 0
}
expect(core.getState()).toEqual(newState)
expect(core.plugins.acquirer[0].mocks.update.mock.calls[1]).toEqual([
newState
])
expect(core.plugins.acquirer[1].mocks.update.mock.calls[1]).toEqual([
newState
])
expect(stateUpdateEventMock.mock.calls.length).toEqual(2)
// current state
expect(stateUpdateEventMock.mock.calls[1][0]).toEqual({
bee: 'boo',
capabilities: { resumableUploads: false },
files: {},
currentUploads: {},
foo: 'bar',
info: { isHidden: true, message: '', type: 'info' },
meta: {},
plugins: {},
totalProgress: 0
})
// new state
expect(stateUpdateEventMock.mock.calls[1][1]).toEqual({
bee: 'boo',
capabilities: { resumableUploads: false },
files: {},
currentUploads: {},
foo: 'baar',
info: { isHidden: true, message: '', type: 'info' },
meta: {},
plugins: {},
totalProgress: 0
})
})
it('should get the state', () => {
const core = new Core()
core.setState({ foo: 'bar' })
expect(core.getState()).toEqual({
capabilities: { resumableUploads: false },
files: {},
currentUploads: {},
foo: 'bar',
info: { isHidden: true, message: '', type: 'info' },
meta: {},
plugins: {},
totalProgress: 0
})
})
})
it('should reset when the reset method is called', () => {
const core = new Core()
// const corePauseEventMock = jest.fn()
const coreCancelEventMock = jest.fn()
const coreStateUpdateEventMock = jest.fn()
core.on('cancel-all', coreCancelEventMock)
core.on('state-update', coreStateUpdateEventMock)
core.setState({ foo: 'bar', totalProgress: 30 })
core.reset()
// expect(corePauseEventMock.mock.calls.length).toEqual(1)
expect(coreCancelEventMock.mock.calls.length).toEqual(1)
expect(coreStateUpdateEventMock.mock.calls.length).toEqual(2)
expect(coreStateUpdateEventMock.mock.calls[1][1]).toEqual({
capabilities: { resumableUploads: false },
files: {},
currentUploads: {},
error: null,
foo: 'bar',
info: { isHidden: true, message: '', type: 'info' },
meta: {},
plugins: {},
totalProgress: 0
})
})
it('should clear all uploads on cancelAll()', () => {
const core = new Core()
const id = core._createUpload([ 'a', 'b' ])
expect(core.getState().currentUploads[id]).toBeDefined()
core.cancelAll()
expect(core.getState().currentUploads[id]).toBeUndefined()
})
it('should close, reset and uninstall when the close method is called', () => {
const core = new Core()
core.use(AcquirerPlugin1)
// const corePauseEventMock = jest.fn()
const coreCancelEventMock = jest.fn()
const coreStateUpdateEventMock = jest.fn()
// core.on('pause-all', corePauseEventMock)
core.on('cancel-all', coreCancelEventMock)
core.on('state-update', coreStateUpdateEventMock)
core.close()
// expect(corePauseEventMock.mock.calls.length).toEqual(1)
expect(coreCancelEventMock.mock.calls.length).toEqual(1)
expect(coreStateUpdateEventMock.mock.calls.length).toEqual(1)
expect(coreStateUpdateEventMock.mock.calls[0][1]).toEqual({
capabilities: { resumableUploads: false },
files: {},
currentUploads: {},
error: null,
info: { isHidden: true, message: '', type: 'info' },
meta: {},
plugins: {},
totalProgress: 0
})
expect(core.plugins.acquirer[0].mocks.uninstall.mock.calls.length).toEqual(
1
)
})
describe('upload hooks', () => {
it('should add data returned from upload hooks to the .upload() result', () => {
const core = new Core()
core.addPreProcessor((fileIDs, uploadID) => {
core.addResultData(uploadID, { pre: 'ok' })
})
core.addPostProcessor((fileIDs, uploadID) => {
core.addResultData(uploadID, { post: 'ok' })
})
core.addUploader((fileIDs, uploadID) => {
core.addResultData(uploadID, { upload: 'ok' })
})
return core.upload().then((result) => {
expect(result.pre).toBe('ok')
expect(result.upload).toBe('ok')
expect(result.post).toBe('ok')
})
})
})
describe('preprocessors', () => {
it('should add a preprocessor', () => {
const core = new Core()
const preprocessor = function () {}
core.addPreProcessor(preprocessor)
expect(core.preProcessors[0]).toEqual(preprocessor)
})
it('should remove a preprocessor', () => {
const core = new Core()
const preprocessor1 = function () {}
const preprocessor2 = function () {}
const preprocessor3 = function () {}
core.addPreProcessor(preprocessor1)
core.addPreProcessor(preprocessor2)
core.addPreProcessor(preprocessor3)
expect(core.preProcessors.length).toEqual(3)
core.removePreProcessor(preprocessor2)
expect(core.preProcessors.length).toEqual(2)
})
it('should execute all the preprocessors when uploading a file', () => {
const core = new Core()
const preprocessor1 = jest.fn()
const preprocessor2 = jest.fn()
core.addPreProcessor(preprocessor1)
core.addPreProcessor(preprocessor2)
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
return core.upload()
.then(() => {
const fileId = Object.keys(core.getState().files)[0]
expect(preprocessor1.mock.calls.length).toEqual(1)
expect(preprocessor1.mock.calls[0][0].length).toEqual(1)
expect(preprocessor1.mock.calls[0][0][0]).toEqual(fileId)
expect(preprocessor2.mock.calls[0][0].length).toEqual(1)
expect(preprocessor2.mock.calls[0][0][0]).toEqual(fileId)
})
})
it('should update the file progress state when preprocess-progress event is fired', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const fileId = Object.keys(core.getState().files)[0]
const file = core.getFile(fileId)
core.emit('preprocess-progress', file, {
mode: 'determinate',
message: 'something',
value: 0
})
expect(core.getFile(fileId).progress).toEqual({
percentage: 0,
bytesUploaded: 0,
bytesTotal: 17175,
uploadComplete: false,
uploadStarted: false,
preprocess: { mode: 'determinate', message: 'something', value: 0 }
})
})
it('should update the file progress state when preprocess-complete event is fired', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const fileID = Object.keys(core.getState().files)[0]
const file = core.getFile(fileID)
core.emit('preprocess-complete', file, {
mode: 'determinate',
message: 'something',
value: 0
})
expect(core.getFile(fileID).progress).toEqual({
percentage: 0,
bytesUploaded: 0,
bytesTotal: 17175,
uploadComplete: false,
uploadStarted: false
})
})
})
describe('postprocessors', () => {
it('should add a postprocessor', () => {
const core = new Core()
const postprocessor = function () {}
core.addPostProcessor(postprocessor)
expect(core.postProcessors[0]).toEqual(postprocessor)
})
it('should remove a postprocessor', () => {
const core = new Core()
const postprocessor1 = function () {}
const postprocessor2 = function () {}
const postprocessor3 = function () {}
core.addPostProcessor(postprocessor1)
core.addPostProcessor(postprocessor2)
core.addPostProcessor(postprocessor3)
expect(core.postProcessors.length).toEqual(3)
core.removePostProcessor(postprocessor2)
expect(core.postProcessors.length).toEqual(2)
})
it('should execute all the postprocessors when uploading a file', () => {
const core = new Core()
const postprocessor1 = jest.fn()
const postprocessor2 = jest.fn()
core.addPostProcessor(postprocessor1)
core.addPostProcessor(postprocessor2)
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
return core.upload().then(() => {
expect(postprocessor1.mock.calls.length).toEqual(1)
// const lastModifiedTime = new Date()
// const fileId = 'foojpg' + lastModifiedTime.getTime()
const fileId = 'uppy-foojpg-image'
expect(postprocessor1.mock.calls[0][0].length).toEqual(1)
expect(postprocessor1.mock.calls[0][0][0].substring(0, 17)).toEqual(
fileId.substring(0, 17)
)
expect(postprocessor2.mock.calls[0][0].length).toEqual(1)
expect(postprocessor2.mock.calls[0][0][0].substring(0, 17)).toEqual(
fileId.substring(0, 17)
)
})
})
it('should update the file progress state when postprocess-progress event is fired', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const fileId = Object.keys(core.getState().files)[0]
const file = core.getFile(fileId)
core.emit('postprocess-progress', file, {
mode: 'determinate',
message: 'something',
value: 0
})
expect(core.getFile(fileId).progress).toEqual({
percentage: 0,
bytesUploaded: 0,
bytesTotal: 17175,
uploadComplete: false,
uploadStarted: false,
postprocess: { mode: 'determinate', message: 'something', value: 0 }
})
})
it('should update the file progress state when postprocess-complete event is fired', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const fileId = Object.keys(core.getState().files)[0]
const file = core.getFile(fileId)
core.emit('postprocess-complete', file, {
mode: 'determinate',
message: 'something',
value: 0
})
expect(core.getFile(fileId).progress).toEqual({
percentage: 0,
bytesUploaded: 0,
bytesTotal: 17175,
uploadComplete: false,
uploadStarted: false
})
})
})
describe('uploaders', () => {
it('should add an uploader', () => {
const core = new Core()
const uploader = function () {}
core.addUploader(uploader)
expect(core.uploaders[0]).toEqual(uploader)
})
it('should remove an uploader', () => {
const core = new Core()
const uploader1 = function () {}
const uploader2 = function () {}
const uploader3 = function () {}
core.addUploader(uploader1)
core.addUploader(uploader2)
core.addUploader(uploader3)
expect(core.uploaders.length).toEqual(3)
core.removeUploader(uploader2)
expect(core.uploaders.length).toEqual(2)
})
})
describe('adding a file', () => {
it('should call onBeforeFileAdded if it was specified in the options when initialising the class', () => {
const onBeforeFileAdded = jest.fn()
const core = new Core({
onBeforeFileAdded
})
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
expect(onBeforeFileAdded.mock.calls.length).toEqual(1)
expect(onBeforeFileAdded.mock.calls[0][0].name).toEqual('foo.jpg')
expect(onBeforeFileAdded.mock.calls[0][1]).toEqual({})
})
it('should add a file', () => {
const fileData = new File([sampleImage], { type: 'image/jpeg' })
const fileAddedEventMock = jest.fn()
const core = new Core()
core.on('file-added', fileAddedEventMock)
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: fileData
})
const fileId = Object.keys(core.getState().files)[0]
const newFile = {
extension: 'jpg',
id: fileId,
isRemote: false,
meta: { name: 'foo.jpg', type: 'image/jpeg' },
name: 'foo.jpg',
preview: undefined,
data: fileData,
progress: {
bytesTotal: 17175,
bytesUploaded: 0,
percentage: 0,
uploadComplete: false,
uploadStarted: false
},
remote: '',
size: 17175,
source: 'jest',
type: 'image/jpeg'
}
expect(core.getFile(fileId)).toEqual(newFile)
expect(fileAddedEventMock.mock.calls[0][0]).toEqual(newFile)
})
it('should not allow a file that does not meet the restrictions', () => {
const core = new Core({
restrictions: {
allowedFileTypes: ['image/gif']
}
})
try {
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
throw new Error('File was allowed through')
} catch (err) {
expect(err.message).toEqual('You can only upload: image/gif')
}
})
it('should not allow a file if onBeforeFileAdded returned false', () => {
const core = new Core({
onBeforeFileAdded: (file, files) => {
if (file.source === 'jest') {
return false
}
}
})
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
expect(core.getFiles().length).toEqual(0)
})
})
describe('uploading a file', () => {
it('should return a { successful, failed } pair containing file objects', () => {
const core = new Core()
core.addUploader((fileIDs) => Promise.resolve())
core.addFile({ source: 'jest', name: 'foo.jpg', type: 'image/jpeg', data: new Uint8Array() })
core.addFile({ source: 'jest', name: 'bar.jpg', type: 'image/jpeg', data: new Uint8Array() })
return expect(core.upload()).resolves.toMatchObject({
successful: [
{ name: 'foo.jpg' },
{ name: 'bar.jpg' }
],
failed: []
})
})
it('should return files with errors in the { failed } key', () => {
const core = new Core()
core.addUploader((fileIDs) => {
fileIDs.forEach((fileID) => {
const file = core.getFile(fileID)
if (/bar/.test(file.name)) {
core.emit('upload-error', file, new Error('This is bar and I do not like bar'))
}
})
return Promise.resolve()
})
core.addFile({ source: 'jest', name: 'foo.jpg', type: 'image/jpeg', data: new Uint8Array() })
core.addFile({ source: 'jest', name: 'bar.jpg', type: 'image/jpeg', data: new Uint8Array() })
return expect(core.upload()).resolves.toMatchObject({
successful: [
{ name: 'foo.jpg' }
],
failed: [
{ name: 'bar.jpg', error: 'This is bar and I do not like bar' }
]
})
})
it('should only upload files that are not already assigned to another upload id', () => {
const core = new Core()
core.store.state.currentUploads = {
upload1: {
fileIDs: ['uppy-file1jpg-image/jpeg', 'uppy-file2jpg-image/jpeg', 'uppy-file3jpg-image/jpeg']
},
upload2: {
fileIDs: ['uppy-file4jpg-image/jpeg', 'uppy-file5jpg-image/jpeg', 'uppy-file6jpg-image/jpeg']
}
}
core.addUploader((fileIDs) => Promise.resolve())
core.addFile({ source: 'jest', name: 'foo.jpg', type: 'image/jpeg', data: new Uint8Array() })
core.addFile({ source: 'jest', name: 'bar.jpg', type: 'image/jpeg', data: new Uint8Array() })
core.addFile({ source: 'file3', name: 'file3.jpg', type: 'image/jpeg', data: new Uint8Array() })
return expect(core.upload()).resolves.toMatchSnapshot()
})
it('should not upload if onBeforeUpload returned false', () => {
const core = new Core({
autoProceed: false,
onBeforeUpload: (files) => {
for (var fileId in files) {
if (files[fileId].name === '123.foo') {
return false
}
}
}
})
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
core.addFile({
source: 'jest',
name: 'bar.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
core.addFile({
source: 'jest',
name: '123.foo',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
return core.upload().catch((err) => {
expect(err).toMatchObject(new Error('Not starting the upload because onBeforeUpload returned false'))
})
})
})
describe('removing a file', () => {
it('should remove the file', () => {
const fileRemovedEventMock = jest.fn()
const core = new Core()
core.on('file-removed', fileRemovedEventMock)
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const fileId = Object.keys(core.getState().files)[0]
expect(core.getFiles().length).toEqual(1)
core.setState({
totalProgress: 50
})
const file = core.getFile(fileId)
core.removeFile(fileId)
expect(core.getFiles().length).toEqual(0)
expect(fileRemovedEventMock.mock.calls[0][0]).toEqual(file)
expect(core.getState().totalProgress).toEqual(0)
})
})
describe('restoring a file', () => {
xit('should restore a file', () => {})
xit("should fail to restore a file if it doesn't exist", () => {})
})
describe('get a file', () => {
it('should get the specified file', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const fileId = Object.keys(core.getState().files)[0]
expect(core.getFile(fileId).name).toEqual('foo.jpg')
      expect(core.getFile('non existent file')).toEqual(undefined)
})
})
describe('getFiles', () => {
it('should return an empty array if there are no files', () => {
const core = new Core()
expect(core.getFiles()).toEqual([])
})
it('should return all files as an array', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
core.addFile({
source: 'jest',
name: 'empty.dat',
type: 'application/octet-stream',
data: new File([Buffer.alloc(1000)], { type: 'application/octet-stream' })
})
expect(core.getFiles()).toHaveLength(2)
expect(core.getFiles().map((file) => file.name).sort()).toEqual(['empty.dat', 'foo.jpg'])
})
})
describe('meta data', () => {
it('should set meta data by calling setMeta', () => {
const core = new Core({
meta: { foo2: 'bar2' }
})
core.setMeta({ foo: 'bar', bur: 'mur' })
core.setMeta({ boo: 'moo', bur: 'fur' })
expect(core.getState().meta).toEqual({
foo: 'bar',
foo2: 'bar2',
boo: 'moo',
bur: 'fur'
})
})
it('should update meta data for a file by calling updateMeta', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const fileId = Object.keys(core.getState().files)[0]
core.setFileMeta(fileId, { foo: 'bar', bur: 'mur' })
core.setFileMeta(fileId, { boo: 'moo', bur: 'fur' })
expect(core.getFile(fileId).meta).toEqual({
name: 'foo.jpg',
type: 'image/jpeg',
foo: 'bar',
bur: 'fur',
boo: 'moo'
})
})
it('should merge meta data when add file', () => {
const core = new Core({
meta: { foo2: 'bar2' }
})
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
meta: {
resize: 5000
},
data: new File([sampleImage], { type: 'image/jpeg' })
})
const fileId = Object.keys(core.getState().files)[0]
expect(core.getFile(fileId).meta).toEqual({
name: 'foo.jpg',
type: 'image/jpeg',
foo2: 'bar2',
resize: 5000
})
})
})
describe('progress', () => {
it('should calculate the progress of a file upload', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const fileId = Object.keys(core.getState().files)[0]
const file = core.getFile(fileId)
core._calculateProgress(file, {
bytesUploaded: 12345,
bytesTotal: 17175
})
expect(core.getFile(fileId).progress).toEqual({
percentage: 71,
bytesUploaded: 12345,
bytesTotal: 17175,
uploadComplete: false,
uploadStarted: false
})
core._calculateProgress(file, {
bytesUploaded: 17175,
bytesTotal: 17175
})
expect(core.getFile(fileId).progress).toEqual({
percentage: 100,
bytesUploaded: 17175,
bytesTotal: 17175,
uploadComplete: false,
uploadStarted: false
})
})
it('should calculate the total progress of all file uploads', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
core.addFile({
source: 'jest',
name: 'foo2.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const [file1, file2] = core.getFiles()
core.setFileState(file1.id, { progress: Object.assign({}, file1.progress, { uploadStarted: new Date() }) })
core.setFileState(file2.id, { progress: Object.assign({}, file2.progress, { uploadStarted: new Date() }) })
core._calculateProgress(core.getFile(file1.id), {
bytesUploaded: 12345,
bytesTotal: 17175
})
core._calculateProgress(core.getFile(file2.id), {
bytesUploaded: 10201,
bytesTotal: 17175
})
core._calculateTotalProgress()
expect(core.getState().totalProgress).toEqual(65)
})
it('should reset the progress', () => {
const resetProgressEvent = jest.fn()
const core = new Core()
core.on('reset-progress', resetProgressEvent)
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
core.addFile({
source: 'jest',
name: 'foo2.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
const [file1, file2] = core.getFiles()
core.setFileState(file1.id, { progress: Object.assign({}, file1.progress, { uploadStarted: new Date() }) })
core.setFileState(file2.id, { progress: Object.assign({}, file2.progress, { uploadStarted: new Date() }) })
core._calculateProgress(core.getFile(file1.id), {
bytesUploaded: 12345,
bytesTotal: 17175
})
core._calculateProgress(core.getFile(file2.id), {
bytesUploaded: 10201,
bytesTotal: 17175
})
core._calculateTotalProgress()
expect(core.getState().totalProgress).toEqual(65)
core.resetProgress()
expect(core.getFile(file1.id).progress).toEqual({
percentage: 0,
bytesUploaded: 0,
bytesTotal: 17175,
uploadComplete: false,
uploadStarted: false
})
expect(core.getFile(file2.id).progress).toEqual({
percentage: 0,
bytesUploaded: 0,
bytesTotal: 17175,
uploadComplete: false,
uploadStarted: false
})
expect(core.getState().totalProgress).toEqual(0)
expect(resetProgressEvent.mock.calls.length).toEqual(1)
})
})
describe('checkRestrictions', () => {
it('should enforce the maxNumberOfFiles rule', () => {
const core = new Core({
autoProceed: false,
restrictions: {
maxNumberOfFiles: 1
}
})
// add 2 files
core.addFile({
source: 'jest',
name: 'foo1.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
try {
core.addFile({
source: 'jest',
name: 'foo2.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
throw new Error('should have thrown')
} catch (err) {
expect(err).toMatchObject(new Error('You can only upload 1 file'))
expect(core.getState().info.message).toEqual('You can only upload 1 file')
}
})
xit('should enforce the minNumberOfFiles rule', () => {})
it('should enforce the allowedFileTypes rule', () => {
const core = new Core({
autoProceed: false,
restrictions: {
allowedFileTypes: ['image/gif', 'image/png']
}
})
try {
core.addFile({
source: 'jest',
name: 'foo2.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
throw new Error('should have thrown')
} catch (err) {
expect(err).toMatchObject(new Error('You can only upload: image/gif, image/png'))
expect(core.getState().info.message).toEqual('You can only upload: image/gif, image/png')
}
})
it('should enforce the allowedFileTypes rule with file extensions', () => {
const core = new Core({
autoProceed: false,
restrictions: {
allowedFileTypes: ['.gif', '.jpg', '.jpeg']
}
})
try {
core.addFile({
source: 'jest',
name: 'foo2.png',
type: '',
data: new File([sampleImage], { type: 'image/jpeg' })
})
throw new Error('should have thrown')
} catch (err) {
expect(err).toMatchObject(new Error('You can only upload: .gif, .jpg, .jpeg'))
expect(core.getState().info.message).toEqual('You can only upload: .gif, .jpg, .jpeg')
}
})
it('should enforce the maxFileSize rule', () => {
const core = new Core({
autoProceed: false,
restrictions: {
maxFileSize: 1234
}
})
try {
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
throw new Error('should have thrown')
} catch (err) {
expect(err).toMatchObject(new Error('This file exceeds maximum allowed size of 1.2 KB'))
expect(core.getState().info.message).toEqual('This file exceeds maximum allowed size of 1.2 KB')
}
})
})
describe('actions', () => {
it('should update the state when receiving the error event', () => {
const core = new Core()
core.emit('error', new Error('foooooo'))
expect(core.getState().error).toEqual('foooooo')
})
it('should update the state when receiving the upload-error event', () => {
const core = new Core()
core.setState({
files: {
fileId: {
id: 'fileId',
name: 'filename'
}
}
})
core.emit('upload-error', core.getFile('fileId'), new Error('this is the error'))
expect(core.getState().info).toEqual({'message': 'Failed to upload filename', 'details': 'this is the error', 'isHidden': false, 'type': 'error'})
})
it('should reset the error state when receiving the upload event', () => {
const core = new Core()
core.emit('error', { foo: 'bar' })
core.emit('upload')
expect(core.getState().error).toEqual(null)
})
})
describe('updateOnlineStatus', () => {
const RealNavigatorOnline = global.window.navigator.onLine
function mockNavigatorOnline (status) {
Object.defineProperty(
global.window.navigator,
'onLine',
{
value: status,
writable: true
}
)
}
afterEach(() => {
global.window.navigator.onLine = RealNavigatorOnline
})
it('should emit the correct event based on whether there is a network connection', () => {
const onlineEventMock = jest.fn()
const offlineEventMock = jest.fn()
const backOnlineEventMock = jest.fn()
const core = new Core()
core.on('is-offline', offlineEventMock)
core.on('is-online', onlineEventMock)
core.on('back-online', backOnlineEventMock)
mockNavigatorOnline(true)
core.updateOnlineStatus()
expect(onlineEventMock.mock.calls.length).toEqual(1)
expect(offlineEventMock.mock.calls.length).toEqual(0)
expect(backOnlineEventMock.mock.calls.length).toEqual(0)
mockNavigatorOnline(false)
core.updateOnlineStatus()
expect(onlineEventMock.mock.calls.length).toEqual(1)
expect(offlineEventMock.mock.calls.length).toEqual(1)
expect(backOnlineEventMock.mock.calls.length).toEqual(0)
mockNavigatorOnline(true)
core.updateOnlineStatus()
expect(onlineEventMock.mock.calls.length).toEqual(2)
expect(offlineEventMock.mock.calls.length).toEqual(1)
expect(backOnlineEventMock.mock.calls.length).toEqual(1)
})
})
describe('info', () => {
it('should set a string based message to be displayed infinitely', () => {
const infoVisibleEvent = jest.fn()
const core = new Core()
core.on('info-visible', infoVisibleEvent)
core.info('This is the message', 'info', 0)
expect(core.getState().info).toEqual({
isHidden: false,
type: 'info',
message: 'This is the message',
details: null
})
expect(infoVisibleEvent.mock.calls.length).toEqual(1)
expect(typeof core.infoTimeoutID).toEqual('undefined')
})
    it('should set an object based message to be displayed infinitely', () => {
const infoVisibleEvent = jest.fn()
const core = new Core()
core.on('info-visible', infoVisibleEvent)
core.info({
message: 'This is the message',
details: {
foo: 'bar'
}
}, 'warning', 0)
expect(core.getState().info).toEqual({
isHidden: false,
type: 'warning',
message: 'This is the message',
details: {
foo: 'bar'
}
})
expect(infoVisibleEvent.mock.calls.length).toEqual(1)
expect(typeof core.infoTimeoutID).toEqual('undefined')
})
it('should set an info message to be displayed for a period of time before hiding', (done) => {
const infoVisibleEvent = jest.fn()
const infoHiddenEvent = jest.fn()
const core = new Core()
core.on('info-visible', infoVisibleEvent)
core.on('info-hidden', infoHiddenEvent)
core.info('This is the message', 'info', 100)
expect(typeof core.infoTimeoutID).toEqual('number')
expect(infoHiddenEvent.mock.calls.length).toEqual(0)
setTimeout(() => {
expect(infoHiddenEvent.mock.calls.length).toEqual(1)
expect(core.getState().info).toEqual({
isHidden: true,
type: 'info',
message: 'This is the message',
details: null
})
done()
}, 110)
})
it('should hide an info message', () => {
const infoVisibleEvent = jest.fn()
const infoHiddenEvent = jest.fn()
const core = new Core()
core.on('info-visible', infoVisibleEvent)
core.on('info-hidden', infoHiddenEvent)
core.info('This is the message', 'info', 0)
expect(typeof core.infoTimeoutID).toEqual('undefined')
expect(infoHiddenEvent.mock.calls.length).toEqual(0)
core.hideInfo()
expect(infoHiddenEvent.mock.calls.length).toEqual(1)
expect(core.getState().info).toEqual({
isHidden: true,
type: 'info',
message: 'This is the message',
details: null
})
})
})
describe('createUpload', () => {
it('should assign the specified files to a new upload', () => {
const core = new Core()
core.addFile({
source: 'jest',
name: 'foo.jpg',
type: 'image/jpeg',
data: new File([sampleImage], { type: 'image/jpeg' })
})
core._createUpload(Object.keys(core.getState().files))
const uploadId = Object.keys(core.getState().currentUploads)[0]
const currentUploadsState = {}
currentUploadsState[uploadId] = {
fileIDs: Object.keys(core.getState().files),
step: 0,
result: {}
}
expect(core.getState().currentUploads).toEqual(currentUploadsState)
})
})
describe('i18n', () => {
it('merges in custom locale strings', () => {
const core = new Core({
locale: {
strings: {
test: 'beep boop'
}
}
})
expect(core.i18n('exceedsSize')).toBe('This file exceeds maximum allowed size of')
expect(core.i18n('test')).toBe('beep boop')
})
})
describe('default restrictions', () => {
it('should be merged with supplied restrictions', () => {
const core = new Core({
restrictions: {
maxNumberOfFiles: 3
}
})
expect(core.opts.restrictions.maxNumberOfFiles).toBe(3)
expect(core.opts.restrictions.minNumberOfFiles).toBe(null)
})
})
})
| 1 | 10,883 | Can we keep the other assertion too? I think it's helpful to ensure that the uninstall function was called too | transloadit-uppy | js |
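For reference, the uninstall check the reviewer wants to keep is the one the 'should close, reset and uninstall' test above already makes. A minimal sketch of retaining it alongside whatever assertion the patch added (the surrounding test setup is assumed to match that test):

// Hedged sketch: after close(), still verify the acquirer plugin was uninstalled.
core.close()
expect(core.plugins.acquirer[0].mocks.uninstall.mock.calls.length).toEqual(1)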
@@ -109,7 +109,7 @@ module RSpec
def warn_if_key_taken(source, key, new_block)
return unless existing_block = example_block_for(source, key)
- Kernel.warn <<-WARNING.gsub(/^ +\|/, '')
+ RSpec.warn_with <<-WARNING.gsub(/^ +\|/, '')
|WARNING: Shared example group '#{key}' has been previously defined at:
| #{formatted_location existing_block}
|...and you are now defining it at: | 1 | module RSpec
module Core
module SharedExampleGroup
# @overload shared_examples(name, &block)
# @overload shared_examples(name, tags, &block)
#
# Wraps the `block` in a module which can then be included in example
# groups using `include_examples`, `include_context`, or
# `it_behaves_like`.
#
# @param [String] name to match when looking up this shared group
# @param block to be eval'd in a nested example group generated by `it_behaves_like`
#
# @example
#
# shared_examples "auditable" do
# it "stores an audit record on save!" do
# lambda { auditable.save! }.should change(Audit, :count).by(1)
# end
# end
#
# class Account do
# it_behaves_like "auditable" do
# def auditable; Account.new; end
# end
# end
#
# @see ExampleGroup.it_behaves_like
# @see ExampleGroup.include_examples
# @see ExampleGroup.include_context
def shared_examples(*args, &block)
SharedExampleGroup.registry.add_group(self, *args, &block)
end
alias_method :shared_context, :shared_examples
alias_method :share_examples_for, :shared_examples
alias_method :shared_examples_for, :shared_examples
def shared_example_groups
SharedExampleGroup.registry.shared_example_groups_for('main', *ancestors[0..-1])
end
module TopLevelDSL
def shared_examples(*args, &block)
SharedExampleGroup.registry.add_group('main', *args, &block)
end
alias_method :shared_context, :shared_examples
alias_method :share_examples_for, :shared_examples
alias_method :shared_examples_for, :shared_examples
def shared_example_groups
SharedExampleGroup.registry.shared_example_groups_for('main')
end
end
def self.registry
@registry ||= Registry.new
end
# @private
#
# Used internally to manage the shared example groups and
# constants. We want to limit the number of methods we add
# to objects we don't own (main and Module) so this allows
# us to have helper methods that don't get added to those
# objects.
class Registry
def add_group(source, *args, &block)
ensure_block_has_source_location(block, CallerFilter.first_non_rspec_line)
if key? args.first
key = args.shift
warn_if_key_taken source, key, block
add_shared_example_group source, key, block
end
unless args.empty?
mod = Module.new
(class << mod; self; end).send :define_method, :extended do |host|
host.class_eval(&block)
end
RSpec.configuration.extend mod, *args
end
end
def shared_example_groups_for(*sources)
Collection.new(sources, shared_example_groups)
end
def shared_example_groups
@shared_example_groups ||= Hash.new { |hash,key| hash[key] = Hash.new }
end
def clear
shared_example_groups.clear
end
private
def add_shared_example_group(source, key, block)
shared_example_groups[source][key] = block
end
def key?(candidate)
[String, Symbol, Module].any? { |cls| cls === candidate }
end
def warn_if_key_taken(source, key, new_block)
return unless existing_block = example_block_for(source, key)
Kernel.warn <<-WARNING.gsub(/^ +\|/, '')
|WARNING: Shared example group '#{key}' has been previously defined at:
| #{formatted_location existing_block}
|...and you are now defining it at:
| #{formatted_location new_block}
|The new definition will overwrite the original one.
WARNING
end
def formatted_location(block)
block.source_location.join ":"
end
def example_block_for(source, key)
shared_example_groups[source][key]
end
if Proc.method_defined?(:source_location)
def ensure_block_has_source_location(block, caller_line); end
else # for 1.8.7
def ensure_block_has_source_location(block, caller_line)
block.extend Module.new {
define_method :source_location do
caller_line.split(':')
end
}
end
end
end
end
end
end
extend RSpec::Core::SharedExampleGroup::TopLevelDSL
Module.send(:include, RSpec::Core::SharedExampleGroup::TopLevelDSL)
| 1 | 9,601 | `warn_with` prefixes the message with `WARNING:`, right? So this will put `WARNING:` twice. It would be good to verify all the warnings look good after this change, given how easy it is to make a simple mistake like this :(. | rspec-rspec-core | rb |
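The note above boils down to a prefix being emitted twice. A minimal Ruby sketch of that interaction (it assumes, as the reviewer does, that RSpec.warn_with prepends its own "WARNING: "; warn_with below is a stand-in for that behaviour, not the real implementation):

# Stand-in for the assumed behaviour of RSpec.warn_with: the prefix is added here.
def warn_with(message)
  Kernel.warn "WARNING: #{message}"
end

# With the heredoc unchanged, the prefix appears twice:
warn_with(<<-WARNING.gsub(/^ +\|/, ''))
  |WARNING: Shared example group 'x' has been previously defined at: ...
WARNING
# => WARNING: WARNING: Shared example group 'x' has been previously defined at: ...

# Dropping the literal prefix from the heredoc avoids the duplication:
warn_with(<<-MSG.gsub(/^ +\|/, ''))
  |Shared example group 'x' has been previously defined at: ...
MSG
# => WARNING: Shared example group 'x' has been previously defined at: ...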
@@ -12,6 +12,19 @@ import (
"github.com/filecoin-project/go-filecoin/types"
)
+// MessageTimeOut is the number of tipsets we should receive before timing out messages
+const MessageTimeOut = 6
+
+type timedmessage struct {
+ message *types.SignedMessage
+ addedAt uint64
+}
+
+// BlockTimer defines an interface to a struct that can give the current block height.
+type BlockTimer interface {
+ BlockHeight() (uint64, error)
+}
+
// MessagePool keeps an unordered, de-duplicated set of Messages and supports removal by CID.
// By 'de-duplicated' we mean that insertion of a message by cid that already
// exists is a nop. We use a MessagePool to store all messages received by this node | 1 | package core
import (
"context"
"sync"
"gx/ipfs/QmNf3wujpV2Y7Lnj2hy2UrmuX8bhMDStRHbnSLh7Ypf36h/go-hamt-ipld"
"gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid"
"gx/ipfs/QmVmDhyTTUcQXFD1rRQ64fGLMSAoaQvNH3hwuaCFAPq2hy/errors"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/types"
)
// MessagePool keeps an unordered, de-duplicated set of Messages and supports removal by CID.
// By 'de-duplicated' we mean that insertion of a message by cid that already
// exists is a nop. We use a MessagePool to store all messages received by this node
// via network or directly created via user command that have yet to be included
// in a block. Messages are removed as they are processed.
//
// MessagePool is safe for concurrent access.
type MessagePool struct {
lk sync.RWMutex
pending map[cid.Cid]*types.SignedMessage // all pending messages
}
// Add adds a message to the pool.
func (pool *MessagePool) Add(msg *types.SignedMessage) (cid.Cid, error) {
pool.lk.Lock()
defer pool.lk.Unlock()
c, err := msg.Cid()
if err != nil {
return cid.Undef, errors.Wrap(err, "failed to create CID")
}
	// Reject messages with invalid signatures
if !msg.VerifySignature() {
return cid.Undef, errors.Errorf("failed to add message %s to pool: sig invalid", c.String())
}
pool.pending[c] = msg
return c, nil
}
// Pending returns all pending messages.
func (pool *MessagePool) Pending() []*types.SignedMessage {
pool.lk.Lock()
defer pool.lk.Unlock()
out := make([]*types.SignedMessage, 0, len(pool.pending))
for _, msg := range pool.pending {
out = append(out, msg)
}
return out
}
// Get retrieves a message from the pool by CID.
func (pool *MessagePool) Get(c cid.Cid) (value *types.SignedMessage, ok bool) {
pool.lk.Lock()
defer pool.lk.Unlock()
value, ok = pool.pending[c]
if ok && value == nil {
panic("Found nil message for CID " + c.String())
}
return
}
// Remove removes the message by CID from the pending pool.
func (pool *MessagePool) Remove(c cid.Cid) {
pool.lk.Lock()
defer pool.lk.Unlock()
delete(pool.pending, c)
}
// NewMessagePool constructs a new MessagePool.
func NewMessagePool() *MessagePool {
return &MessagePool{
pending: make(map[cid.Cid]*types.SignedMessage),
}
}
// getParentTipSet returns the parent tipset of the provided tipset
// TODO msgPool should have access to a chain store that can just look this up...
func getParentTipSet(ctx context.Context, store *hamt.CborIpldStore, ts types.TipSet) (types.TipSet, error) {
newTipSet := types.TipSet{}
parents, err := ts.Parents()
if err != nil {
return nil, err
}
for it := parents.Iter(); !it.Complete() && ctx.Err() == nil; it.Next() {
var newBlk types.Block
if err := store.Get(ctx, it.Value(), &newBlk); err != nil {
return nil, err
}
if err := newTipSet.AddBlock(&newBlk); err != nil {
return nil, err
}
}
return newTipSet, nil
}
// collectChainsMessagesToHeight is a helper that collects all the messages
// from block `b` down the chain to but not including its ancestor of
// height `height`. This function returns the messages collected along with
// the tipset at the final height.
// TODO ripe for optimizing away lots of allocations
func collectChainsMessagesToHeight(ctx context.Context, store *hamt.CborIpldStore, curTipSet types.TipSet, height uint64) ([]*types.SignedMessage, types.TipSet, error) {
var msgs []*types.SignedMessage
h, err := curTipSet.Height()
if err != nil {
return nil, nil, err
}
for h > height {
for _, blk := range curTipSet {
msgs = append(msgs, blk.Messages...)
}
parents, err := curTipSet.Parents()
if err != nil {
return nil, nil, err
}
switch parents.Len() {
case 0:
return msgs, curTipSet, nil
default:
nextTipSet, err := getParentTipSet(ctx, store, curTipSet)
if err != nil {
return []*types.SignedMessage{}, types.TipSet{}, err
}
curTipSet = nextTipSet
h, err = curTipSet.Height()
if err != nil {
return nil, nil, err
}
}
}
return msgs, curTipSet, nil
}
// UpdateMessagePool brings the message pool into the correct state after
// we accept a new block. It removes messages from the pool that are
// found in the newly adopted chain and adds back those from the removed
// chain (if any) that do not appear in the new chain. We think
// that the right model for keeping the message pool up to date is
// to think about it like a garbage collector.
//
// TODO there is considerable functionality missing here: don't add
// messages that have expired, respect nonce, do this efficiently,
// etc.
func UpdateMessagePool(ctx context.Context, pool *MessagePool, store *hamt.CborIpldStore, old, new types.TipSet) error {
// Strategy: walk head-of-chain pointers old and new back until they are at the same
	// height, then walk back in lockstep to find the common ancestor.
// If old is higher/longer than new, collect all the messages
// from old's chain down to the height of new.
newHeight, err := new.Height()
if err != nil {
return err
}
addToPool, old, err := collectChainsMessagesToHeight(ctx, store, old, newHeight)
if err != nil {
return err
}
// If new is higher/longer than old, collect all the messages
// from new's chain down to the height of old.
oldHeight, err := old.Height()
if err != nil {
return err
}
removeFromPool, new, err := collectChainsMessagesToHeight(ctx, store, new, oldHeight)
if err != nil {
return err
}
// Old and new are now at the same height. Keep walking them down a
// tipset at a time in lockstep until they are pointing to the same
// tipset, the common ancestor. Collect their messages to add/remove
// along the way.
//
// TODO probably should limit depth here.
for !old.Equals(new) {
for _, blk := range old {
// skip genesis block
if blk.Height > 0 {
addToPool = append(addToPool, blk.Messages...)
}
}
for _, blk := range new {
removeFromPool = append(removeFromPool, blk.Messages...)
}
oldParents, err := old.Parents()
if err != nil {
return err
}
newParents, err := new.Parents()
if err != nil {
return err
}
if oldParents.Empty() || newParents.Empty() {
break
}
old, err = getParentTipSet(ctx, store, old)
if err != nil {
return err
}
new, err = getParentTipSet(ctx, store, new)
if err != nil {
return err
}
}
// Now actually update the pool.
for _, m := range addToPool {
_, err := pool.Add(m)
if err != nil {
return err
}
}
// m.Cid() can error, so collect all the Cids before
removeCids := make([]cid.Cid, len(removeFromPool))
for i, m := range removeFromPool {
cid, err := m.Cid()
if err != nil {
return err
}
removeCids[i] = cid
}
for _, cid := range removeCids {
pool.Remove(cid)
}
return nil
}
// LargestNonce returns the largest nonce used by a message from address in the pool.
// If no messages from address are found, found will be false.
func LargestNonce(pool *MessagePool, address address.Address) (largest uint64, found bool) {
for _, m := range pool.Pending() {
if m.From == address {
found = true
if uint64(m.Nonce) > largest {
largest = uint64(m.Nonce)
}
}
}
return
}
| 1 | 17,718 | FYI In the message queue I use the term "stamp" to refer to the time-like mark associated with each message. It's opaque to the queue/pool and should make no difference if the stamps and age limit were converted to seconds. So this could then become `Stamper` with `CurrentStamp()` method, no reference to "blocks" or height etc. The wrapper struct could be `StampedMessage. The interpretation as block height is deferred to the user/constructor that hooks things up, the "business logic". This is just an observation, take or ignore as you wish. | filecoin-project-venus | go |
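A small Go sketch of the shape the note above describes (Stamper, CurrentStamp and StampedMessage are the reviewer's suggested names, not existing types in this repository; the adapter is likewise hypothetical, and the concrete message type is elided to keep the sketch self-contained):

package core // sketch only

// Stamper yields an opaque, time-like stamp; the pool never interprets it.
type Stamper interface {
	CurrentStamp() (uint64, error)
}

// StampedMessage pairs a pooled message with the stamp it was added at.
type StampedMessage struct {
	Message interface{}
	Stamp   uint64
}

// heightStamper is the wiring that interprets stamps as block heights, so the
// pool itself never needs to mention blocks or heights.
type heightStamper struct {
	blockHeight func() (uint64, error)
}

func (h heightStamper) CurrentStamp() (uint64, error) {
	return h.blockHeight()
}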
@@ -508,8 +508,13 @@ def remove_xml_preamble(response):
# --------------
def get_lifecycle(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
+ exists, code, body = is_bucket_available(bucket_name)
+ if not exists:
+ return requests_response(body, status_code=code)
+
lifecycle = BUCKET_LIFECYCLE.get(bucket_name)
status_code = 200
+
if not lifecycle:
# TODO: check if bucket actually exists
lifecycle = { | 1 | import random
import re
import logging
import json
import time
from pytz import timezone
import uuid
import base64
import codecs
import xmltodict
import collections
import botocore.config
import six
import datetime
import dateutil.parser
from six.moves.urllib import parse as urlparse
from botocore.client import ClientError
from requests.models import Response, Request
from localstack import config, constants
from localstack.config import HOSTNAME, HOSTNAME_EXTERNAL
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
short_uid, timestamp_millis, to_str, to_bytes, clone, md5, get_service_protocol
)
from localstack.utils.analytics import event_publisher
from localstack.utils.aws.aws_responses import requests_response
from localstack.utils.persistence import PersistingProxyListener
from localstack.services.s3 import multipart_content
CONTENT_SHA256_HEADER = 'x-amz-content-sha256'
STREAMING_HMAC_PAYLOAD = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
# mappings for S3 bucket notifications
S3_NOTIFICATIONS = {}
# mappings for bucket CORS settings
BUCKET_CORS = {}
# maps bucket name to lifecycle settings
BUCKET_LIFECYCLE = {}
# maps bucket name to replication settings
BUCKET_REPLICATIONS = {}
# maps bucket name to encryption settings
BUCKET_ENCRYPTIONS = {}
# maps bucket name to object lock settings
OBJECT_LOCK_CONFIGS = {}
# map to store the s3 expiry dates
OBJECT_EXPIRY = {}
# set up logger
LOGGER = logging.getLogger(__name__)
# XML namespace constants
XMLNS_S3 = 'http://s3.amazonaws.com/doc/2006-03-01/'
# see https://stackoverflow.com/questions/50480924/regex-for-s3-bucket-name#50484916
BUCKET_NAME_REGEX = (r'(?=^.{3,63}$)(?!^(\d+\.)+\d+$)' +
r'(^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)*([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$)')
# list of destination types for bucket notifications
NOTIFICATION_DESTINATION_TYPES = ('Queue', 'Topic', 'CloudFunction', 'LambdaFunction')
# prefix for object metadata keys in headers and query params
OBJECT_METADATA_KEY_PREFIX = 'x-amz-meta-'
# response header overrides the client may request
ALLOWED_HEADER_OVERRIDES = {
'response-content-type': 'Content-Type',
'response-content-language': 'Content-Language',
'response-expires': 'Expires',
'response-cache-control': 'Cache-Control',
'response-content-disposition': 'Content-Disposition',
'response-content-encoding': 'Content-Encoding',
}
def event_type_matches(events, action, api_method):
""" check whether any of the event types in `events` matches the
given `action` and `api_method`, and return the first match. """
events = events or []
for event in events:
regex = event.replace('*', '[^:]*')
action_string = 's3:%s:%s' % (action, api_method)
match = re.match(regex, action_string)
if match:
return match
return False
def filter_rules_match(filters, object_path):
""" check whether the given object path matches all of the given filters """
filters = filters or {}
s3_filter = _get_s3_filter(filters)
for rule in s3_filter.get('FilterRule', []):
rule_name_lower = rule['Name'].lower()
if rule_name_lower == 'prefix':
if not prefix_with_slash(object_path).startswith(prefix_with_slash(rule['Value'])):
return False
elif rule_name_lower == 'suffix':
if not object_path.endswith(rule['Value']):
return False
else:
LOGGER.warning('Unknown filter name: "%s"' % rule['Name'])
return True
def _get_s3_filter(filters):
return filters.get('S3Key', filters.get('Key', {}))
def prefix_with_slash(s):
return s if s[0] == '/' else '/%s' % s
def get_event_message(event_name, bucket_name, file_name='testfile.txt', version_id=None, file_size=0):
# Based on: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html
bucket_name = normalize_bucket_name(bucket_name)
return {
'Records': [{
'eventVersion': '2.0',
'eventSource': 'aws:s3',
'awsRegion': aws_stack.get_region(),
'eventTime': timestamp_millis(),
'eventName': event_name,
'userIdentity': {
'principalId': 'AIDAJDPLRKLG7UEXAMPLE'
},
'requestParameters': {
'sourceIPAddress': '127.0.0.1' # TODO determine real source IP
},
'responseElements': {
'x-amz-request-id': short_uid(),
'x-amz-id-2': 'eftixk72aD6Ap51TnqcoF8eFidJG9Z/2' # Amazon S3 host that processed the request
},
's3': {
's3SchemaVersion': '1.0',
'configurationId': 'testConfigRule',
'bucket': {
'name': bucket_name,
'ownerIdentity': {
'principalId': 'A3NL1KOZZKExample'
},
'arn': 'arn:aws:s3:::%s' % bucket_name
},
'object': {
'key': file_name,
'size': file_size,
'eTag': 'd41d8cd98f00b204e9800998ecf8427e',
'versionId': version_id,
'sequencer': '0055AED6DCD90281E5'
}
}
}]
}
def send_notifications(method, bucket_name, object_path, version_id):
bucket_name = normalize_bucket_name(bucket_name)
for bucket, notifs in S3_NOTIFICATIONS.items():
if bucket == bucket_name:
action = {'PUT': 'ObjectCreated', 'POST': 'ObjectCreated', 'DELETE': 'ObjectRemoved'}[method]
# TODO: support more detailed methods, e.g., DeleteMarkerCreated
# http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
if action == 'ObjectCreated' and method == 'POST':
api_method = 'CompleteMultipartUpload'
else:
api_method = {'PUT': 'Put', 'POST': 'Post', 'DELETE': 'Delete'}[method]
event_name = '%s:%s' % (action, api_method)
for notif in notifs:
send_notification_for_subscriber(notif, bucket_name, object_path,
version_id, api_method, action, event_name)
def send_notification_for_subscriber(notif, bucket_name, object_path, version_id, api_method, action, event_name):
bucket_name = normalize_bucket_name(bucket_name)
if not event_type_matches(notif['Event'], action, api_method) or \
not filter_rules_match(notif.get('Filter'), object_path):
return
key = urlparse.unquote(object_path.replace('//', '/'))[1:]
s3_client = aws_stack.connect_to_service('s3')
try:
object_size = s3_client.head_object(Bucket=bucket_name, Key=key).get('ContentLength', 0)
except botocore.exceptions.ClientError:
object_size = 0
# build event message
message = get_event_message(
event_name=event_name,
bucket_name=bucket_name,
file_name=key,
file_size=object_size,
version_id=version_id
)
message = json.dumps(message)
if notif.get('Queue'):
sqs_client = aws_stack.connect_to_service('sqs')
try:
queue_url = aws_stack.sqs_queue_url_for_arn(notif['Queue'])
sqs_client.send_message(QueueUrl=queue_url, MessageBody=message)
except Exception as e:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s' %
(bucket_name, notif['Queue'], e))
if notif.get('Topic'):
sns_client = aws_stack.connect_to_service('sns')
try:
sns_client.publish(TopicArn=notif['Topic'], Message=message, Subject='Amazon S3 Notification')
except Exception:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to SNS topic "%s".' %
(bucket_name, notif['Topic']))
# CloudFunction and LambdaFunction are semantically identical
lambda_function_config = notif.get('CloudFunction') or notif.get('LambdaFunction')
if lambda_function_config:
# make sure we don't run into a socket timeout
connection_config = botocore.config.Config(read_timeout=300)
lambda_client = aws_stack.connect_to_service('lambda', config=connection_config)
try:
lambda_client.invoke(FunctionName=lambda_function_config,
InvocationType='Event', Payload=message)
except Exception:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to Lambda function "%s".' %
(bucket_name, lambda_function_config))
if not filter(lambda x: notif.get(x), NOTIFICATION_DESTINATION_TYPES):
LOGGER.warning('Neither of %s defined for S3 notification.' %
'/'.join(NOTIFICATION_DESTINATION_TYPES))
def get_cors(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
response = Response()
exists, code = bucket_exists(bucket_name)
if not exists:
response.status_code = code
return response
cors = BUCKET_CORS.get(bucket_name)
if not cors:
cors = {
'CORSConfiguration': []
}
body = xmltodict.unparse(cors)
response._content = body
response.status_code = 200
return response
def set_cors(bucket_name, cors):
bucket_name = normalize_bucket_name(bucket_name)
response = Response()
exists, code = bucket_exists(bucket_name)
if not exists:
response.status_code = code
return response
if not isinstance(cors, dict):
cors = xmltodict.parse(cors)
BUCKET_CORS[bucket_name] = cors
response.status_code = 200
return response
def delete_cors(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
response = Response()
exists, code = bucket_exists(bucket_name)
if not exists:
response.status_code = code
return response
BUCKET_CORS.pop(bucket_name, {})
response.status_code = 200
return response
def append_cors_headers(bucket_name, request_method, request_headers, response):
bucket_name = normalize_bucket_name(bucket_name)
cors = BUCKET_CORS.get(bucket_name)
if not cors:
return
origin = request_headers.get('Origin', '')
rules = cors['CORSConfiguration']['CORSRule']
if not isinstance(rules, list):
rules = [rules]
for rule in rules:
# add allow-origin header
allowed_methods = rule.get('AllowedMethod', [])
if request_method in allowed_methods:
allowed_origins = rule.get('AllowedOrigin', [])
for allowed in allowed_origins:
if origin in allowed or re.match(allowed.replace('*', '.*'), origin):
response.headers['Access-Control-Allow-Origin'] = origin
if 'ExposeHeader' in rule:
expose_headers = rule['ExposeHeader']
response.headers['Access-Control-Expose-Headers'] = \
','.join(expose_headers) if isinstance(expose_headers, list) else expose_headers
break
def append_aws_request_troubleshooting_headers(response):
gen_amz_request_id = ''.join(random.choice('0123456789ABCDEF') for i in range(16))
if response.headers.get('x-amz-request-id') is None:
response.headers['x-amz-request-id'] = gen_amz_request_id
if response.headers.get('x-amz-id-2') is None:
response.headers['x-amz-id-2'] = 'MzRISOwyjmnup' + gen_amz_request_id + '7/JypPGXLh0OVFGcJaaO3KW/hRAqKOpIEEp'
def add_accept_range_header(response):
if response.headers.get('accept-ranges') is None:
response.headers['accept-ranges'] = 'bytes'
def is_object_expired(path):
object_expiry = get_object_expiry(path)
if not object_expiry:
return False
if dateutil.parser.parse(object_expiry) > \
datetime.datetime.now(timezone(dateutil.parser.parse(object_expiry).tzname())):
return False
return True
def set_object_expiry(path, headers):
OBJECT_EXPIRY[path] = headers.get('expires')
def get_object_expiry(path):
return OBJECT_EXPIRY.get(path)
def is_url_already_expired(expiry_timestamp):
if int(expiry_timestamp) < int(time.time()):
return True
return False
def add_reponse_metadata_headers(response):
if response.headers.get('content-language') is None:
response.headers['content-language'] = 'en-US'
if response.headers.get('cache-control') is None:
response.headers['cache-control'] = 'no-cache'
if response.headers.get('content-encoding') is None:
response.headers['content-encoding'] = 'identity'
def append_last_modified_headers(response, content=None):
"""Add Last-Modified header with current time
(if the response content is an XML containing <LastModified>, add that instead)"""
time_format = '%a, %d %b %Y %H:%M:%S GMT' # TimeFormat
try:
if content:
last_modified_str = re.findall(r'<LastModified>([^<]*)</LastModified>', content)
if last_modified_str:
last_modified_str = last_modified_str[0]
last_modified_time_format = dateutil.parser.parse(last_modified_str).strftime(time_format)
response.headers['Last-Modified'] = last_modified_time_format
except TypeError as err:
LOGGER.debug('No parsable content: %s' % err)
except ValueError as err:
LOGGER.error('Failed to parse LastModified: %s' % err)
except Exception as err:
LOGGER.error('Caught generic exception (parsing LastModified): %s' % err)
# if cannot parse any LastModified, just continue
try:
if response.headers.get('Last-Modified', '') == '':
response.headers['Last-Modified'] = datetime.datetime.now().strftime(time_format)
except Exception as err:
LOGGER.error('Caught generic exception (setting LastModified header): %s' % err)
def append_list_objects_marker(method, path, data, response):
if 'marker=' in path:
content = to_str(response.content)
if '<ListBucketResult' in content and '<Marker>' not in content:
parsed = urlparse.urlparse(path)
query_map = urlparse.parse_qs(parsed.query)
insert = '<Marker>%s</Marker>' % query_map.get('marker')[0]
response._content = content.replace('</ListBucketResult>', '%s</ListBucketResult>' % insert)
response.headers['Content-Length'] = str(len(response._content))
def append_metadata_headers(method, query_map, headers):
for key, value in query_map.items():
if key.lower().startswith(OBJECT_METADATA_KEY_PREFIX):
if headers.get(key) is None:
headers[key] = value[0]
def fix_location_constraint(response):
""" Make sure we return a valid non-empty LocationConstraint, as this otherwise breaks Serverless. """
try:
content = to_str(response.content or '') or ''
except Exception:
content = ''
if 'LocationConstraint' in content:
pattern = r'<LocationConstraint([^>]*)>\s*</LocationConstraint>'
replace = r'<LocationConstraint\1>%s</LocationConstraint>' % aws_stack.get_region()
response._content = re.sub(pattern, replace, content)
remove_xml_preamble(response)
def fix_range_content_type(bucket_name, path, headers, response):
# Fix content type for Range requests - https://github.com/localstack/localstack/issues/1259
if 'Range' not in headers:
return
s3_client = aws_stack.connect_to_service('s3')
path = urlparse.unquote(path)
key_name = get_key_name(path, headers)
result = s3_client.head_object(Bucket=bucket_name, Key=key_name)
content_type = result['ContentType']
if response.headers.get('Content-Type') == 'text/html; charset=utf-8':
response.headers['Content-Type'] = content_type
def fix_delete_objects_response(bucket_name, method, parsed_path, data, headers, response):
# Deleting non-existing keys should not result in errors.
# Fixes https://github.com/localstack/localstack/issues/1893
if not (method == 'POST' and parsed_path.query == 'delete' and '<Delete' in to_str(data or '')):
return
content = to_str(response._content)
if '<Error>' not in content:
return
result = xmltodict.parse(content).get('DeleteResult')
errors = result.get('Error')
errors = errors if isinstance(errors, list) else [errors]
deleted = result.get('Deleted')
if not isinstance(result.get('Deleted'), list):
deleted = result['Deleted'] = [deleted] if deleted else []
for entry in list(errors):
if set(entry.keys()) == set(['Key']):
errors.remove(entry)
deleted.append(entry)
if not errors:
result.pop('Error')
response._content = xmltodict.unparse({'DeleteResult': result})
def fix_metadata_key_underscores(request_headers={}, response=None):
# fix for https://github.com/localstack/localstack/issues/1790
underscore_replacement = '---'
meta_header_prefix = 'x-amz-meta-'
prefix_len = len(meta_header_prefix)
updated = False
for key in list(request_headers.keys()):
if key.lower().startswith(meta_header_prefix):
key_new = meta_header_prefix + key[prefix_len:].replace('_', underscore_replacement)
if key != key_new:
request_headers[key_new] = request_headers.pop(key)
updated = True
if response:
for key in list(response.headers.keys()):
if key.lower().startswith(meta_header_prefix):
key_new = meta_header_prefix + key[prefix_len:].replace(underscore_replacement, '_')
if key != key_new:
response.headers[key_new] = response.headers.pop(key)
return updated
def fix_creation_date(method, path, response):
if method != 'GET' or path != '/':
return
response._content = re.sub(r'([0-9])</CreationDate>', r'\1Z</CreationDate>', to_str(response._content))
def fix_etag_for_multipart(data, headers, response):
# Fix for https://github.com/localstack/localstack/issues/1978
if headers.get(CONTENT_SHA256_HEADER) == STREAMING_HMAC_PAYLOAD:
try:
if b'chunk-signature=' not in to_bytes(data):
return
correct_hash = md5(strip_chunk_signatures(data))
tags = r'<ETag>%s</ETag>'
pattern = r'(")?([^<&]+)(")?'
replacement = r'\g<1>%s\g<3>' % correct_hash
response._content = re.sub(tags % pattern, tags % replacement, to_str(response.content))
if response.headers.get('ETag'):
response.headers['ETag'] = re.sub(pattern, replacement, response.headers['ETag'])
except Exception:
pass
def remove_xml_preamble(response):
""" Removes <?xml ... ?> from a response content """
response._content = re.sub(r'^<\?[^\?]+\?>', '', to_str(response._content))
# --------------
# HELPER METHODS
# for lifecycle/replication/encryption/...
# --------------
def get_lifecycle(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
lifecycle = BUCKET_LIFECYCLE.get(bucket_name)
status_code = 200
if not lifecycle:
# TODO: check if bucket actually exists
lifecycle = {
'Error': {
'Code': 'NoSuchLifecycleConfiguration',
'Message': 'The lifecycle configuration does not exist'
}
}
status_code = 404
body = xmltodict.unparse(lifecycle)
return requests_response(body, status_code=status_code)
def get_replication(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
replication = BUCKET_REPLICATIONS.get(bucket_name)
status_code = 200
if not replication:
# TODO: check if bucket actually exists
replication = {
'Error': {
'Code': 'ReplicationConfigurationNotFoundError',
'Message': 'The replication configuration was not found'
}
}
status_code = 404
body = xmltodict.unparse(replication)
return requests_response(body, status_code=status_code)
def get_encryption(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
encryption = BUCKET_ENCRYPTIONS.get(bucket_name)
status_code = 200
if not encryption:
# TODO: check if bucket actually exists
encryption = {
'Error': {
'Code': 'ServerSideEncryptionConfigurationNotFoundError',
'Message': 'The server side encryption configuration was not found'
}
}
status_code = 404
body = xmltodict.unparse(encryption)
return requests_response(body, status_code=status_code)
def get_object_lock(bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
lock_config = OBJECT_LOCK_CONFIGS.get(bucket_name)
status_code = 200
if not lock_config:
# TODO: check if bucket actually exists
lock_config = {
'Error': {
'Code': 'ObjectLockConfigurationNotFoundError',
'Message': 'Object Lock configuration does not exist for this bucket'
}
}
status_code = 404
body = xmltodict.unparse(lock_config)
return requests_response(body, status_code=status_code)
def set_lifecycle(bucket_name, lifecycle):
bucket_name = normalize_bucket_name(bucket_name)
# TODO: check if bucket exists, otherwise return 404-like error
if isinstance(to_str(lifecycle), six.string_types):
lifecycle = xmltodict.parse(lifecycle)
BUCKET_LIFECYCLE[bucket_name] = lifecycle
return 200
def set_replication(bucket_name, replication):
bucket_name = normalize_bucket_name(bucket_name)
# TODO: check if bucket exists, otherwise return 404-like error
if isinstance(to_str(replication), six.string_types):
replication = xmltodict.parse(replication)
BUCKET_REPLICATIONS[bucket_name] = replication
return 200
def set_encryption(bucket_name, encryption):
bucket_name = normalize_bucket_name(bucket_name)
# TODO: check if bucket exists, otherwise return 404-like error
if isinstance(to_str(encryption), six.string_types):
encryption = xmltodict.parse(encryption)
BUCKET_ENCRYPTIONS[bucket_name] = encryption
return 200
def set_object_lock(bucket_name, lock_config):
bucket_name = normalize_bucket_name(bucket_name)
# TODO: check if bucket exists, otherwise return 404-like error
if isinstance(to_str(lock_config), six.string_types):
lock_config = xmltodict.parse(lock_config)
OBJECT_LOCK_CONFIGS[bucket_name] = lock_config
return 200
# -------------
# UTIL METHODS
# -------------
def strip_chunk_signatures(data):
# For clients that use streaming v4 authentication, the request contains chunk signatures
# in the HTTP body (see example below) which we need to strip as moto cannot handle them
#
# 17;chunk-signature=6e162122ec4962bea0b18bc624025e6ae4e9322bdc632762d909e87793ac5921
# <payload data ...>
# 0;chunk-signature=927ab45acd82fc90a3c210ca7314d59fedc77ce0c914d79095f8cc9563cf2c70
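    # The substitution below removes the '<hex size>;chunk-signature=<64 hex chars>'
    # framing lines along with the adjacent CRLFs, leaving only the raw payload bytes.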
data_new = ''
if data is not None:
data_new = re.sub(b'(^|\r\n)[0-9a-fA-F]+;chunk-signature=[0-9a-f]{64}(\r\n)(\r\n$)?', b'',
data, flags=re.MULTILINE | re.DOTALL)
return data_new
def bucket_exists(bucket_name):
"""Tests for the existence of the specified bucket. Returns the error code
if the bucket does not exist (200 if the bucket does exist).
"""
bucket_name = normalize_bucket_name(bucket_name)
s3_client = aws_stack.connect_to_service('s3')
try:
s3_client.head_bucket(Bucket=bucket_name)
except ClientError as err:
error_code = err.response.get('Error').get('Code')
return False, error_code
return True, 200
def check_content_md5(data, headers):
actual = md5(strip_chunk_signatures(data))
expected = headers['Content-MD5']
try:
expected = to_str(codecs.encode(base64.b64decode(expected), 'hex'))
except Exception:
expected = '__invalid__'
if actual != expected:
return error_response('The Content-MD5 you specified was invalid', 'InvalidDigest', status_code=400)
def error_response(message, code, status_code=400):
result = {'Error': {'Code': code, 'Message': message}}
content = xmltodict.unparse(result)
headers = {'content-type': 'application/xml'}
return requests_response(content, status_code=status_code, headers=headers)
def no_such_key_error(resource, requestId=None, status_code=400):
result = {'Error': {'Code': 'NoSuchKey',
'Message': 'The resource you requested does not exist',
'Resource': resource, 'RequestId': requestId}}
content = xmltodict.unparse(result)
headers = {'content-type': 'application/xml'}
return requests_response(content, status_code=status_code, headers=headers)
def token_expired_error(resource, requestId=None, status_code=400):
result = {'Error': {'Code': 'ExpiredToken',
'Message': 'The provided token has expired.',
'Resource': resource, 'RequestId': requestId}}
content = xmltodict.unparse(result)
headers = {'content-type': 'application/xml'}
return requests_response(content, status_code=status_code, headers=headers)
def expand_redirect_url(starting_url, key, bucket):
""" Add key and bucket parameters to starting URL query string. """
parsed = urlparse.urlparse(starting_url)
query = collections.OrderedDict(urlparse.parse_qsl(parsed.query))
query.update([('key', key), ('bucket', bucket)])
redirect_url = urlparse.urlunparse((
parsed.scheme, parsed.netloc, parsed.path,
parsed.params, urlparse.urlencode(query), None))
return redirect_url
def is_bucket_specified_in_domain_name(path, headers):
host = headers.get('host', '')
    return re.match(r'.*s3(\-website)?\.([^\.]+\.)?amazonaws\.com', host)
def is_object_specific_request(path, headers):
""" Return whether the given request is specific to a certain S3 object.
Note: the bucket name is usually specified as a path parameter,
but may also be part of the domain name! """
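    # e.g. '/my-bucket/my-key' (path style) and '/my-key' with a
    # 'my-bucket.s3.amazonaws.com' Host header (virtual-hosted style) are both
    # object-specific, whereas '/my-bucket' alone is not.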
bucket_in_domain = is_bucket_specified_in_domain_name(path, headers)
parts = len(path.split('/'))
return parts > (1 if bucket_in_domain else 2)
def normalize_bucket_name(bucket_name):
bucket_name = bucket_name or ''
# AWS appears to automatically convert upper to lower case chars in bucket names
bucket_name = bucket_name.lower()
return bucket_name
def get_key_name(path, headers):
parsed = urlparse.urlparse(path)
path_parts = parsed.path.lstrip('/').split('/', 1)
if uses_path_addressing(headers):
return path_parts[1]
return path_parts[0]
def uses_path_addressing(headers):
host = headers['host']
return host.startswith(HOSTNAME) or host.startswith(HOSTNAME_EXTERNAL)
def get_bucket_name(path, headers):
parsed = urlparse.urlparse(path)
    # try to pick the bucket_name from the path
bucket_name = parsed.path.split('/')[1]
    # for path-style addressing the hostname does not start with a bucket name,
    # so use the name extracted from the path
if uses_path_addressing(headers):
return normalize_bucket_name(bucket_name)
# matches the common endpoints like
# - '<bucket_name>.s3.<region>.amazonaws.com'
# - '<bucket_name>.s3-<region>.amazonaws.com.cn'
common_pattern = re.compile(r'^(.+)\.s3[.\-][a-z]{2}-[a-z]+-[0-9]{1,}'
r'\.amazonaws\.com(\.[a-z]+)?$')
# matches dualstack endpoints like
# - <bucket_name>.s3.dualstack.<region>.amazonaws.com'
# - <bucket_name>.s3.dualstack.<region>.amazonaws.com.cn'
dualstack_pattern = re.compile(r'^(.+)\.s3\.dualstack\.[a-z]{2}-[a-z]+-[0-9]{1,}'
r'\.amazonaws\.com(\.[a-z]+)?$')
# matches legacy endpoints like
# - '<bucket_name>.s3.amazonaws.com'
# - '<bucket_name>.s3-external-1.amazonaws.com.cn'
legacy_patterns = re.compile(r'^(.+)\.s3\.?(-external-1)?\.amazonaws\.com(\.[a-z]+)?$')
# if any of the above patterns match, the first captured group
# will be returned as the bucket name
host = headers['host']
for pattern in [common_pattern, dualstack_pattern, legacy_patterns]:
match = pattern.match(host)
if match:
bucket_name = match.groups()[0]
break
# we're either returning the original bucket_name,
# or a pattern matched the host and we're returning that name instead
return normalize_bucket_name(bucket_name)
def handle_notification_request(bucket, method, data):
response = Response()
response.status_code = 200
response._content = ''
if method == 'GET':
# TODO check if bucket exists
result = '<NotificationConfiguration xmlns="%s">' % XMLNS_S3
if bucket in S3_NOTIFICATIONS:
notifs = S3_NOTIFICATIONS[bucket]
for notif in notifs:
for dest in NOTIFICATION_DESTINATION_TYPES:
if dest in notif:
dest_dict = {
'%sConfiguration' % dest: {
'Id': uuid.uuid4(),
dest: notif[dest],
'Event': notif['Event'],
'Filter': notif['Filter']
}
}
result += xmltodict.unparse(dest_dict, full_document=False)
result += '</NotificationConfiguration>'
response._content = result
if method == 'PUT':
parsed = xmltodict.parse(data)
notif_config = parsed.get('NotificationConfiguration')
S3_NOTIFICATIONS[bucket] = []
for dest in NOTIFICATION_DESTINATION_TYPES:
config = notif_config.get('%sConfiguration' % (dest))
configs = config if isinstance(config, list) else [config] if config else []
for config in configs:
events = config.get('Event')
if isinstance(events, six.string_types):
events = [events]
event_filter = config.get('Filter', {})
# make sure FilterRule is an array
s3_filter = _get_s3_filter(event_filter)
if s3_filter and not isinstance(s3_filter.get('FilterRule', []), list):
s3_filter['FilterRule'] = [s3_filter['FilterRule']]
# create final details dict
notification_details = {
'Id': config.get('Id'),
'Event': events,
dest: config.get(dest),
'Filter': event_filter
}
S3_NOTIFICATIONS[bucket].append(clone(notification_details))
return response
def remove_bucket_notification(bucket):
S3_NOTIFICATIONS.pop(bucket, None)
class ProxyListenerS3(PersistingProxyListener):
def api_name(self):
return 's3'
@staticmethod
def is_s3_copy_request(headers, path):
return 'x-amz-copy-source' in headers or 'x-amz-copy-source' in path
@staticmethod
def get_201_response(key, bucket_name):
return """
<PostResponse>
<Location>{protocol}://{host}/{encoded_key}</Location>
<Bucket>{bucket}</Bucket>
<Key>{key}</Key>
<ETag>{etag}</ETag>
</PostResponse>
""".format(
protocol=get_service_protocol(),
host=config.HOSTNAME_EXTERNAL,
encoded_key=urlparse.quote(key, safe=''),
key=key,
bucket=bucket_name,
etag='d41d8cd98f00b204e9800998ecf8427f',
)
@staticmethod
def _update_location(content, bucket_name):
bucket_name = normalize_bucket_name(bucket_name)
host = config.HOSTNAME_EXTERNAL
if ':' not in host:
host = '%s:%s' % (host, config.PORT_S3)
return re.sub(r'<Location>\s*([a-zA-Z0-9\-]+)://[^/]+/([^<]+)\s*</Location>',
r'<Location>%s://%s/%s/\2</Location>' % (get_service_protocol(), host, bucket_name),
content, flags=re.MULTILINE)
@staticmethod
def is_query_allowable(method, query):
# Generally if there is a query (some/path/with?query) we don't want to send notifications
if not query:
return True
# Except we do want to notify on multipart and presigned url upload completion
contains_cred = 'X-Amz-Credential' in query and 'X-Amz-Signature' in query
contains_key = 'AWSAccessKeyId' in query and 'Signature' in query
if (method == 'POST' and query.startswith('uploadId')) or contains_cred or contains_key:
return True
def forward_request(self, method, path, data, headers):
# parse path and query params
parsed_path = urlparse.urlparse(path)
# Make sure we use 'localhost' as forward host, to ensure moto uses path style addressing.
# Note that all S3 clients using LocalStack need to enable path style addressing.
if 's3.amazonaws.com' not in headers.get('host', ''):
headers['host'] = 'localhost'
# check content md5 hash integrity if not a copy request
if 'Content-MD5' in headers and not self.is_s3_copy_request(headers, path):
response = check_content_md5(data, headers)
if response is not None:
return response
modified_data = None
# check bucket name
bucket_name = get_bucket_name(path, headers)
if method == 'PUT' and not re.match(BUCKET_NAME_REGEX, bucket_name):
if len(parsed_path.path) <= 1:
return error_response('Unable to extract valid bucket name. Please ensure that your AWS SDK is ' +
'configured to use path style addressing, or send a valid <Bucket>.s3.amazonaws.com "Host" header',
'InvalidBucketName', status_code=400)
return error_response('The specified bucket is not valid.', 'InvalidBucketName', status_code=400)
# TODO: For some reason, moto doesn't allow us to put a location constraint on us-east-1
to_find = to_bytes('<LocationConstraint>us-east-1</LocationConstraint>')
if data and data.startswith(to_bytes('<')) and to_find in data:
modified_data = data.replace(to_find, to_bytes(''))
# If this request contains streaming v4 authentication signatures, strip them from the message
        # Related issue: https://github.com/localstack/localstack/issues/98
# TODO we should evaluate whether to replace moto s3 with scality/S3:
# https://github.com/scality/S3/issues/237
if headers.get(CONTENT_SHA256_HEADER) == STREAMING_HMAC_PAYLOAD:
modified_data = strip_chunk_signatures(modified_data or data)
headers['content-length'] = headers.get('x-amz-decoded-content-length')
# POST requests to S3 may include a "${filename}" placeholder in the
# key, which should be replaced with an actual file name before storing.
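        # e.g. a browser form upload may post key='uploads/${filename}' together with
        # a file named 'photo.jpg', which is expanded to 'uploads/photo.jpg'.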
if method == 'POST':
original_data = modified_data or data
expanded_data = multipart_content.expand_multipart_filename(original_data, headers)
if expanded_data is not original_data:
modified_data = expanded_data
# If no content-type is provided, 'binary/octet-stream' should be used
# src: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
if method == 'PUT' and not headers.get('content-type'):
headers['content-type'] = 'binary/octet-stream'
# parse query params
query = parsed_path.query
path = parsed_path.path
bucket = path.split('/')[1]
query_map = urlparse.parse_qs(query, keep_blank_values=True)
# remap metadata query params (not supported in moto) to request headers
append_metadata_headers(method, query_map, headers)
# apply fixes
headers_changed = fix_metadata_key_underscores(request_headers=headers)
if query == 'notification' or 'notification' in query_map:
# handle and return response for ?notification request
response = handle_notification_request(bucket, method, data)
return response
# if the Expires key in the url is already expired then return error
if method == 'GET' and 'Expires' in query_map:
if is_url_already_expired(query_map.get('Expires')[0]):
return token_expired_error(path, headers.get('x-amz-request-id'), 400)
if query == 'cors' or 'cors' in query_map:
if method == 'GET':
return get_cors(bucket)
if method == 'PUT':
return set_cors(bucket, data)
if method == 'DELETE':
return delete_cors(bucket)
if query == 'lifecycle' or 'lifecycle' in query_map:
if method == 'GET':
return get_lifecycle(bucket)
if method == 'PUT':
return set_lifecycle(bucket, data)
if query == 'replication' or 'replication' in query_map:
if method == 'GET':
return get_replication(bucket)
if method == 'PUT':
return set_replication(bucket, data)
if query == 'encryption' or 'encryption' in query_map:
if method == 'GET':
return get_encryption(bucket)
if method == 'PUT':
return set_encryption(bucket, data)
if query == 'object-lock' or 'object-lock' in query_map:
if method == 'GET':
return get_object_lock(bucket)
if method == 'PUT':
return set_object_lock(bucket, data)
if modified_data is not None or headers_changed:
return Request(data=modified_data or data, headers=headers, method=method)
return True
def get_forward_url(self, method, path, data, headers):
def sub(match):
# make sure to convert any bucket names to lower case
bucket_name = normalize_bucket_name(match.group(1))
return '/%s%s' % (bucket_name, match.group(2) or '')
path_new = re.sub(r'/([^?/]+)([?/].*)?', sub, path)
if path == path_new:
return
url = 'http://%s:%s%s' % (constants.LOCALHOST, constants.DEFAULT_PORT_S3_BACKEND, path_new)
return url
def return_response(self, method, path, data, headers, response, request_handler=None):
path = to_str(path)
method = to_str(method)
# persist this API call to disk
super(ProxyListenerS3, self).return_response(method, path, data, headers, response, request_handler)
# No path-name based bucket name? Try host-based
bucket_name = get_bucket_name(path, headers)
hostname_parts = headers['host'].split('.')
if (not bucket_name or len(bucket_name) == 0) and len(hostname_parts) > 1:
bucket_name = hostname_parts[0]
# POST requests to S3 may include a success_action_redirect or
# success_action_status field, which should be used to redirect a
# client to a new location.
key = None
if method == 'POST':
key, redirect_url = multipart_content.find_multipart_key_value(data, headers)
if key and redirect_url:
response.status_code = 303
response.headers['Location'] = expand_redirect_url(redirect_url, key, bucket_name)
LOGGER.debug('S3 POST {} to {}'.format(response.status_code, response.headers['Location']))
key, status_code = multipart_content.find_multipart_key_value(
data, headers, 'success_action_status'
)
if response.status_code == 200 and status_code == '201' and key:
response.status_code = 201
response._content = self.get_201_response(key, bucket_name)
response.headers['Content-Length'] = str(len(response._content))
response.headers['Content-Type'] = 'application/xml; charset=utf-8'
return response
parsed = urlparse.urlparse(path)
bucket_name_in_host = headers['host'].startswith(bucket_name)
should_send_notifications = all([
method in ('PUT', 'POST', 'DELETE'),
'/' in path[1:] or bucket_name_in_host or key,
# check if this is an actual put object request, because it could also be
# a put bucket request with a path like this: /bucket_name/
bucket_name_in_host or key or (len(path[1:].split('/')) > 1 and len(path[1:].split('/')[1]) > 0),
self.is_query_allowable(method, parsed.query)
])
# get subscribers and send bucket notifications
if should_send_notifications:
# if we already have a good key, use it, otherwise examine the path
if key:
object_path = '/' + key
elif bucket_name_in_host:
object_path = parsed.path
else:
parts = parsed.path[1:].split('/', 1)
object_path = parts[1] if parts[1][0] == '/' else '/%s' % parts[1]
version_id = response.headers.get('x-amz-version-id', None)
send_notifications(method, bucket_name, object_path, version_id)
# publish event for creation/deletion of buckets:
if method in ('PUT', 'DELETE') and ('/' not in path[1:] or len(path[1:].split('/')[1]) <= 0):
event_type = (event_publisher.EVENT_S3_CREATE_BUCKET if method == 'PUT'
else event_publisher.EVENT_S3_DELETE_BUCKET)
event_publisher.fire_event(event_type, payload={'n': event_publisher.get_hash(bucket_name)})
# fix an upstream issue in moto S3 (see https://github.com/localstack/localstack/issues/382)
if method == 'PUT' and parsed.query == 'policy':
response._content = ''
response.status_code = 204
return response
# emulate ErrorDocument functionality if a website is configured
if method == 'GET' and response.status_code == 404 and parsed.query != 'website':
s3_client = aws_stack.connect_to_service('s3')
try:
# Verify the bucket exists in the first place--if not, we want normal processing of the 404
s3_client.head_bucket(Bucket=bucket_name)
website_config = s3_client.get_bucket_website(Bucket=bucket_name)
error_doc_key = website_config.get('ErrorDocument', {}).get('Key')
if error_doc_key:
error_object = s3_client.get_object(Bucket=bucket_name, Key=error_doc_key)
response.status_code = 200
response._content = error_object['Body'].read()
response.headers['content-length'] = len(response._content)
except ClientError:
# Pass on the 404 as usual
pass
if response:
reset_content_length = False
# append CORS headers and other annotations/patches to response
append_cors_headers(bucket_name, request_method=method, request_headers=headers, response=response)
append_last_modified_headers(response=response)
append_list_objects_marker(method, path, data, response)
fix_location_constraint(response)
fix_range_content_type(bucket_name, path, headers, response)
fix_delete_objects_response(bucket_name, method, parsed, data, headers, response)
fix_metadata_key_underscores(response=response)
fix_creation_date(method, path, response=response)
fix_etag_for_multipart(data, headers, response)
append_aws_request_troubleshooting_headers(response)
if method == 'PUT':
set_object_expiry(path, headers)
# Remove body from PUT response on presigned URL
# https://github.com/localstack/localstack/issues/1317
if method == 'PUT' and ('X-Amz-Security-Token=' in path or
'X-Amz-Credential=' in path or 'AWSAccessKeyId=' in path):
response._content = ''
reset_content_length = True
response_content_str = None
try:
response_content_str = to_str(response._content)
except Exception:
pass
# Honor response header overrides
# https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
if method == 'GET':
add_accept_range_header(response)
add_reponse_metadata_headers(response)
if is_object_expired(path):
return no_such_key_error(path, headers.get('x-amz-request-id'), 400)
query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True)
for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
if param_name in query_map:
response.headers[header_name] = query_map[param_name][0]
if response_content_str and response_content_str.startswith('<'):
is_bytes = isinstance(response._content, six.binary_type)
response._content = response_content_str
append_last_modified_headers(response=response, content=response_content_str)
# We need to un-pretty-print the XML, otherwise we run into this issue with Spark:
# https://github.com/jserver/mock-s3/pull/9/files
# https://github.com/localstack/localstack/issues/183
                # Note: we still need to make sure there is a newline after the first line: <?xml ...>\n
# Note: make sure to return XML docs verbatim: https://github.com/localstack/localstack/issues/1037
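                # e.g. '<ListBucketResult>\n  <Name>' becomes '<ListBucketResult><Name>',
                # while the '<?xml ...?>\n' preamble keeps its newline due to the [^\?] guard.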
if method != 'GET' or not is_object_specific_request(path, headers):
response._content = re.sub(r'([^\?])>\n\s*<', r'\1><', response_content_str, flags=re.MULTILINE)
# update Location information in response payload
response._content = self._update_location(response._content, bucket_name)
# convert back to bytes
if is_bytes:
response._content = to_bytes(response._content)
# fix content-type: https://github.com/localstack/localstack/issues/618
# https://github.com/localstack/localstack/issues/549
# https://github.com/localstack/localstack/issues/854
if 'text/html' in response.headers.get('Content-Type', '') \
and not response_content_str.lower().startswith('<!doctype html'):
response.headers['Content-Type'] = 'application/xml; charset=utf-8'
reset_content_length = True
# update content-length headers (fix https://github.com/localstack/localstack/issues/541)
if method == 'DELETE':
reset_content_length = True
if reset_content_length:
response.headers['content-length'] = len(response._content)
# instantiate listener
UPDATE_S3 = ProxyListenerS3()
| 1 | 11,027 | Can we remove the `TODO` statement here? (as this is actually fixed in this PR) | localstack-localstack | py |
@@ -198,6 +198,7 @@ public class Account implements BaseAccount, StoreConfig {
private SortType mSortType;
private Map<SortType, Boolean> mSortAscending = new HashMap<SortType, Boolean>();
private ShowPictures mShowPictures;
+ private DisplayPreference mDisplayPreference;
private boolean mIsSignatureBeforeQuotedText;
private Expunge mExpungePolicy = Expunge.EXPUNGE_IMMEDIATELY;
private int mMaxPushFolders; | 1 |
package com.fsck.k9;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.HashMap;
import java.util.concurrent.ConcurrentHashMap;
import android.content.ContentResolver;
import android.content.Context;
import android.database.Cursor;
import android.graphics.Color;
import android.net.Uri;
import android.util.Log;
import com.fsck.k9.activity.setup.AccountSetupCheckSettings.CheckDirection;
import com.fsck.k9.helper.Utility;
import com.fsck.k9.mail.Address;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.NetworkType;
import com.fsck.k9.mail.Store;
import com.fsck.k9.mail.Folder.FolderClass;
import com.fsck.k9.mail.filter.Base64;
import com.fsck.k9.mail.store.RemoteStore;
import com.fsck.k9.mail.store.StoreConfig;
import com.fsck.k9.mailstore.StorageManager;
import com.fsck.k9.mailstore.StorageManager.StorageProvider;
import com.fsck.k9.mailstore.LocalStore;
import com.fsck.k9.preferences.StorageEditor;
import com.fsck.k9.preferences.Storage;
import com.fsck.k9.provider.EmailProvider;
import com.fsck.k9.provider.EmailProvider.StatsColumns;
import com.fsck.k9.search.ConditionsTreeNode;
import com.fsck.k9.search.LocalSearch;
import com.fsck.k9.search.SqlQueryBuilder;
import com.fsck.k9.search.SearchSpecification.Attribute;
import com.fsck.k9.search.SearchSpecification.SearchCondition;
import com.fsck.k9.search.SearchSpecification.SearchField;
import com.fsck.k9.mail.ssl.LocalKeyStore;
import com.fsck.k9.view.ColorChip;
import com.larswerkman.colorpicker.ColorPicker;
import static com.fsck.k9.Preferences.getEnumStringPref;
/**
* Account stores all of the settings for a single account defined by the user. It is able to save
* and delete itself given a Preferences to work with. Each account is defined by a UUID.
*/
public class Account implements BaseAccount, StoreConfig {
/**
* Default value for the inbox folder (never changes for POP3 and IMAP)
*/
public static final String INBOX = "INBOX";
/**
* This local folder is used to store messages to be sent.
*/
public static final String OUTBOX = "K9MAIL_INTERNAL_OUTBOX";
public enum Expunge {
EXPUNGE_IMMEDIATELY,
EXPUNGE_MANUALLY,
EXPUNGE_ON_POLL
}
public enum DeletePolicy {
NEVER(0),
SEVEN_DAYS(1),
ON_DELETE(2),
MARK_AS_READ(3);
public final int setting;
DeletePolicy(int setting) {
this.setting = setting;
}
public String preferenceString() {
return Integer.toString(setting);
}
public static DeletePolicy fromInt(int initialSetting) {
for (DeletePolicy policy: values()) {
if (policy.setting == initialSetting) {
return policy;
}
}
throw new IllegalArgumentException("DeletePolicy " + initialSetting + " unknown");
}
}
public static final MessageFormat DEFAULT_MESSAGE_FORMAT = MessageFormat.HTML;
public static final boolean DEFAULT_MESSAGE_FORMAT_AUTO = false;
public static final boolean DEFAULT_MESSAGE_READ_RECEIPT = false;
public static final QuoteStyle DEFAULT_QUOTE_STYLE = QuoteStyle.PREFIX;
public static final String DEFAULT_QUOTE_PREFIX = ">";
public static final boolean DEFAULT_QUOTED_TEXT_SHOWN = true;
public static final boolean DEFAULT_REPLY_AFTER_QUOTE = false;
public static final boolean DEFAULT_STRIP_SIGNATURE = true;
public static final int DEFAULT_REMOTE_SEARCH_NUM_RESULTS = 25;
public static final String ACCOUNT_DESCRIPTION_KEY = "description";
public static final String STORE_URI_KEY = "storeUri";
public static final String TRANSPORT_URI_KEY = "transportUri";
public static final String IDENTITY_NAME_KEY = "name";
public static final String IDENTITY_EMAIL_KEY = "email";
public static final String IDENTITY_DESCRIPTION_KEY = "description";
/*
* http://developer.android.com/design/style/color.html
* Note: Order does matter, it's the order in which they will be picked.
*/
public static final Integer[] PREDEFINED_COLORS = new Integer[] {
Color.parseColor("#0099CC"), // blue
Color.parseColor("#669900"), // green
Color.parseColor("#FF8800"), // orange
Color.parseColor("#CC0000"), // red
Color.parseColor("#9933CC") // purple
};
public enum SortType {
SORT_DATE(R.string.sort_earliest_first, R.string.sort_latest_first, false),
SORT_ARRIVAL(R.string.sort_earliest_first, R.string.sort_latest_first, false),
SORT_SUBJECT(R.string.sort_subject_alpha, R.string.sort_subject_re_alpha, true),
SORT_SENDER(R.string.sort_sender_alpha, R.string.sort_sender_re_alpha, true),
SORT_UNREAD(R.string.sort_unread_first, R.string.sort_unread_last, true),
SORT_FLAGGED(R.string.sort_flagged_first, R.string.sort_flagged_last, true),
SORT_ATTACHMENT(R.string.sort_attach_first, R.string.sort_unattached_first, true);
private int ascendingToast;
private int descendingToast;
private boolean defaultAscending;
SortType(int ascending, int descending, boolean ndefaultAscending) {
ascendingToast = ascending;
descendingToast = descending;
defaultAscending = ndefaultAscending;
}
public int getToast(boolean ascending) {
return (ascending) ? ascendingToast : descendingToast;
}
public boolean isDefaultAscending() {
return defaultAscending;
}
}
public static final SortType DEFAULT_SORT_TYPE = SortType.SORT_DATE;
public static final boolean DEFAULT_SORT_ASCENDING = false;
public static final String NO_OPENPGP_PROVIDER = "";
public static final long NO_OPENPGP_KEY = 0;
private DeletePolicy mDeletePolicy = DeletePolicy.NEVER;
private final String mUuid;
private String mStoreUri;
/**
* Storage provider ID, used to locate and manage the underlying DB/file
* storage
*/
private String mLocalStorageProviderId;
private String mTransportUri;
private String mDescription;
private String mAlwaysBcc;
private int mAutomaticCheckIntervalMinutes;
private int mDisplayCount;
private int mChipColor;
private long mLastAutomaticCheckTime;
private long mLatestOldMessageSeenTime;
private boolean mNotifyNewMail;
private FolderMode mFolderNotifyNewMailMode;
private boolean mNotifySelfNewMail;
private String mInboxFolderName;
private String mDraftsFolderName;
private String mSentFolderName;
private String mTrashFolderName;
private String mArchiveFolderName;
private String mSpamFolderName;
private String mAutoExpandFolderName;
private FolderMode mFolderDisplayMode;
private FolderMode mFolderSyncMode;
private FolderMode mFolderPushMode;
private FolderMode mFolderTargetMode;
private int mAccountNumber;
private boolean mPushPollOnConnect;
private boolean mNotifySync;
private SortType mSortType;
private Map<SortType, Boolean> mSortAscending = new HashMap<SortType, Boolean>();
private ShowPictures mShowPictures;
private boolean mIsSignatureBeforeQuotedText;
private Expunge mExpungePolicy = Expunge.EXPUNGE_IMMEDIATELY;
private int mMaxPushFolders;
private int mIdleRefreshMinutes;
private boolean goToUnreadMessageSearch;
private final Map<NetworkType, Boolean> compressionMap = new ConcurrentHashMap<NetworkType, Boolean>();
private Searchable searchableFolders;
private boolean subscribedFoldersOnly;
private int maximumPolledMessageAge;
private int maximumAutoDownloadMessageSize;
// Tracks if we have sent a notification for this account for
// current set of fetched messages
private boolean mRingNotified;
private MessageFormat mMessageFormat;
private boolean mMessageFormatAuto;
private boolean mMessageReadReceipt;
private QuoteStyle mQuoteStyle;
private String mQuotePrefix;
private boolean mDefaultQuotedTextShown;
private boolean mReplyAfterQuote;
private boolean mStripSignature;
private boolean mSyncRemoteDeletions;
private String mCryptoApp;
private long mCryptoKey;
private boolean mMarkMessageAsReadOnView;
private boolean mAlwaysShowCcBcc;
private boolean mAllowRemoteSearch;
private boolean mRemoteSearchFullText;
private int mRemoteSearchNumResults;
private ColorChip mUnreadColorChip;
private ColorChip mReadColorChip;
private ColorChip mFlaggedUnreadColorChip;
private ColorChip mFlaggedReadColorChip;
/**
* Indicates whether this account is enabled, i.e. ready for use, or not.
*
* <p>
* Right now newly imported accounts are disabled if the settings file didn't contain a
* password for the incoming and/or outgoing server.
* </p>
*/
private boolean mEnabled;
/**
* Name of the folder that was last selected for a copy or move operation.
*
* Note: For now this value isn't persisted. So it will be reset when
* K-9 Mail is restarted.
*/
private String lastSelectedFolderName = null;
private List<Identity> identities;
private NotificationSetting mNotificationSetting = new NotificationSetting();
public enum FolderMode {
NONE, ALL, FIRST_CLASS, FIRST_AND_SECOND_CLASS, NOT_SECOND_CLASS
}
public enum ShowPictures {
NEVER, ALWAYS, ONLY_FROM_CONTACTS
}
public enum Searchable {
ALL, DISPLAYABLE, NONE
}
public enum QuoteStyle {
PREFIX, HEADER
}
public enum MessageFormat {
TEXT, HTML, AUTO
}
protected Account(Context context) {
mUuid = UUID.randomUUID().toString();
mLocalStorageProviderId = StorageManager.getInstance(context).getDefaultProviderId();
mAutomaticCheckIntervalMinutes = -1;
mIdleRefreshMinutes = 24;
mPushPollOnConnect = true;
mDisplayCount = K9.DEFAULT_VISIBLE_LIMIT;
mAccountNumber = -1;
mNotifyNewMail = true;
mFolderNotifyNewMailMode = FolderMode.ALL;
mNotifySync = true;
mNotifySelfNewMail = true;
mFolderDisplayMode = FolderMode.NOT_SECOND_CLASS;
mFolderSyncMode = FolderMode.FIRST_CLASS;
mFolderPushMode = FolderMode.FIRST_CLASS;
mFolderTargetMode = FolderMode.NOT_SECOND_CLASS;
mSortType = DEFAULT_SORT_TYPE;
mSortAscending.put(DEFAULT_SORT_TYPE, DEFAULT_SORT_ASCENDING);
mShowPictures = ShowPictures.NEVER;
mIsSignatureBeforeQuotedText = false;
mExpungePolicy = Expunge.EXPUNGE_IMMEDIATELY;
mAutoExpandFolderName = INBOX;
mInboxFolderName = INBOX;
mMaxPushFolders = 10;
mChipColor = pickColor(context);
goToUnreadMessageSearch = false;
subscribedFoldersOnly = false;
maximumPolledMessageAge = -1;
maximumAutoDownloadMessageSize = 32768;
mMessageFormat = DEFAULT_MESSAGE_FORMAT;
mMessageFormatAuto = DEFAULT_MESSAGE_FORMAT_AUTO;
mMessageReadReceipt = DEFAULT_MESSAGE_READ_RECEIPT;
mQuoteStyle = DEFAULT_QUOTE_STYLE;
mQuotePrefix = DEFAULT_QUOTE_PREFIX;
mDefaultQuotedTextShown = DEFAULT_QUOTED_TEXT_SHOWN;
mReplyAfterQuote = DEFAULT_REPLY_AFTER_QUOTE;
mStripSignature = DEFAULT_STRIP_SIGNATURE;
mSyncRemoteDeletions = true;
mCryptoApp = NO_OPENPGP_PROVIDER;
mCryptoKey = NO_OPENPGP_KEY;
mAllowRemoteSearch = false;
mRemoteSearchFullText = false;
mRemoteSearchNumResults = DEFAULT_REMOTE_SEARCH_NUM_RESULTS;
mEnabled = true;
mMarkMessageAsReadOnView = true;
mAlwaysShowCcBcc = false;
searchableFolders = Searchable.ALL;
identities = new ArrayList<Identity>();
Identity identity = new Identity();
identity.setSignatureUse(true);
identity.setSignature(context.getString(R.string.default_signature));
identity.setDescription(context.getString(R.string.default_identity_description));
identities.add(identity);
mNotificationSetting = new NotificationSetting();
mNotificationSetting.setVibrate(false);
mNotificationSetting.setVibratePattern(0);
mNotificationSetting.setVibrateTimes(5);
mNotificationSetting.setRing(true);
mNotificationSetting.setRingtone("content://settings/system/notification_sound");
mNotificationSetting.setLedColor(mChipColor);
cacheChips();
}
/*
* Pick a nice Android guidelines color if we haven't used them all yet.
*/
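    // For example, if existing accounts already use blue and green, the next account
    // gets orange; once all predefined colors are taken, a random color is chosen.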
private int pickColor(Context context) {
List<Account> accounts = Preferences.getPreferences(context).getAccounts();
List<Integer> availableColors = new ArrayList<Integer>(PREDEFINED_COLORS.length);
Collections.addAll(availableColors, PREDEFINED_COLORS);
for (Account account : accounts) {
Integer color = account.getChipColor();
if (availableColors.contains(color)) {
availableColors.remove(color);
if (availableColors.isEmpty()) {
break;
}
}
}
return (availableColors.isEmpty()) ? ColorPicker.getRandomColor() : availableColors.get(0);
}
protected Account(Preferences preferences, String uuid) {
this.mUuid = uuid;
loadAccount(preferences);
}
/**
* Load stored settings for this account.
*/
private synchronized void loadAccount(Preferences preferences) {
Storage storage = preferences.getStorage();
mStoreUri = Base64.decode(storage.getString(mUuid + ".storeUri", null));
mLocalStorageProviderId = storage.getString(mUuid + ".localStorageProvider", StorageManager.getInstance(K9.app).getDefaultProviderId());
mTransportUri = Base64.decode(storage.getString(mUuid + ".transportUri", null));
mDescription = storage.getString(mUuid + ".description", null);
mAlwaysBcc = storage.getString(mUuid + ".alwaysBcc", mAlwaysBcc);
mAutomaticCheckIntervalMinutes = storage.getInt(mUuid + ".automaticCheckIntervalMinutes", -1);
mIdleRefreshMinutes = storage.getInt(mUuid + ".idleRefreshMinutes", 24);
mPushPollOnConnect = storage.getBoolean(mUuid + ".pushPollOnConnect", true);
mDisplayCount = storage.getInt(mUuid + ".displayCount", K9.DEFAULT_VISIBLE_LIMIT);
if (mDisplayCount < 0) {
mDisplayCount = K9.DEFAULT_VISIBLE_LIMIT;
}
mLastAutomaticCheckTime = storage.getLong(mUuid + ".lastAutomaticCheckTime", 0);
mLatestOldMessageSeenTime = storage.getLong(mUuid + ".latestOldMessageSeenTime", 0);
mNotifyNewMail = storage.getBoolean(mUuid + ".notifyNewMail", false);
mFolderNotifyNewMailMode = getEnumStringPref(storage, mUuid + ".folderNotifyNewMailMode", FolderMode.ALL);
mNotifySelfNewMail = storage.getBoolean(mUuid + ".notifySelfNewMail", true);
mNotifySync = storage.getBoolean(mUuid + ".notifyMailCheck", false);
mDeletePolicy = DeletePolicy.fromInt(storage.getInt(mUuid + ".deletePolicy", DeletePolicy.NEVER.setting));
mInboxFolderName = storage.getString(mUuid + ".inboxFolderName", INBOX);
mDraftsFolderName = storage.getString(mUuid + ".draftsFolderName", "Drafts");
mSentFolderName = storage.getString(mUuid + ".sentFolderName", "Sent");
mTrashFolderName = storage.getString(mUuid + ".trashFolderName", "Trash");
mArchiveFolderName = storage.getString(mUuid + ".archiveFolderName", "Archive");
mSpamFolderName = storage.getString(mUuid + ".spamFolderName", "Spam");
mExpungePolicy = getEnumStringPref(storage, mUuid + ".expungePolicy", Expunge.EXPUNGE_IMMEDIATELY);
mSyncRemoteDeletions = storage.getBoolean(mUuid + ".syncRemoteDeletions", true);
mMaxPushFolders = storage.getInt(mUuid + ".maxPushFolders", 10);
goToUnreadMessageSearch = storage.getBoolean(mUuid + ".goToUnreadMessageSearch", false);
subscribedFoldersOnly = storage.getBoolean(mUuid + ".subscribedFoldersOnly", false);
maximumPolledMessageAge = storage.getInt(mUuid + ".maximumPolledMessageAge", -1);
maximumAutoDownloadMessageSize = storage.getInt(mUuid + ".maximumAutoDownloadMessageSize", 32768);
mMessageFormat = getEnumStringPref(storage, mUuid + ".messageFormat", DEFAULT_MESSAGE_FORMAT);
mMessageFormatAuto = storage.getBoolean(mUuid + ".messageFormatAuto", DEFAULT_MESSAGE_FORMAT_AUTO);
if (mMessageFormatAuto && mMessageFormat == MessageFormat.TEXT) {
mMessageFormat = MessageFormat.AUTO;
}
mMessageReadReceipt = storage.getBoolean(mUuid + ".messageReadReceipt", DEFAULT_MESSAGE_READ_RECEIPT);
mQuoteStyle = getEnumStringPref(storage, mUuid + ".quoteStyle", DEFAULT_QUOTE_STYLE);
mQuotePrefix = storage.getString(mUuid + ".quotePrefix", DEFAULT_QUOTE_PREFIX);
mDefaultQuotedTextShown = storage.getBoolean(mUuid + ".defaultQuotedTextShown", DEFAULT_QUOTED_TEXT_SHOWN);
mReplyAfterQuote = storage.getBoolean(mUuid + ".replyAfterQuote", DEFAULT_REPLY_AFTER_QUOTE);
mStripSignature = storage.getBoolean(mUuid + ".stripSignature", DEFAULT_STRIP_SIGNATURE);
for (NetworkType type : NetworkType.values()) {
Boolean useCompression = storage.getBoolean(mUuid + ".useCompression." + type,
true);
compressionMap.put(type, useCompression);
}
mAutoExpandFolderName = storage.getString(mUuid + ".autoExpandFolderName", INBOX);
mAccountNumber = storage.getInt(mUuid + ".accountNumber", 0);
mChipColor = storage.getInt(mUuid + ".chipColor", ColorPicker.getRandomColor());
mSortType = getEnumStringPref(storage, mUuid + ".sortTypeEnum", SortType.SORT_DATE);
mSortAscending.put(mSortType, storage.getBoolean(mUuid + ".sortAscending", false));
mShowPictures = getEnumStringPref(storage, mUuid + ".showPicturesEnum", ShowPictures.NEVER);
mNotificationSetting.setVibrate(storage.getBoolean(mUuid + ".vibrate", false));
mNotificationSetting.setVibratePattern(storage.getInt(mUuid + ".vibratePattern", 0));
mNotificationSetting.setVibrateTimes(storage.getInt(mUuid + ".vibrateTimes", 5));
mNotificationSetting.setRing(storage.getBoolean(mUuid + ".ring", true));
mNotificationSetting.setRingtone(storage.getString(mUuid + ".ringtone",
"content://settings/system/notification_sound"));
mNotificationSetting.setLed(storage.getBoolean(mUuid + ".led", true));
mNotificationSetting.setLedColor(storage.getInt(mUuid + ".ledColor", mChipColor));
mFolderDisplayMode = getEnumStringPref(storage, mUuid + ".folderDisplayMode", FolderMode.NOT_SECOND_CLASS);
mFolderSyncMode = getEnumStringPref(storage, mUuid + ".folderSyncMode", FolderMode.FIRST_CLASS);
mFolderPushMode = getEnumStringPref(storage, mUuid + ".folderPushMode", FolderMode.FIRST_CLASS);
mFolderTargetMode = getEnumStringPref(storage, mUuid + ".folderTargetMode", FolderMode.NOT_SECOND_CLASS);
searchableFolders = getEnumStringPref(storage, mUuid + ".searchableFolders", Searchable.ALL);
mIsSignatureBeforeQuotedText = storage.getBoolean(mUuid + ".signatureBeforeQuotedText", false);
identities = loadIdentities(storage);
String cryptoApp = storage.getString(mUuid + ".cryptoApp", NO_OPENPGP_PROVIDER);
setCryptoApp(cryptoApp);
mCryptoKey = storage.getLong(mUuid + ".cryptoKey", NO_OPENPGP_KEY);
mAllowRemoteSearch = storage.getBoolean(mUuid + ".allowRemoteSearch", false);
mRemoteSearchFullText = storage.getBoolean(mUuid + ".remoteSearchFullText", false);
mRemoteSearchNumResults = storage.getInt(mUuid + ".remoteSearchNumResults", DEFAULT_REMOTE_SEARCH_NUM_RESULTS);
mEnabled = storage.getBoolean(mUuid + ".enabled", true);
mMarkMessageAsReadOnView = storage.getBoolean(mUuid + ".markMessageAsReadOnView", true);
mAlwaysShowCcBcc = storage.getBoolean(mUuid + ".alwaysShowCcBcc", false);
cacheChips();
// Use email address as account description if necessary
if (mDescription == null) {
mDescription = getEmail();
}
}
protected synchronized void delete(Preferences preferences) {
// Get the list of account UUIDs
String[] uuids = preferences.getStorage().getString("accountUuids", "").split(",");
// Create a list of all account UUIDs excluding this account
List<String> newUuids = new ArrayList<String>(uuids.length);
for (String uuid : uuids) {
if (!uuid.equals(mUuid)) {
newUuids.add(uuid);
}
}
StorageEditor editor = preferences.getStorage().edit();
// Only change the 'accountUuids' value if this account's UUID was listed before
if (newUuids.size() < uuids.length) {
String accountUuids = Utility.combine(newUuids.toArray(), ',');
editor.putString("accountUuids", accountUuids);
}
editor.remove(mUuid + ".storeUri");
editor.remove(mUuid + ".transportUri");
editor.remove(mUuid + ".description");
editor.remove(mUuid + ".name");
editor.remove(mUuid + ".email");
editor.remove(mUuid + ".alwaysBcc");
editor.remove(mUuid + ".automaticCheckIntervalMinutes");
editor.remove(mUuid + ".pushPollOnConnect");
editor.remove(mUuid + ".idleRefreshMinutes");
editor.remove(mUuid + ".lastAutomaticCheckTime");
editor.remove(mUuid + ".latestOldMessageSeenTime");
editor.remove(mUuid + ".notifyNewMail");
editor.remove(mUuid + ".notifySelfNewMail");
editor.remove(mUuid + ".deletePolicy");
editor.remove(mUuid + ".draftsFolderName");
editor.remove(mUuid + ".sentFolderName");
editor.remove(mUuid + ".trashFolderName");
editor.remove(mUuid + ".archiveFolderName");
editor.remove(mUuid + ".spamFolderName");
editor.remove(mUuid + ".autoExpandFolderName");
editor.remove(mUuid + ".accountNumber");
editor.remove(mUuid + ".vibrate");
editor.remove(mUuid + ".vibratePattern");
editor.remove(mUuid + ".vibrateTimes");
editor.remove(mUuid + ".ring");
editor.remove(mUuid + ".ringtone");
editor.remove(mUuid + ".folderDisplayMode");
editor.remove(mUuid + ".folderSyncMode");
editor.remove(mUuid + ".folderPushMode");
editor.remove(mUuid + ".folderTargetMode");
editor.remove(mUuid + ".signatureBeforeQuotedText");
editor.remove(mUuid + ".expungePolicy");
editor.remove(mUuid + ".syncRemoteDeletions");
editor.remove(mUuid + ".maxPushFolders");
editor.remove(mUuid + ".searchableFolders");
editor.remove(mUuid + ".chipColor");
editor.remove(mUuid + ".led");
editor.remove(mUuid + ".ledColor");
editor.remove(mUuid + ".goToUnreadMessageSearch");
editor.remove(mUuid + ".subscribedFoldersOnly");
editor.remove(mUuid + ".maximumPolledMessageAge");
editor.remove(mUuid + ".maximumAutoDownloadMessageSize");
editor.remove(mUuid + ".messageFormatAuto");
editor.remove(mUuid + ".quoteStyle");
editor.remove(mUuid + ".quotePrefix");
editor.remove(mUuid + ".sortTypeEnum");
editor.remove(mUuid + ".sortAscending");
editor.remove(mUuid + ".showPicturesEnum");
editor.remove(mUuid + ".replyAfterQuote");
editor.remove(mUuid + ".stripSignature");
editor.remove(mUuid + ".cryptoApp");
editor.remove(mUuid + ".cryptoAutoSignature");
editor.remove(mUuid + ".cryptoAutoEncrypt");
editor.remove(mUuid + ".enabled");
editor.remove(mUuid + ".markMessageAsReadOnView");
editor.remove(mUuid + ".alwaysShowCcBcc");
editor.remove(mUuid + ".allowRemoteSearch");
editor.remove(mUuid + ".remoteSearchFullText");
editor.remove(mUuid + ".remoteSearchNumResults");
editor.remove(mUuid + ".defaultQuotedTextShown");
editor.remove(mUuid + ".displayCount");
editor.remove(mUuid + ".inboxFolderName");
editor.remove(mUuid + ".localStorageProvider");
editor.remove(mUuid + ".messageFormat");
editor.remove(mUuid + ".messageReadReceipt");
editor.remove(mUuid + ".notifyMailCheck");
for (NetworkType type : NetworkType.values()) {
editor.remove(mUuid + ".useCompression." + type.name());
}
deleteIdentities(preferences.getStorage(), editor);
// TODO: Remove preference settings that may exist for individual
// folders in the account.
editor.commit();
}
public static int findNewAccountNumber(List<Integer> accountNumbers) {
int newAccountNumber = -1;
Collections.sort(accountNumbers);
for (int accountNumber : accountNumbers) {
if (accountNumber > newAccountNumber + 1) {
break;
}
newAccountNumber = accountNumber;
}
newAccountNumber++;
return newAccountNumber;
}
public static List<Integer> getExistingAccountNumbers(Preferences preferences) {
List<Account> accounts = preferences.getAccounts();
List<Integer> accountNumbers = new ArrayList<Integer>(accounts.size());
for (Account a : accounts) {
accountNumbers.add(a.getAccountNumber());
}
return accountNumbers;
}
public static int generateAccountNumber(Preferences preferences) {
List<Integer> accountNumbers = getExistingAccountNumbers(preferences);
return findNewAccountNumber(accountNumbers);
}
public void move(Preferences preferences, boolean moveUp) {
String[] uuids = preferences.getStorage().getString("accountUuids", "").split(",");
StorageEditor editor = preferences.getStorage().edit();
String[] newUuids = new String[uuids.length];
if (moveUp) {
for (int i = 0; i < uuids.length; i++) {
if (i > 0 && uuids[i].equals(mUuid)) {
newUuids[i] = newUuids[i-1];
newUuids[i-1] = mUuid;
}
else {
newUuids[i] = uuids[i];
}
}
}
else {
for (int i = uuids.length - 1; i >= 0; i--) {
if (i < uuids.length - 1 && uuids[i].equals(mUuid)) {
newUuids[i] = newUuids[i+1];
newUuids[i+1] = mUuid;
}
else {
newUuids[i] = uuids[i];
}
}
}
String accountUuids = Utility.combine(newUuids, ',');
editor.putString("accountUuids", accountUuids);
editor.commit();
preferences.loadAccounts();
}
public synchronized void save(Preferences preferences) {
StorageEditor editor = preferences.getStorage().edit();
if (!preferences.getStorage().getString("accountUuids", "").contains(mUuid)) {
/*
* When the account is first created we assign it a unique account number. The
* account number will be unique to that account for the lifetime of the account.
* So, we get all the existing account numbers, sort them ascending, loop through
* the list and check if the number is greater than 1 + the previous number. If so
* we use the previous number + 1 as the account number. This refills gaps.
* mAccountNumber starts as -1 on a newly created account. It must be -1 for this
* algorithm to work.
*
* I bet there is a much smarter way to do this. Anyone like to suggest it?
*/
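            // Example: with existing account numbers [0, 1, 3], the loop stops at the
            // gap after 1 and this account is assigned number 2.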
List<Account> accounts = preferences.getAccounts();
int[] accountNumbers = new int[accounts.size()];
for (int i = 0; i < accounts.size(); i++) {
accountNumbers[i] = accounts.get(i).getAccountNumber();
}
Arrays.sort(accountNumbers);
for (int accountNumber : accountNumbers) {
if (accountNumber > mAccountNumber + 1) {
break;
}
mAccountNumber = accountNumber;
}
mAccountNumber++;
String accountUuids = preferences.getStorage().getString("accountUuids", "");
accountUuids += (accountUuids.length() != 0 ? "," : "") + mUuid;
editor.putString("accountUuids", accountUuids);
}
editor.putString(mUuid + ".storeUri", Base64.encode(mStoreUri));
editor.putString(mUuid + ".localStorageProvider", mLocalStorageProviderId);
editor.putString(mUuid + ".transportUri", Base64.encode(mTransportUri));
editor.putString(mUuid + ".description", mDescription);
editor.putString(mUuid + ".alwaysBcc", mAlwaysBcc);
editor.putInt(mUuid + ".automaticCheckIntervalMinutes", mAutomaticCheckIntervalMinutes);
editor.putInt(mUuid + ".idleRefreshMinutes", mIdleRefreshMinutes);
editor.putBoolean(mUuid + ".pushPollOnConnect", mPushPollOnConnect);
editor.putInt(mUuid + ".displayCount", mDisplayCount);
editor.putLong(mUuid + ".lastAutomaticCheckTime", mLastAutomaticCheckTime);
editor.putLong(mUuid + ".latestOldMessageSeenTime", mLatestOldMessageSeenTime);
editor.putBoolean(mUuid + ".notifyNewMail", mNotifyNewMail);
editor.putString(mUuid + ".folderNotifyNewMailMode", mFolderNotifyNewMailMode.name());
editor.putBoolean(mUuid + ".notifySelfNewMail", mNotifySelfNewMail);
editor.putBoolean(mUuid + ".notifyMailCheck", mNotifySync);
editor.putInt(mUuid + ".deletePolicy", mDeletePolicy.setting);
editor.putString(mUuid + ".inboxFolderName", mInboxFolderName);
editor.putString(mUuid + ".draftsFolderName", mDraftsFolderName);
editor.putString(mUuid + ".sentFolderName", mSentFolderName);
editor.putString(mUuid + ".trashFolderName", mTrashFolderName);
editor.putString(mUuid + ".archiveFolderName", mArchiveFolderName);
editor.putString(mUuid + ".spamFolderName", mSpamFolderName);
editor.putString(mUuid + ".autoExpandFolderName", mAutoExpandFolderName);
editor.putInt(mUuid + ".accountNumber", mAccountNumber);
editor.putString(mUuid + ".sortTypeEnum", mSortType.name());
editor.putBoolean(mUuid + ".sortAscending", mSortAscending.get(mSortType));
editor.putString(mUuid + ".showPicturesEnum", mShowPictures.name());
editor.putString(mUuid + ".folderDisplayMode", mFolderDisplayMode.name());
editor.putString(mUuid + ".folderSyncMode", mFolderSyncMode.name());
editor.putString(mUuid + ".folderPushMode", mFolderPushMode.name());
editor.putString(mUuid + ".folderTargetMode", mFolderTargetMode.name());
editor.putBoolean(mUuid + ".signatureBeforeQuotedText", this.mIsSignatureBeforeQuotedText);
editor.putString(mUuid + ".expungePolicy", mExpungePolicy.name());
editor.putBoolean(mUuid + ".syncRemoteDeletions", mSyncRemoteDeletions);
editor.putInt(mUuid + ".maxPushFolders", mMaxPushFolders);
editor.putString(mUuid + ".searchableFolders", searchableFolders.name());
editor.putInt(mUuid + ".chipColor", mChipColor);
editor.putBoolean(mUuid + ".goToUnreadMessageSearch", goToUnreadMessageSearch);
editor.putBoolean(mUuid + ".subscribedFoldersOnly", subscribedFoldersOnly);
editor.putInt(mUuid + ".maximumPolledMessageAge", maximumPolledMessageAge);
editor.putInt(mUuid + ".maximumAutoDownloadMessageSize", maximumAutoDownloadMessageSize);
if (MessageFormat.AUTO.equals(mMessageFormat)) {
// saving MessageFormat.AUTO as is to the database will cause downgrades to crash on
// startup, so we save as MessageFormat.TEXT instead with a separate flag for auto.
editor.putString(mUuid + ".messageFormat", Account.MessageFormat.TEXT.name());
mMessageFormatAuto = true;
} else {
editor.putString(mUuid + ".messageFormat", mMessageFormat.name());
mMessageFormatAuto = false;
}
editor.putBoolean(mUuid + ".messageFormatAuto", mMessageFormatAuto);
editor.putBoolean(mUuid + ".messageReadReceipt", mMessageReadReceipt);
editor.putString(mUuid + ".quoteStyle", mQuoteStyle.name());
editor.putString(mUuid + ".quotePrefix", mQuotePrefix);
editor.putBoolean(mUuid + ".defaultQuotedTextShown", mDefaultQuotedTextShown);
editor.putBoolean(mUuid + ".replyAfterQuote", mReplyAfterQuote);
editor.putBoolean(mUuid + ".stripSignature", mStripSignature);
editor.putString(mUuid + ".cryptoApp", mCryptoApp);
editor.putLong(mUuid + ".cryptoKey", mCryptoKey);
editor.putBoolean(mUuid + ".allowRemoteSearch", mAllowRemoteSearch);
editor.putBoolean(mUuid + ".remoteSearchFullText", mRemoteSearchFullText);
editor.putInt(mUuid + ".remoteSearchNumResults", mRemoteSearchNumResults);
editor.putBoolean(mUuid + ".enabled", mEnabled);
editor.putBoolean(mUuid + ".markMessageAsReadOnView", mMarkMessageAsReadOnView);
editor.putBoolean(mUuid + ".alwaysShowCcBcc", mAlwaysShowCcBcc);
editor.putBoolean(mUuid + ".vibrate", mNotificationSetting.shouldVibrate());
editor.putInt(mUuid + ".vibratePattern", mNotificationSetting.getVibratePattern());
editor.putInt(mUuid + ".vibrateTimes", mNotificationSetting.getVibrateTimes());
editor.putBoolean(mUuid + ".ring", mNotificationSetting.shouldRing());
editor.putString(mUuid + ".ringtone", mNotificationSetting.getRingtone());
editor.putBoolean(mUuid + ".led", mNotificationSetting.isLed());
editor.putInt(mUuid + ".ledColor", mNotificationSetting.getLedColor());
for (NetworkType type : NetworkType.values()) {
Boolean useCompression = compressionMap.get(type);
if (useCompression != null) {
editor.putBoolean(mUuid + ".useCompression." + type, useCompression);
}
}
saveIdentities(preferences.getStorage(), editor);
editor.commit();
}
public void resetVisibleLimits() {
try {
getLocalStore().resetVisibleLimits(getDisplayCount());
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Unable to reset visible limits", e);
}
}
/**
     * @param context the Context used to query the email content provider
* @return <code>null</code> if not available
* @throws MessagingException
* @see {@link #isAvailable(Context)}
*/
public AccountStats getStats(Context context) throws MessagingException {
if (!isAvailable(context)) {
return null;
}
AccountStats stats = new AccountStats();
ContentResolver cr = context.getContentResolver();
Uri uri = Uri.withAppendedPath(EmailProvider.CONTENT_URI,
"account/" + getUuid() + "/stats");
String[] projection = {
StatsColumns.UNREAD_COUNT,
StatsColumns.FLAGGED_COUNT
};
// Create LocalSearch instance to exclude special folders (Trash, Drafts, Spam, Outbox,
// Sent) and limit the search to displayable folders.
LocalSearch search = new LocalSearch();
excludeSpecialFolders(search);
limitToDisplayableFolders(search);
// Use the LocalSearch instance to create a WHERE clause to query the content provider
StringBuilder query = new StringBuilder();
List<String> queryArgs = new ArrayList<String>();
ConditionsTreeNode conditions = search.getConditions();
SqlQueryBuilder.buildWhereClause(this, conditions, query, queryArgs);
String selection = query.toString();
String[] selectionArgs = queryArgs.toArray(new String[0]);
Cursor cursor = cr.query(uri, projection, selection, selectionArgs, null);
try {
if (cursor.moveToFirst()) {
stats.unreadMessageCount = cursor.getInt(0);
stats.flaggedMessageCount = cursor.getInt(1);
}
} finally {
cursor.close();
}
LocalStore localStore = getLocalStore();
if (K9.measureAccounts()) {
stats.size = localStore.getSize();
}
return stats;
}
public synchronized void setChipColor(int color) {
mChipColor = color;
cacheChips();
}
public synchronized void cacheChips() {
mReadColorChip = new ColorChip(mChipColor, true, ColorChip.CIRCULAR);
mUnreadColorChip = new ColorChip(mChipColor, false, ColorChip.CIRCULAR);
mFlaggedReadColorChip = new ColorChip(mChipColor, true, ColorChip.STAR);
mFlaggedUnreadColorChip = new ColorChip(mChipColor, false, ColorChip.STAR);
}
public synchronized int getChipColor() {
return mChipColor;
}
public ColorChip generateColorChip(boolean messageRead, boolean toMe, boolean ccMe,
boolean fromMe, boolean messageFlagged) {
ColorChip chip;
if (messageRead) {
if (messageFlagged) {
chip = mFlaggedReadColorChip;
} else {
chip = mReadColorChip;
}
} else {
if (messageFlagged) {
chip = mFlaggedUnreadColorChip;
} else {
chip = mUnreadColorChip;
}
}
return chip;
}
@Override
public String getUuid() {
return mUuid;
}
public Uri getContentUri() {
return Uri.parse("content://accounts/" + getUuid());
}
public synchronized String getStoreUri() {
return mStoreUri;
}
public synchronized void setStoreUri(String storeUri) {
this.mStoreUri = storeUri;
}
public synchronized String getTransportUri() {
return mTransportUri;
}
public synchronized void setTransportUri(String transportUri) {
this.mTransportUri = transportUri;
}
@Override
public synchronized String getDescription() {
return mDescription;
}
@Override
public synchronized void setDescription(String description) {
this.mDescription = description;
}
public synchronized String getName() {
return identities.get(0).getName();
}
public synchronized void setName(String name) {
identities.get(0).setName(name);
}
public synchronized boolean getSignatureUse() {
return identities.get(0).getSignatureUse();
}
public synchronized void setSignatureUse(boolean signatureUse) {
identities.get(0).setSignatureUse(signatureUse);
}
public synchronized String getSignature() {
return identities.get(0).getSignature();
}
public synchronized void setSignature(String signature) {
identities.get(0).setSignature(signature);
}
@Override
public synchronized String getEmail() {
return identities.get(0).getEmail();
}
@Override
public synchronized void setEmail(String email) {
identities.get(0).setEmail(email);
}
public synchronized String getAlwaysBcc() {
return mAlwaysBcc;
}
public synchronized void setAlwaysBcc(String alwaysBcc) {
this.mAlwaysBcc = alwaysBcc;
}
/* Have we sent a new mail notification on this account */
public boolean isRingNotified() {
return mRingNotified;
}
public void setRingNotified(boolean ringNotified) {
mRingNotified = ringNotified;
}
public String getLocalStorageProviderId() {
return mLocalStorageProviderId;
}
public void setLocalStorageProviderId(String id) {
if (!mLocalStorageProviderId.equals(id)) {
boolean successful = false;
try {
switchLocalStorage(id);
successful = true;
} catch (MessagingException e) {
Log.e(K9.LOG_TAG, "Switching local storage provider from " +
mLocalStorageProviderId + " to " + id + " failed.", e);
} finally {
// if migration to/from SD-card failed once, it will fail again.
if (!successful) {
return;
}
}
mLocalStorageProviderId = id;
}
}
/**
* Returns -1 for never.
*/
public synchronized int getAutomaticCheckIntervalMinutes() {
return mAutomaticCheckIntervalMinutes;
}
/**
* @param automaticCheckIntervalMinutes or -1 for never.
*/
public synchronized boolean setAutomaticCheckIntervalMinutes(int automaticCheckIntervalMinutes) {
int oldInterval = this.mAutomaticCheckIntervalMinutes;
this.mAutomaticCheckIntervalMinutes = automaticCheckIntervalMinutes;
return (oldInterval != automaticCheckIntervalMinutes);
}
public synchronized int getDisplayCount() {
return mDisplayCount;
}
public synchronized void setDisplayCount(int displayCount) {
if (displayCount != -1) {
this.mDisplayCount = displayCount;
} else {
this.mDisplayCount = K9.DEFAULT_VISIBLE_LIMIT;
}
resetVisibleLimits();
}
public synchronized long getLastAutomaticCheckTime() {
return mLastAutomaticCheckTime;
}
public synchronized void setLastAutomaticCheckTime(long lastAutomaticCheckTime) {
this.mLastAutomaticCheckTime = lastAutomaticCheckTime;
}
public synchronized long getLatestOldMessageSeenTime() {
return mLatestOldMessageSeenTime;
}
public synchronized void setLatestOldMessageSeenTime(long latestOldMessageSeenTime) {
this.mLatestOldMessageSeenTime = latestOldMessageSeenTime;
}
public synchronized boolean isNotifyNewMail() {
return mNotifyNewMail;
}
public synchronized void setNotifyNewMail(boolean notifyNewMail) {
this.mNotifyNewMail = notifyNewMail;
}
public synchronized FolderMode getFolderNotifyNewMailMode() {
return mFolderNotifyNewMailMode;
}
public synchronized void setFolderNotifyNewMailMode(FolderMode folderNotifyNewMailMode) {
this.mFolderNotifyNewMailMode = folderNotifyNewMailMode;
}
public synchronized DeletePolicy getDeletePolicy() {
return mDeletePolicy;
}
public synchronized void setDeletePolicy(DeletePolicy deletePolicy) {
this.mDeletePolicy = deletePolicy;
}
public boolean isSpecialFolder(String folderName) {
return (folderName != null && (folderName.equalsIgnoreCase(getInboxFolderName()) ||
folderName.equals(getTrashFolderName()) ||
folderName.equals(getDraftsFolderName()) ||
folderName.equals(getArchiveFolderName()) ||
folderName.equals(getSpamFolderName()) ||
folderName.equals(getOutboxFolderName()) ||
folderName.equals(getSentFolderName()) ||
folderName.equals(getErrorFolderName())));
}
public synchronized String getDraftsFolderName() {
return mDraftsFolderName;
}
public synchronized void setDraftsFolderName(String name) {
mDraftsFolderName = name;
}
/**
* Checks if this account has a drafts folder set.
* @return true if account has a drafts folder set.
*/
public synchronized boolean hasDraftsFolder() {
return !K9.FOLDER_NONE.equalsIgnoreCase(mDraftsFolderName);
}
public synchronized String getSentFolderName() {
return mSentFolderName;
}
public synchronized String getErrorFolderName() {
return K9.ERROR_FOLDER_NAME;
}
public synchronized void setSentFolderName(String name) {
mSentFolderName = name;
}
/**
* Checks if this account has a sent folder set.
* @return true if account has a sent folder set.
*/
public synchronized boolean hasSentFolder() {
return !K9.FOLDER_NONE.equalsIgnoreCase(mSentFolderName);
}
public synchronized String getTrashFolderName() {
return mTrashFolderName;
}
public synchronized void setTrashFolderName(String name) {
mTrashFolderName = name;
}
/**
* Checks if this account has a trash folder set.
* @return true if account has a trash folder set.
*/
public synchronized boolean hasTrashFolder() {
return !K9.FOLDER_NONE.equalsIgnoreCase(mTrashFolderName);
}
public synchronized String getArchiveFolderName() {
return mArchiveFolderName;
}
public synchronized void setArchiveFolderName(String archiveFolderName) {
mArchiveFolderName = archiveFolderName;
}
/**
* Checks if this account has an archive folder set.
* @return true if account has an archive folder set.
*/
public synchronized boolean hasArchiveFolder() {
return !K9.FOLDER_NONE.equalsIgnoreCase(mArchiveFolderName);
}
public synchronized String getSpamFolderName() {
return mSpamFolderName;
}
public synchronized void setSpamFolderName(String name) {
mSpamFolderName = name;
}
/**
* Checks if this account has a spam folder set.
* @return true if account has a spam folder set.
*/
public synchronized boolean hasSpamFolder() {
return !K9.FOLDER_NONE.equalsIgnoreCase(mSpamFolderName);
}
public synchronized String getOutboxFolderName() {
return OUTBOX;
}
public synchronized String getAutoExpandFolderName() {
return mAutoExpandFolderName;
}
public synchronized void setAutoExpandFolderName(String name) {
mAutoExpandFolderName = name;
}
public synchronized int getAccountNumber() {
return mAccountNumber;
}
public synchronized FolderMode getFolderDisplayMode() {
return mFolderDisplayMode;
}
public synchronized boolean setFolderDisplayMode(FolderMode displayMode) {
FolderMode oldDisplayMode = mFolderDisplayMode;
mFolderDisplayMode = displayMode;
return oldDisplayMode != displayMode;
}
public synchronized FolderMode getFolderSyncMode() {
return mFolderSyncMode;
}
public synchronized boolean setFolderSyncMode(FolderMode syncMode) {
FolderMode oldSyncMode = mFolderSyncMode;
mFolderSyncMode = syncMode;
if (syncMode == FolderMode.NONE && oldSyncMode != FolderMode.NONE) {
return true;
}
if (syncMode != FolderMode.NONE && oldSyncMode == FolderMode.NONE) {
return true;
}
return false;
}
public synchronized FolderMode getFolderPushMode() {
return mFolderPushMode;
}
public synchronized boolean setFolderPushMode(FolderMode pushMode) {
FolderMode oldPushMode = mFolderPushMode;
mFolderPushMode = pushMode;
return pushMode != oldPushMode;
}
public synchronized boolean isShowOngoing() {
return mNotifySync;
}
public synchronized void setShowOngoing(boolean showOngoing) {
this.mNotifySync = showOngoing;
}
public synchronized SortType getSortType() {
return mSortType;
}
public synchronized void setSortType(SortType sortType) {
mSortType = sortType;
}
public synchronized boolean isSortAscending(SortType sortType) {
if (mSortAscending.get(sortType) == null) {
mSortAscending.put(sortType, sortType.isDefaultAscending());
}
return mSortAscending.get(sortType);
}
public synchronized void setSortAscending(SortType sortType, boolean sortAscending) {
mSortAscending.put(sortType, sortAscending);
}
public synchronized ShowPictures getShowPictures() {
return mShowPictures;
}
public synchronized void setShowPictures(ShowPictures showPictures) {
mShowPictures = showPictures;
}
public synchronized FolderMode getFolderTargetMode() {
return mFolderTargetMode;
}
public synchronized void setFolderTargetMode(FolderMode folderTargetMode) {
mFolderTargetMode = folderTargetMode;
}
public synchronized boolean isSignatureBeforeQuotedText() {
return mIsSignatureBeforeQuotedText;
}
public synchronized void setSignatureBeforeQuotedText(boolean mIsSignatureBeforeQuotedText) {
this.mIsSignatureBeforeQuotedText = mIsSignatureBeforeQuotedText;
}
public synchronized boolean isNotifySelfNewMail() {
return mNotifySelfNewMail;
}
public synchronized void setNotifySelfNewMail(boolean notifySelfNewMail) {
mNotifySelfNewMail = notifySelfNewMail;
}
public synchronized Expunge getExpungePolicy() {
return mExpungePolicy;
}
public synchronized void setExpungePolicy(Expunge expungePolicy) {
mExpungePolicy = expungePolicy;
}
public synchronized int getMaxPushFolders() {
return mMaxPushFolders;
}
public synchronized boolean setMaxPushFolders(int maxPushFolders) {
int oldMaxPushFolders = mMaxPushFolders;
mMaxPushFolders = maxPushFolders;
return oldMaxPushFolders != maxPushFolders;
}
public LocalStore getLocalStore() throws MessagingException {
return LocalStore.getInstance(this, K9.app);
}
public Store getRemoteStore() throws MessagingException {
return RemoteStore.getInstance(K9.app, this);
}
// It'd be great if this actually went into the store implementation
// to get this, but that's expensive and not easily accessible
// during initialization
public boolean isSearchByDateCapable() {
return (getStoreUri().startsWith("imap"));
}
@Override
public synchronized String toString() {
return mDescription;
}
public synchronized void setCompression(NetworkType networkType, boolean useCompression) {
compressionMap.put(networkType, useCompression);
}
public synchronized boolean useCompression(NetworkType networkType) {
Boolean useCompression = compressionMap.get(networkType);
if (useCompression == null) {
return true;
}
return useCompression;
}
@Override
public boolean equals(Object o) {
if (o instanceof Account) {
return ((Account)o).mUuid.equals(mUuid);
}
return super.equals(o);
}
@Override
public int hashCode() {
return mUuid.hashCode();
}
private synchronized List<Identity> loadIdentities(Storage storage) {
List<Identity> newIdentities = new ArrayList<Identity>();
int ident = 0;
boolean gotOne = false;
do {
gotOne = false;
String name = storage.getString(mUuid + "." + IDENTITY_NAME_KEY + "." + ident, null);
String email = storage.getString(mUuid + "." + IDENTITY_EMAIL_KEY + "." + ident, null);
boolean signatureUse = storage.getBoolean(mUuid + ".signatureUse." + ident, true);
String signature = storage.getString(mUuid + ".signature." + ident, null);
String description = storage.getString(mUuid + "." + IDENTITY_DESCRIPTION_KEY + "." + ident, null);
final String replyTo = storage.getString(mUuid + ".replyTo." + ident, null);
if (email != null) {
Identity identity = new Identity();
identity.setName(name);
identity.setEmail(email);
identity.setSignatureUse(signatureUse);
identity.setSignature(signature);
identity.setDescription(description);
identity.setReplyTo(replyTo);
newIdentities.add(identity);
gotOne = true;
}
ident++;
} while (gotOne);
if (newIdentities.isEmpty()) {
String name = storage.getString(mUuid + ".name", null);
String email = storage.getString(mUuid + ".email", null);
boolean signatureUse = storage.getBoolean(mUuid + ".signatureUse", true);
String signature = storage.getString(mUuid + ".signature", null);
Identity identity = new Identity();
identity.setName(name);
identity.setEmail(email);
identity.setSignatureUse(signatureUse);
identity.setSignature(signature);
identity.setDescription(email);
newIdentities.add(identity);
}
return newIdentities;
}
private synchronized void deleteIdentities(Storage storage, StorageEditor editor) {
int ident = 0;
boolean gotOne = false;
do {
gotOne = false;
String email = storage.getString(mUuid + "." + IDENTITY_EMAIL_KEY + "." + ident, null);
if (email != null) {
editor.remove(mUuid + "." + IDENTITY_NAME_KEY + "." + ident);
editor.remove(mUuid + "." + IDENTITY_EMAIL_KEY + "." + ident);
editor.remove(mUuid + ".signatureUse." + ident);
editor.remove(mUuid + ".signature." + ident);
editor.remove(mUuid + "." + IDENTITY_DESCRIPTION_KEY + "." + ident);
editor.remove(mUuid + ".replyTo." + ident);
gotOne = true;
}
ident++;
} while (gotOne);
}
private synchronized void saveIdentities(Storage storage, StorageEditor editor) {
deleteIdentities(storage, editor);
int ident = 0;
for (Identity identity : identities) {
editor.putString(mUuid + "." + IDENTITY_NAME_KEY + "." + ident, identity.getName());
editor.putString(mUuid + "." + IDENTITY_EMAIL_KEY + "." + ident, identity.getEmail());
editor.putBoolean(mUuid + ".signatureUse." + ident, identity.getSignatureUse());
editor.putString(mUuid + ".signature." + ident, identity.getSignature());
editor.putString(mUuid + "." + IDENTITY_DESCRIPTION_KEY + "." + ident, identity.getDescription());
editor.putString(mUuid + ".replyTo." + ident, identity.getReplyTo());
ident++;
}
}
public synchronized List<Identity> getIdentities() {
return identities;
}
public synchronized void setIdentities(List<Identity> newIdentities) {
identities = new ArrayList<Identity>(newIdentities);
}
public synchronized Identity getIdentity(int i) {
if (i < identities.size()) {
return identities.get(i);
}
throw new IllegalArgumentException("Identity with index " + i + " not found");
}
public boolean isAnIdentity(Address[] addrs) {
if (addrs == null) {
return false;
}
for (Address addr : addrs) {
if (findIdentity(addr) != null) {
return true;
}
}
return false;
}
public boolean isAnIdentity(Address addr) {
return findIdentity(addr) != null;
}
public synchronized Identity findIdentity(Address addr) {
for (Identity identity : identities) {
String email = identity.getEmail();
if (email != null && email.equalsIgnoreCase(addr.getAddress())) {
return identity;
}
}
return null;
}
public synchronized Searchable getSearchableFolders() {
return searchableFolders;
}
public synchronized void setSearchableFolders(Searchable searchableFolders) {
this.searchableFolders = searchableFolders;
}
public synchronized int getIdleRefreshMinutes() {
return mIdleRefreshMinutes;
}
public synchronized void setIdleRefreshMinutes(int idleRefreshMinutes) {
mIdleRefreshMinutes = idleRefreshMinutes;
}
public synchronized boolean isPushPollOnConnect() {
return mPushPollOnConnect;
}
public synchronized void setPushPollOnConnect(boolean pushPollOnConnect) {
mPushPollOnConnect = pushPollOnConnect;
}
/**
* Are we storing our localStore on the SD-card instead of the local device
* memory?<br/>
* Only to be called during initial account-setup!<br/>
* Side-effect: changes {@link #mLocalStorageProviderId}.
*
* @param newStorageProviderId
* Never <code>null</code>.
* @throws MessagingException
*/
public void switchLocalStorage(final String newStorageProviderId) throws MessagingException {
if (!mLocalStorageProviderId.equals(newStorageProviderId)) {
getLocalStore().switchLocalStorage(newStorageProviderId);
}
}
public synchronized boolean goToUnreadMessageSearch() {
return goToUnreadMessageSearch;
}
public synchronized void setGoToUnreadMessageSearch(boolean goToUnreadMessageSearch) {
this.goToUnreadMessageSearch = goToUnreadMessageSearch;
}
public synchronized boolean subscribedFoldersOnly() {
return subscribedFoldersOnly;
}
public synchronized void setSubscribedFoldersOnly(boolean subscribedFoldersOnly) {
this.subscribedFoldersOnly = subscribedFoldersOnly;
}
public synchronized int getMaximumPolledMessageAge() {
return maximumPolledMessageAge;
}
public synchronized void setMaximumPolledMessageAge(int maximumPolledMessageAge) {
this.maximumPolledMessageAge = maximumPolledMessageAge;
}
public synchronized int getMaximumAutoDownloadMessageSize() {
return maximumAutoDownloadMessageSize;
}
public synchronized void setMaximumAutoDownloadMessageSize(int maximumAutoDownloadMessageSize) {
this.maximumAutoDownloadMessageSize = maximumAutoDownloadMessageSize;
}
public Date getEarliestPollDate() {
int age = getMaximumPolledMessageAge();
if (age >= 0) {
Calendar now = Calendar.getInstance();
now.set(Calendar.HOUR_OF_DAY, 0);
now.set(Calendar.MINUTE, 0);
now.set(Calendar.SECOND, 0);
now.set(Calendar.MILLISECOND, 0);
if (age < 28) {
now.add(Calendar.DATE, age * -1);
} else switch (age) {
case 28:
now.add(Calendar.MONTH, -1);
break;
case 56:
now.add(Calendar.MONTH, -2);
break;
case 84:
now.add(Calendar.MONTH, -3);
break;
case 168:
now.add(Calendar.MONTH, -6);
break;
case 365:
now.add(Calendar.YEAR, -1);
break;
}
return now.getTime();
}
return null;
}
public MessageFormat getMessageFormat() {
return mMessageFormat;
}
public void setMessageFormat(MessageFormat messageFormat) {
this.mMessageFormat = messageFormat;
}
public synchronized boolean isMessageReadReceiptAlways() {
return mMessageReadReceipt;
}
public synchronized void setMessageReadReceipt(boolean messageReadReceipt) {
mMessageReadReceipt = messageReadReceipt;
}
public QuoteStyle getQuoteStyle() {
return mQuoteStyle;
}
public void setQuoteStyle(QuoteStyle quoteStyle) {
this.mQuoteStyle = quoteStyle;
}
public synchronized String getQuotePrefix() {
return mQuotePrefix;
}
public synchronized void setQuotePrefix(String quotePrefix) {
mQuotePrefix = quotePrefix;
}
public synchronized boolean isDefaultQuotedTextShown() {
return mDefaultQuotedTextShown;
}
public synchronized void setDefaultQuotedTextShown(boolean shown) {
mDefaultQuotedTextShown = shown;
}
public synchronized boolean isReplyAfterQuote() {
return mReplyAfterQuote;
}
public synchronized void setReplyAfterQuote(boolean replyAfterQuote) {
mReplyAfterQuote = replyAfterQuote;
}
public synchronized boolean isStripSignature() {
return mStripSignature;
}
public synchronized void setStripSignature(boolean stripSignature) {
mStripSignature = stripSignature;
}
public String getCryptoApp() {
return mCryptoApp;
}
public void setCryptoApp(String cryptoApp) {
if (cryptoApp == null || cryptoApp.equals("apg")) {
mCryptoApp = NO_OPENPGP_PROVIDER;
} else {
mCryptoApp = cryptoApp;
}
}
public long getCryptoKey() {
return mCryptoKey;
}
public void setCryptoKey(long keyId) {
mCryptoKey = keyId;
}
public boolean allowRemoteSearch() {
return mAllowRemoteSearch;
}
public void setAllowRemoteSearch(boolean val) {
mAllowRemoteSearch = val;
}
public int getRemoteSearchNumResults() {
return mRemoteSearchNumResults;
}
public void setRemoteSearchNumResults(int val) {
mRemoteSearchNumResults = (val >= 0 ? val : 0);
}
public String getInboxFolderName() {
return mInboxFolderName;
}
public void setInboxFolderName(String name) {
this.mInboxFolderName = name;
}
public synchronized boolean syncRemoteDeletions() {
return mSyncRemoteDeletions;
}
public synchronized void setSyncRemoteDeletions(boolean syncRemoteDeletions) {
mSyncRemoteDeletions = syncRemoteDeletions;
}
public synchronized String getLastSelectedFolderName() {
return lastSelectedFolderName;
}
public synchronized void setLastSelectedFolderName(String folderName) {
lastSelectedFolderName = folderName;
}
public synchronized String getOpenPgpProvider() {
if (!isOpenPgpProviderConfigured()) {
return null;
}
return getCryptoApp();
}
public synchronized boolean isOpenPgpProviderConfigured() {
return !NO_OPENPGP_PROVIDER.equals(getCryptoApp());
}
public synchronized NotificationSetting getNotificationSetting() {
return mNotificationSetting;
}
/**
* @return <code>true</code> if our {@link StorageProvider} is ready. (e.g.
* card inserted)
*/
public boolean isAvailable(Context context) {
String localStorageProviderId = getLocalStorageProviderId();
if (localStorageProviderId == null) {
return true; // defaults to internal memory
}
return StorageManager.getInstance(context).isReady(localStorageProviderId);
}
public synchronized boolean isEnabled() {
return mEnabled;
}
public synchronized void setEnabled(boolean enabled) {
mEnabled = enabled;
}
public synchronized boolean isMarkMessageAsReadOnView() {
return mMarkMessageAsReadOnView;
}
public synchronized void setMarkMessageAsReadOnView(boolean value) {
mMarkMessageAsReadOnView = value;
}
public synchronized boolean isAlwaysShowCcBcc() {
return mAlwaysShowCcBcc;
}
public synchronized void setAlwaysShowCcBcc(boolean show) {
mAlwaysShowCcBcc = show;
}
public boolean isRemoteSearchFullText() {
return false; // Temporarily disabled
//return mRemoteSearchFullText;
}
public void setRemoteSearchFullText(boolean val) {
mRemoteSearchFullText = val;
}
/**
* Modify the supplied {@link LocalSearch} instance to limit the search to displayable folders.
*
* <p>
* This method uses the current folder display mode to decide what folders to include/exclude.
* </p>
*
* @param search
* The {@code LocalSearch} instance to modify.
*
* @see #getFolderDisplayMode()
*/
public void limitToDisplayableFolders(LocalSearch search) {
final Account.FolderMode displayMode = getFolderDisplayMode();
switch (displayMode) {
case FIRST_CLASS: {
// Count messages in the INBOX and non-special first class folders
search.and(SearchField.DISPLAY_CLASS, FolderClass.FIRST_CLASS.name(),
Attribute.EQUALS);
break;
}
case FIRST_AND_SECOND_CLASS: {
// Count messages in the INBOX and non-special first and second class folders
search.and(SearchField.DISPLAY_CLASS, FolderClass.FIRST_CLASS.name(),
Attribute.EQUALS);
// TODO: Create a proper interface for creating arbitrary condition trees
SearchCondition searchCondition = new SearchCondition(SearchField.DISPLAY_CLASS,
Attribute.EQUALS, FolderClass.SECOND_CLASS.name());
ConditionsTreeNode root = search.getConditions();
if (root.mRight != null) {
root.mRight.or(searchCondition);
} else {
search.or(searchCondition);
}
break;
}
case NOT_SECOND_CLASS: {
// Count messages in the INBOX and non-special non-second-class folders
search.and(SearchField.DISPLAY_CLASS, FolderClass.SECOND_CLASS.name(),
Attribute.NOT_EQUALS);
break;
}
default:
case ALL: {
// Count messages in the INBOX and non-special folders
break;
}
}
}
/**
* Modify the supplied {@link LocalSearch} instance to exclude special folders.
*
* <p>
* Currently the following folders are excluded:
* <ul>
* <li>Trash</li>
* <li>Drafts</li>
* <li>Spam</li>
* <li>Outbox</li>
* <li>Sent</li>
* </ul>
* The Inbox will always be included even if one of the special folders is configured to point
* to the Inbox.
* </p>
*
* @param search
* The {@code LocalSearch} instance to modify.
*/
public void excludeSpecialFolders(LocalSearch search) {
excludeSpecialFolder(search, getTrashFolderName());
excludeSpecialFolder(search, getDraftsFolderName());
excludeSpecialFolder(search, getSpamFolderName());
excludeSpecialFolder(search, getOutboxFolderName());
excludeSpecialFolder(search, getSentFolderName());
excludeSpecialFolder(search, getErrorFolderName());
search.or(new SearchCondition(SearchField.FOLDER, Attribute.EQUALS, getInboxFolderName()));
}
/**
* Modify the supplied {@link LocalSearch} instance to exclude "unwanted" folders.
*
* <p>
* Currently the following folders are excluded:
* <ul>
* <li>Trash</li>
* <li>Spam</li>
* <li>Outbox</li>
* </ul>
* The Inbox will always be included even if one of the special folders is configured to point
* to the Inbox.
* </p>
*
* @param search
* The {@code LocalSearch} instance to modify.
*/
public void excludeUnwantedFolders(LocalSearch search) {
excludeSpecialFolder(search, getTrashFolderName());
excludeSpecialFolder(search, getSpamFolderName());
excludeSpecialFolder(search, getOutboxFolderName());
search.or(new SearchCondition(SearchField.FOLDER, Attribute.EQUALS, getInboxFolderName()));
}
private void excludeSpecialFolder(LocalSearch search, String folderName) {
if (!K9.FOLDER_NONE.equals(folderName)) {
search.and(SearchField.FOLDER, folderName, Attribute.NOT_EQUALS);
}
}
/**
* Add a new certificate for the incoming or outgoing server to the local key store.
*/
public void addCertificate(CheckDirection direction,
X509Certificate certificate) throws CertificateException {
Uri uri;
if (direction == CheckDirection.INCOMING) {
uri = Uri.parse(getStoreUri());
} else {
uri = Uri.parse(getTransportUri());
}
LocalKeyStore localKeyStore = LocalKeyStore.getInstance();
localKeyStore.addCertificate(uri.getHost(), uri.getPort(), certificate);
}
/**
* Examine the existing settings for an account. If the old host/port is different from the
* new host/port, then try and delete any (possibly non-existent) certificate stored for the
* old host/port.
*/
public void deleteCertificate(String newHost, int newPort,
CheckDirection direction) {
Uri uri;
if (direction == CheckDirection.INCOMING) {
uri = Uri.parse(getStoreUri());
} else {
uri = Uri.parse(getTransportUri());
}
String oldHost = uri.getHost();
int oldPort = uri.getPort();
if (oldPort == -1) {
// This occurs when a new account is created
return;
}
if (!newHost.equals(oldHost) || newPort != oldPort) {
LocalKeyStore localKeyStore = LocalKeyStore.getInstance();
localKeyStore.deleteCertificate(oldHost, oldPort);
}
}
/**
* Examine the settings for the account and attempt to delete (possibly non-existent)
* certificates for the incoming and outgoing servers.
*/
public void deleteCertificates() {
LocalKeyStore localKeyStore = LocalKeyStore.getInstance();
String storeUri = getStoreUri();
if (storeUri != null) {
Uri uri = Uri.parse(storeUri);
localKeyStore.deleteCertificate(uri.getHost(), uri.getPort());
}
String transportUri = getTransportUri();
if (transportUri != null) {
Uri uri = Uri.parse(transportUri);
localKeyStore.deleteCertificate(uri.getHost(), uri.getPort());
}
}
}
| 1 | 13,598 | m prefix no longer in code style | k9mail-k-9 | java |
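The review above asks for the Android-style `m` member prefix (mUuid, mChipColor, mStoreUri, ...) used throughout the quoted Account class to be dropped. Below is a minimal, hypothetical Java fragment sketching what that rename looks like for two of those fields; the trimmed-down class and field choice are illustrative only, not the actual K-9 change:

class Account {
    // before: private String mUuid;  private int mChipColor;
    private String uuid;
    private int chipColor;

    public String getUuid() {
        return uuid;              // was: return mUuid;
    }

    public synchronized void setChipColor(int color) {
        chipColor = color;        // was: mChipColor = color;
    }
}

Dropping the prefix is a mechanical rename; the accessors keep their existing names, so callers are unaffected.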
@@ -168,6 +168,10 @@ func (kvb *kvStoreWithBuffer) MustDelete(ns string, key []byte) {
kvb.buffer.Delete(ns, key, "failed to delete %x in %s", key, ns)
}
+func (kvb *kvStoreWithBuffer) Filter(ns string, c Condition) ([][]byte, [][]byte, error) {
+ return kvb.store.Filter(ns, c)
+}
+
func (kvb *kvStoreWithBuffer) WriteBatch(b batch.KVStoreBatch) (err error) {
b.Lock()
defer func() { | 1 | package db
import (
"context"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/db/batch"
"github.com/iotexproject/iotex-core/pkg/log"
)
type (
withBuffer interface {
Snapshot() int
Revert(int) error
SerializeQueue(batch.WriteInfoFilter) []byte
MustPut(string, []byte, []byte)
MustDelete(string, []byte)
Size() int
}
// KVStoreWithBuffer defines a KVStore with a buffer, which enables snapshot, revert,
// and transaction with multiple writes
KVStoreWithBuffer interface {
KVStore
withBuffer
}
// kvStoreWithBuffer is an implementation of KVStore, which buffers all the changes
kvStoreWithBuffer struct {
store KVStore
buffer batch.CachedBatch
}
// KVStoreFlusher is a wrapper of KVStoreWithBuffer, which has flush api
KVStoreFlusher interface {
SerializeQueue() []byte
Flush() error
KVStoreWithBuffer() KVStoreWithBuffer
}
flusher struct {
kvb *kvStoreWithBuffer
serializeFilter batch.WriteInfoFilter
flushTranslate batch.WriteInfoTranslate
}
// KVStoreFlusherOption sets option for KVStoreFlusher
KVStoreFlusherOption func(*flusher) error
)
// SerializeFilterOption sets the filter for serialize write queue
func SerializeFilterOption(filter batch.WriteInfoFilter) KVStoreFlusherOption {
return func(f *flusher) error {
if filter == nil {
return errors.New("filter cannot be nil")
}
f.serializeFilter = filter
return nil
}
}
// FlushTranslateOption sets the translate for flush
func FlushTranslateOption(wit batch.WriteInfoTranslate) KVStoreFlusherOption {
return func(f *flusher) error {
if wit == nil {
return errors.New("translate cannot be nil")
}
f.flushTranslate = wit
return nil
}
}
// NewKVStoreFlusher returns kv store flusher
func NewKVStoreFlusher(store KVStore, buffer batch.CachedBatch, opts ...KVStoreFlusherOption) (KVStoreFlusher, error) {
if store == nil {
return nil, errors.New("store cannot be nil")
}
if buffer == nil {
return nil, errors.New("buffer cannot be nil")
}
f := &flusher{
kvb: &kvStoreWithBuffer{
store: store,
buffer: buffer,
},
}
for _, opt := range opts {
if err := opt(f); err != nil {
return nil, errors.Wrap(err, "failed to apply option")
}
}
return f, nil
}
func (f *flusher) Flush() error {
if err := f.kvb.store.WriteBatch(f.kvb.buffer.Translate(f.flushTranslate)); err != nil {
return err
}
f.kvb.buffer.Lock()
f.kvb.buffer.ClearAndUnlock()
return nil
}
func (f *flusher) SerializeQueue() []byte {
return f.kvb.SerializeQueue(f.serializeFilter)
}
func (f *flusher) KVStoreWithBuffer() KVStoreWithBuffer {
return f.kvb
}
func (kvb *kvStoreWithBuffer) Start(ctx context.Context) error {
return kvb.store.Start(ctx)
}
func (kvb *kvStoreWithBuffer) Stop(ctx context.Context) error {
return kvb.store.Stop(ctx)
}
func (kvb *kvStoreWithBuffer) Snapshot() int {
return kvb.buffer.Snapshot()
}
func (kvb *kvStoreWithBuffer) Revert(sid int) error {
return kvb.buffer.Revert(sid)
}
func (kvb *kvStoreWithBuffer) SerializeQueue(filter batch.WriteInfoFilter) []byte {
return kvb.buffer.SerializeQueue(filter)
}
func (kvb *kvStoreWithBuffer) Size() int {
return kvb.buffer.Size()
}
func (kvb *kvStoreWithBuffer) Get(ns string, key []byte) ([]byte, error) {
value, err := kvb.buffer.Get(ns, key)
if errors.Cause(err) == batch.ErrNotExist {
value, err = kvb.store.Get(ns, key)
}
if errors.Cause(err) == batch.ErrAlreadyDeleted {
err = errors.Wrapf(ErrNotExist, "failed to get key %x in %s, deleted in buffer level", key, ns)
}
return value, err
}
func (kvb *kvStoreWithBuffer) Put(ns string, key, value []byte) error {
kvb.buffer.Put(ns, key, value, "failed to put %x in %s", key, ns)
return nil
}
func (kvb *kvStoreWithBuffer) MustPut(ns string, key, value []byte) {
kvb.buffer.Put(ns, key, value, "failed to put %x in %s", key, ns)
}
func (kvb *kvStoreWithBuffer) Delete(ns string, key []byte) error {
kvb.buffer.Delete(ns, key, "failed to delete %x in %s", key, ns)
return nil
}
func (kvb *kvStoreWithBuffer) MustDelete(ns string, key []byte) {
kvb.buffer.Delete(ns, key, "failed to delete %x in %s", key, ns)
}
func (kvb *kvStoreWithBuffer) WriteBatch(b batch.KVStoreBatch) (err error) {
b.Lock()
defer func() {
if err == nil {
// clear the batch if commit succeeds
b.ClearAndUnlock()
} else {
b.Unlock()
}
}()
writes := make([]*batch.WriteInfo, b.Size())
for i := 0; i < b.Size(); i++ {
write, e := b.Entry(i)
if e != nil {
return e
}
if write.WriteType() != batch.Put && write.WriteType() != batch.Delete {
return errors.Errorf("invalid write type %d", write.WriteType())
}
writes[i] = write
}
kvb.buffer.Lock()
defer kvb.buffer.Unlock()
for _, write := range writes {
switch write.WriteType() {
case batch.Put:
kvb.buffer.Put(write.Namespace(), write.Key(), write.Value(), write.ErrorFormat(), write.ErrorArgs())
case batch.Delete:
kvb.buffer.Delete(write.Namespace(), write.Key(), write.ErrorFormat(), write.ErrorArgs())
default:
log.S().Panic("unexpected write type")
}
}
return nil
}
| 1 | 20,980 | need to filter the entities in buffer as well | iotexproject-iotex-core | go |
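The Filter added in the diff above only delegates to the backing store, so writes still pending in the buffer are invisible to it, which is what the review comment points out. Below is a hedged Go sketch of one way to overlay the buffered entries on the store's results. It assumes Condition is a func(key, value []byte) bool predicate and that the cached batch can be iterated with Size()/Entry(i) the way batch.KVStoreBatch is iterated in WriteBatch() above; neither assumption is confirmed by the excerpt, and the real batch API may differ.

func (kvb *kvStoreWithBuffer) Filter(ns string, c Condition) ([][]byte, [][]byte, error) {
	fk, fv, err := kvb.store.Filter(ns, c)
	if err != nil {
		return nil, nil, err
	}
	// index the store's matches so buffered writes can override them
	matched := make(map[string][]byte, len(fk))
	for i, k := range fk {
		matched[string(k)] = fv[i]
	}
	// replay the buffer: puts overwrite or add entries, deletes remove them
	// (locking of the buffer is omitted here for brevity)
	for i := 0; i < kvb.buffer.Size(); i++ {
		e, err := kvb.buffer.Entry(i) // assumed iteration API, see note above
		if err != nil {
			return nil, nil, err
		}
		if e.Namespace() != ns {
			continue
		}
		switch e.WriteType() {
		case batch.Put:
			if c(e.Key(), e.Value()) {
				matched[string(e.Key())] = e.Value()
			} else {
				delete(matched, string(e.Key())) // new value no longer matches
			}
		case batch.Delete:
			delete(matched, string(e.Key()))
		}
	}
	keys := make([][]byte, 0, len(matched))
	values := make([][]byte, 0, len(matched))
	for k, v := range matched {
		keys = append(keys, []byte(k))
		values = append(values, v)
	}
	return keys, values, nil
}

Building the result from a map loses whatever ordering the store guarantees; if callers rely on key order, the merged result would need to be sorted before returning.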
@@ -12,6 +12,7 @@ import java.nio.charset.Charset;
import java.security.MessageDigest;
public class FastBlurTransformation extends BitmapTransformation {
+ private static final String ID="de.danoeh.antennapod.core.glide.FastBlurTransformation";
private static final String TAG = FastBlurTransformation.class.getSimpleName();
| 1 | package de.danoeh.antennapod.core.glide;
import android.graphics.Bitmap;
import android.media.ThumbnailUtils;
import androidx.annotation.NonNull;
import android.util.Log;
import com.bumptech.glide.load.engine.bitmap_recycle.BitmapPool;
import com.bumptech.glide.load.resource.bitmap.BitmapTransformation;
import java.nio.charset.Charset;
import java.security.MessageDigest;
public class FastBlurTransformation extends BitmapTransformation {
private static final String TAG = FastBlurTransformation.class.getSimpleName();
private static final int STACK_BLUR_RADIUS = 10;
public FastBlurTransformation() {
super();
}
@Override
protected Bitmap transform(@NonNull BitmapPool pool,
@NonNull Bitmap source,
int outWidth,
int outHeight) {
int targetWidth = outWidth / 3;
int targetHeight = (int) (1.0 * outHeight * targetWidth / outWidth);
Bitmap resized = ThumbnailUtils.extractThumbnail(source, targetWidth, targetHeight);
Bitmap result = fastBlur(resized, STACK_BLUR_RADIUS);
if (result == null) {
Log.w(TAG, "result was null");
return source;
}
return result;
}
@Override
public boolean equals(Object o) {
return o instanceof FastBlurTransformation;
}
@Override
public void updateDiskCacheKey(@NonNull MessageDigest messageDigest) {
messageDigest.update(TAG.getBytes(Charset.defaultCharset()));
}
private static Bitmap fastBlur(Bitmap bitmap, int radius) {
// Stack Blur v1.0 from
// http://www.quasimondo.com/StackBlurForCanvas/StackBlurDemo.html
//
// Java Author: Mario Klingemann <mario at quasimondo.com>
// http://incubator.quasimondo.com
// created February 29, 2004
// Android port : Yahel Bouaziz <yahel at kayenko.com>
// http://www.kayenko.com
// ported april 5th, 2012
// This is a compromise between Gaussian Blur and Box blur
// It creates much better looking blurs than Box Blur, but is
// 7x faster than my Gaussian Blur implementation.
//
// I called it Stack Blur because this describes best how this
// filter works internally: it creates a kind of moving stack
// of colors whilst scanning through the image. Thereby it
// just has to add one new block of color to the right side
// of the stack and remove the leftmost color. The remaining
// colors on the topmost layer of the stack are either added on
// or reduced by one, depending on if they are on the right or
// on the left side of the stack.
//
// If you are using this algorithm in your code please add
// the following line:
//
// Stack Blur Algorithm by Mario Klingemann <[email protected]>
if (radius < 1) {
return null;
}
int w = bitmap.getWidth();
int h = bitmap.getHeight();
int[] pix = new int[w * h];
bitmap.getPixels(pix, 0, w, 0, 0, w, h);
int wm = w - 1;
int hm = h - 1;
int wh = w * h;
int div = radius + radius + 1;
int[] r = new int[wh];
int[] g = new int[wh];
int[] b = new int[wh];
int rsum;
int gsum;
int bsum;
int x;
int y;
int i;
int p;
int yp;
int yi;
int yw;
int[] vmin = new int[Math.max(w, h)];
int divsum = (div + 1) >> 1;
divsum *= divsum;
int[] dv = new int[256 * divsum];
for (i = 0; i < 256 * divsum; i++) {
dv[i] = (i / divsum);
}
yw = yi = 0;
int[][] stack = new int[div][3];
int stackpointer;
int stackstart;
int[] sir;
int rbs;
int r1 = radius + 1;
int routsum;
int goutsum;
int boutsum;
int rinsum;
int ginsum;
int binsum;
for (y = 0; y < h; y++) {
rinsum = ginsum = binsum = routsum = goutsum = boutsum = rsum = gsum = bsum = 0;
for (i = -radius; i <= radius; i++) {
p = pix[yi + Math.min(wm, Math.max(i, 0))];
sir = stack[i + radius];
sir[0] = (p & 0xff0000) >> 16;
sir[1] = (p & 0x00ff00) >> 8;
sir[2] = (p & 0x0000ff);
rbs = r1 - Math.abs(i);
rsum += sir[0] * rbs;
gsum += sir[1] * rbs;
bsum += sir[2] * rbs;
if (i > 0) {
rinsum += sir[0];
ginsum += sir[1];
binsum += sir[2];
} else {
routsum += sir[0];
goutsum += sir[1];
boutsum += sir[2];
}
}
stackpointer = radius;
for (x = 0; x < w; x++) {
r[yi] = dv[rsum];
g[yi] = dv[gsum];
b[yi] = dv[bsum];
rsum -= routsum;
gsum -= goutsum;
bsum -= boutsum;
stackstart = stackpointer - radius + div;
sir = stack[stackstart % div];
routsum -= sir[0];
goutsum -= sir[1];
boutsum -= sir[2];
if (y == 0) {
vmin[x] = Math.min(x + radius + 1, wm);
}
p = pix[yw + vmin[x]];
sir[0] = (p & 0xff0000) >> 16;
sir[1] = (p & 0x00ff00) >> 8;
sir[2] = (p & 0x0000ff);
rinsum += sir[0];
ginsum += sir[1];
binsum += sir[2];
rsum += rinsum;
gsum += ginsum;
bsum += binsum;
stackpointer = (stackpointer + 1) % div;
sir = stack[(stackpointer) % div];
routsum += sir[0];
goutsum += sir[1];
boutsum += sir[2];
rinsum -= sir[0];
ginsum -= sir[1];
binsum -= sir[2];
yi++;
}
yw += w;
}
for (x = 0; x < w; x++) {
rinsum = ginsum = binsum = routsum = goutsum = boutsum = rsum = gsum = bsum = 0;
yp = -radius * w;
for (i = -radius; i <= radius; i++) {
yi = Math.max(0, yp) + x;
sir = stack[i + radius];
sir[0] = r[yi];
sir[1] = g[yi];
sir[2] = b[yi];
rbs = r1 - Math.abs(i);
rsum += r[yi] * rbs;
gsum += g[yi] * rbs;
bsum += b[yi] * rbs;
if (i > 0) {
rinsum += sir[0];
ginsum += sir[1];
binsum += sir[2];
} else {
routsum += sir[0];
goutsum += sir[1];
boutsum += sir[2];
}
if (i < hm) {
yp += w;
}
}
yi = x;
stackpointer = radius;
for (y = 0; y < h; y++) {
// Set alpha to 1
pix[yi] = 0xff000000 | (dv[rsum] << 16) | (dv[gsum] << 8) | dv[bsum];
rsum -= routsum;
gsum -= goutsum;
bsum -= boutsum;
stackstart = stackpointer - radius + div;
sir = stack[stackstart % div];
routsum -= sir[0];
goutsum -= sir[1];
boutsum -= sir[2];
if (x == 0) {
vmin[y] = Math.min(y + r1, hm) * w;
}
p = x + vmin[y];
sir[0] = r[p];
sir[1] = g[p];
sir[2] = b[p];
rinsum += sir[0];
ginsum += sir[1];
binsum += sir[2];
rsum += rinsum;
gsum += ginsum;
bsum += binsum;
stackpointer = (stackpointer + 1) % div;
sir = stack[stackpointer];
routsum += sir[0];
goutsum += sir[1];
boutsum += sir[2];
rinsum -= sir[0];
ginsum -= sir[1];
binsum -= sir[2];
yi += w;
}
}
bitmap.setPixels(pix, 0, w, 0, 0, w, h);
return bitmap;
}
}
| 1 | 20,250 | Please add spaces before and after the equals sign | AntennaPod-AntennaPod | java |
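The comment refers to the ID constant introduced in the diff above, which is written with no spaces around the assignment. A formatting-only sketch of the requested change:

// before, as added in the diff:
// private static final String ID="de.danoeh.antennapod.core.glide.FastBlurTransformation";

// after, with spaces before and after the equals sign:
private static final String ID = "de.danoeh.antennapod.core.glide.FastBlurTransformation";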
@@ -49,7 +49,7 @@
#endif
#include "common/sql.error_event.pb.h"
#include "common/sql.info_event.pb.h"
-#include "wrapper/amqpwrapper.h"
+//#include "wrapper/amqpwrapper.h"
#include "sq_sql_eventids.h"
#include "common/evl_sqlog_eventnum.h"
| 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
*****************************************************************************
*
* File: logmxevent.cpp
* Description: Eventlogging functions for SQL
*
* Created: 02/05/96
* Language: C++
*
*
*
*
****************************************************************************/
#include "NLSConversion.h"
#include "logmxevent.h"
#include "str.h"
#include <stdlib.h>
#include <pthread.h>
#include <rosetta/tal.h>
#include "rosetta/rosgen.h"
#include <limits.h>
#include <stdarg.h>
#include <execinfo.h>
#ifdef _MSC_VER
#undef _MSC_VER
#endif
#include "common/sql.error_event.pb.h"
#include "common/sql.info_event.pb.h"
#include "wrapper/amqpwrapper.h"
#include "sq_sql_eventids.h"
#include "common/evl_sqlog_eventnum.h"
// #include "sqevlog/evl_sqlog_writer.h"
#include "seabed/fs.h"
// forward declaration
static void check_assert_bug_catcher();
Lng32 sqlToSLSeverity(const char *severity, NABoolean isWarning);
#if 0 /* No longer needed with sqlTextBuf moved to CmpContext */
THREAD_P NAWString* sqlTextBuf = 0;
#endif
NABoolean SQLMXLoggingArea::establishedAMQPConnection_ = FALSE;
pthread_mutex_t SQLMXLoggingArea::loggingMutex_;
bool SQLMXLoggingArea::loggingMutexInitialized_ = false;
void SQLMXLoggingArea::init()
{
char buffer[80];
int rc;
establishedAMQPConnection_= FALSE;
if (!loggingMutexInitialized_)
{
rc = pthread_mutex_init(&loggingMutex_, NULL);
if (rc == 0)
loggingMutexInitialized_ = true;
else
{
sprintf(buffer, "SQLMXLoggingArea::init() pthread_mutex_init() rc=%d", rc);
logSQLMXDebugEvent(buffer, (short)rc);
}
}
}
bool SQLMXLoggingArea::lockMutex()
{
char buffer[80];
int rc = 0;
if (loggingMutexInitialized_)
{
rc = pthread_mutex_trylock(&loggingMutex_);
if (rc)
{
sprintf(buffer, "SQLMXLoggingArea::lockMutex() pthread_mutex_trylock() rc=%d", rc);
logSQLMXDebugEvent(buffer, (short)rc, false);
}
}
return rc ? false : true;
}
void SQLMXLoggingArea::unlockMutex()
{
char buffer[80];
int rc = 0;
if (loggingMutexInitialized_)
rc = pthread_mutex_unlock(&loggingMutex_);
if (rc)
{
sprintf(buffer, "SQLMXLoggingArea::unlockMutex() pthread_mutex_unlock() rc=%d", rc);
logSQLMXDebugEvent(buffer, (short)rc, false);
}
}
#if 0 /* No longer needed with sqlTextBuf moved to CmpContext */
// Set the SQL text for later use. If the buffer is not empty, this
// call does nothing.
//
void SQLMXLoggingArea::setSqlText(const NAWString& x)
{
if ( sqlTextBuf == 0 )
sqlTextBuf = new NAWString(x);
}
#endif
SQLMXLoggingArea::~SQLMXLoggingArea()
{
#ifndef SP_DIS
closeAMQPConnection();
#endif
establishedAMQPConnection_ = FALSE;
};
#if 0 /* No longer needed with sqlTextBuf moved to CmpContext */
//
// clear up the SQL text so that next setSqlText() call can have effect.
//
void SQLMXLoggingArea::resetSqlText()
{
delete sqlTextBuf;
sqlTextBuf = 0;
}
#endif
Int32 SQLMXLoggingArea::logSQLMXEventForError( ULng32 sqlcode,
const char* experienceLevel,
const char* severityLevel,
const char* eventTarget,
const char *msgTxt,
const char* sqlId,
const Lng32 Int0,
const Lng32 Int1,
const Lng32 Int2,
const Lng32 Int3,
const Lng32 Int4,
const char *String0,
const char * String1,
const char * String2,
const char * String3,
const char * String4,
const char * serverName,
const char * connectionName,
const char * constraintCatalog,
const char * constraintSchema,
const char * constraintName,
const char * triggerCatalog,
const char * triggerSchema,
const char *triggerName,
const char *catalogName,
const char *schemaName,
const char *tableName,
const char *columnName,
const Int64 currTransid,
const Lng32 rowNumber,
const Lng32 platformCode,
NABoolean isWarning
)
{
Int32 rc = 0;
// sealog logging of sql error events
// declare an event stack variable and populate it
#ifndef SP_DIS
bool lockedMutex = lockMutex();
sql::error_event sql_error_event;
Int32 qpidNodePort = atoi(getenv("QPID_NODE_PORT"));
common::event_header * eventHeader = sql_error_event.mutable_header();
common::info_header * infoHeader = eventHeader->mutable_header();
if (!SQLMXLoggingArea::establishedAMQPConnection())
{
rc = createAMQPConnection(NULL,-1);
if (rc)
{
if (lockedMutex)
unlockMutex();
return rc;
}
establishedAMQPConnection_ = TRUE;
}
char eventidStr[10]=" ";
Lng32 eventidLen = 0;
str_sprintf(eventidStr,"10%d%06d",SQEVL_SQL,sqlcode);
str_strip_blanks(eventidStr,eventidLen);
Lng32 eventIdVal = (Lng32)str_atoi(eventidStr,eventidLen);
sql_error_event.mutable_header()->set_event_id(eventIdVal);
sql_error_event.mutable_header()->set_event_severity(sqlToSLSeverity(severityLevel, isWarning));
sql_error_event.set_sqlcode(sqlcode);
if (sqlId)
sql_error_event.set_sqlid(sqlId);
else
{
SB_Phandle_Type myphandle;
XPROCESSHANDLE_GETMINE_(&myphandle);
char charProcHandle[200];
char myprocname[30];
Int32 mycpu,mypin,mynodenumber=0;
short myproclength = 0;
XPROCESSHANDLE_DECOMPOSE_(&myphandle, &mycpu, &mypin, &mynodenumber,NULL,100, NULL, myprocname,100, &myproclength);
myprocname[myproclength] = '\0';
str_sprintf(charProcHandle,"%d,%d,%d,%s",mycpu,mypin,mynodenumber,myprocname);
sql_error_event.set_sqlid(charProcHandle);
}
sql_error_event.set_message_text(msgTxt);
sql_error_event.set_err_experience_level(experienceLevel);
sql_error_event.set_err_target(eventTarget);
sql_error_event.set_int0(Int0);
sql_error_event.set_int1(Int1);
sql_error_event.set_int2(Int2);
sql_error_event.set_int3(Int3);
sql_error_event.set_int4(Int4);
if (String0)
sql_error_event.set_string0(String0);
if (String1)
sql_error_event.set_string1(String1);
if (String2)
sql_error_event.set_string2(String2);
if (String3)
sql_error_event.set_string3(String3);
if (String4)
sql_error_event.set_string4(String4);
//LCOV_EXCL_START - unused members of diags are excluded from coverage
if (serverName)
sql_error_event.set_server_name(serverName);
if (connectionName)
sql_error_event.set_connection_name(connectionName);
//LCOV_EXCL_STOP
if (constraintCatalog)
sql_error_event.set_constraint_catalog(constraintCatalog);
if (constraintSchema)
sql_error_event.set_constraint_schema(constraintSchema);
if (constraintName)
sql_error_event.set_constraint_name(constraintName);
if (triggerCatalog)
sql_error_event.set_trigger_catalog(triggerCatalog);
if (triggerSchema)
sql_error_event.set_trigger_schema(triggerSchema);
if (triggerName)
sql_error_event.set_trigger_name(triggerName);
if (catalogName)
sql_error_event.set_catalog_name(catalogName);
if (schemaName)
sql_error_event.set_schema_name(schemaName);
if (tableName)
sql_error_event.set_table_name(tableName);
if (columnName)
sql_error_event.set_column_name(columnName);
sql_error_event.set_current_transid(currTransid);
sql_error_event.set_row_number(rowNumber);
sql_error_event.set_platform_error_code(platformCode);
rc = initAMQPInfoHeader(infoHeader, SQEVL_SQL);
if (rc)
{
closeAMQPConnection();
establishedAMQPConnection_ = FALSE;
if (lockedMutex)
unlockMutex();
return rc;
}
AMQPRoutingKey routingKey(SP_EVENT, SP_SQLPACKAGE, SP_INSTANCE,
SP_PUBLIC, SP_GPBPROTOCOL, "error_event");
try {
rc = sendAMQPMessage(true, sql_error_event.SerializeAsString(),
SP_CONTENT_TYPE_APP, routingKey);
if (rc) throw 1;
} catch (...) {
closeAMQPConnection();
establishedAMQPConnection_ = FALSE;
if (!rc) rc = SP_SEND_FAILED;
if (lockedMutex)
unlockMutex();
return rc;
}
if (lockedMutex)
unlockMutex();
#else
rc = 0;
#endif
return rc;
}
Lng32 sqlToSLSeverity(const char *severity,NABoolean isWarning)
{
if (isWarning)
return SQ_LOG_WARNING;
if (str_cmp(severity,"CRTCL",str_len(severity)) == 0)
return SQ_LOG_CRIT;
else if (str_cmp(severity,"MAJOR",str_len(severity)) == 0)
return SQ_LOG_ERR;
else if (str_cmp(severity,"MINOR",str_len(severity)) == 0)
return SQ_LOG_ERR;
else if (str_cmp(severity,"INFRM",str_len(severity)) == 0 )
return SQ_LOG_INFO;
else
return -1;
}
void SQLMXLoggingArea::logCompNQCretryEvent(char *stmt)
{
const char m[]="Statement was compiled as if query plan caching were off: ";
Int32 mLen = sizeof(m);
Int32 sLen = str_len(stmt);
char msg[8192];
str_cpy_all(msg, m, mLen);
str_cpy_all(msg+mLen, stmt, MINOF(sLen, 8192-mLen));
logSQLMXEventForError(SQEV_CMP_NQC_RETRY_OCCURED, "ADVANCED", "INFRM",
"LOGONLY", msg);
}
void SQLMXLoggingArea::logExecRtInfo(const char *fileName,
ULng32 lineNo,
const char *msg, Lng32 explainSeqNum)
{
bool lockedMutex = lockMutex();
short rc = 0;
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
sevent.setFileName((char *)fileName);
sevent.setLineNumber(lineNo);
sevent.setMessageText((char *)msg);
sevent.setExplainSeqNum(explainSeqNum);
// set the event id and severity and send the event
sevent.sendEvent(SQEV_SQL_EXEC_RT_INFO, SQ_LOG_INFO);
// close the connection.
sevent.closeConnection();
if (lockedMutex)
unlockMutex();
}
static void writeStackTrace(char *s, int bufLen)
{
const int safetyMargin = 256;
int len = sprintf(s, "Process Stack Trace:\n");
// This is a quick and dirty implementation for Linux. It is easy to
// get the program counters for the stack trace, but is difficult to
// look up the function name, line, and file number based off of the
// program counter. For simplicity, this code just calls addr2line to
// look up the information. This could be changed in the future if an
// easy to use API becomes available.
void *bt[20];
size_t size = backtrace(bt, 20);
pid_t myPID = getpid();
// Write each level of the stack except for the top frame and the
// bottom two frames, which aren't important here.
Int32 i = 1;
while (i < size - 2)
{
char buffer[128]; // Used for command-line + addr2line output.
char addrBuf[sizeof(void *)*2 + 4];
sprintf(buffer, "/usr/bin/addr2line -e /proc/%d/exe -f -C ", myPID);
Int32 j;
// Run addr2line on 5 addresses at a time.
for (j = i; j < i+5 && j < size-2; j++)
{
sprintf(addrBuf, " %p", bt[j]);
strcat(buffer, addrBuf);
}
FILE *cmdFP = popen(buffer, "r");
if (cmdFP == NULL)
{
if (len+safetyMargin < bufLen)
len += sprintf(s, "Error %d while popen() of %s\n", errno, buffer);
break;
}
else
{
for (j = i; j < i+5 && j < size-2; j++)
{
// Read from the addr2line output
fgets(buffer, sizeof(buffer), cmdFP);
// Replace newline with null character
size_t len = strlen(buffer);
if (buffer[len-1] == '\n')
buffer[len-1] = '\0';
if (len+safetyMargin < bufLen)
len += sprintf(s, "%p: %s()\n", bt[j], buffer);
fgets(buffer, sizeof(buffer), cmdFP);
if (len+safetyMargin < bufLen)
len += sprintf(s, " %s", buffer);
}
fclose(cmdFP);
}
i = j;
}
sprintf(s, "\n");
}
void SQLMXLoggingArea::logErr97Event(int rc)
{
#if 0
// to be completed, need event id 516 and proper template in sqf/seapilot/
// source/event_templates/sql.info_event.template.
const int LEN=8192;
if (rc == 97) {
char msg[LEN];
writeStackTrace(msg, LEN);
logSQLMXEventForError(SQLMX_ERR97_OCCURED, "ADVANCED", "INFRM",
"LOGONLY", msg);
}
#endif
}
void SQLMXLoggingArea::logSQLMXPredefinedEvent(
ULng32 eventId,
SQLMXLoggingArea::Category category)
{
bool lockedMutex = lockMutex();
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
// set the event id and severity and send the event
sevent.sendEvent(eventId, SQ_LOG_INFO);
// close the connection.
// sevent.closeConnection();
if (lockedMutex)
unlockMutex();
}
void SQLMXLoggingArea::logSQLMXDebugEvent( const char *msg, short errorcode, bool lock)
{
bool lockedMutex = lock ? lockMutex() : false;
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
sevent.setMessageText((char *)msg);
sevent.setError1((Lng32)errorcode);
// set the event id and severity and send the event
sevent.sendEvent(SQEV_SQL_DEBUG_EVENT, SQ_LOG_DEBUG);
// close the connection.
sevent.closeConnection();
if (lockedMutex)
unlockMutex();
}
// log an ABORT event
void
SQLMXLoggingArea::logSQLMXAbortEvent(const char* file, Int32 line, const char* msg)
{
bool lockedMutex = lockMutex();
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
sevent.setFileName((char *)file);
sevent.setLineNumber(line);
sevent.setMessageText((char *)msg);
// set the event id and severity and send the event
sevent.sendEvent(SQEV_SQL_ABORT, SQ_LOG_ERR);
// close the connection.
sevent.closeConnection();
if (lockedMutex)
unlockMutex();
}
// log an ASSERTION FAILURE event
void
SQLMXLoggingArea::logSQLMXAssertionFailureEvent(const char* file, Int32 line, const char* msg, const char* condition, const Lng32* tid)
{
bool lockedMutex = lockMutex();
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
sevent.setFileName((char *)file);
sevent.setLineNumber(line);
sevent.setMessageText((char *)msg);
if (tid)
sevent.setInt0(*tid);
if (condition)
sevent.setString0((char *)condition);
// set the event id and severity and send the event
sevent.sendEvent(SQEV_SQL_ASSERTION_FAILURE, SQ_LOG_ERR);
// close the connection.
sevent.closeConnection();
if (lockedMutex)
unlockMutex();
// logSQLMXEvent(SQLMX_ASSERTION_FAILURE, file, line, msg);
}
void SQLMXLoggingArea::logPOSInfoEvent(const char *msg)
{
bool lockedMutex = lockMutex();
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
sevent.setMessageText((char *)msg);
// set the event id and severity and send the event
sevent.sendEvent(SQEV_SQL_POS_INFO, SQ_LOG_INFO);
// close the connection.
sevent.closeConnection();
if (lockedMutex)
unlockMutex();
}
void SQLMXLoggingArea::logPOSErrorEvent(const Lng32 errorCode,
const char *msg1,
const char *msg2,
const char *msg3)
{
bool lockedMutex = lockMutex();
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
sevent.setError1(errorCode);
sevent.setString0((char *)msg1);
sevent.setString1((char *)msg2);
sevent.setString2((char *)msg3);
// set the event id and severity and send the event
if (errorCode == 1150)
sevent.sendEvent(SQEV_SQL_POS_ERROR, SQ_LOG_ERR);
else if (errorCode ==1154)
sevent.sendEvent(SQEV_SQL_POS_CREATE_ERROR,SQ_LOG_ERR);
// close the connection.
sevent.closeConnection();
if (lockedMutex)
unlockMutex();
}
// events that correspond to informational messages from CommonLogger or one of
// its subclasses
void SQLMXLoggingArea::logCommonLoggerInfoEvent(ULng32 eventId,
const char *msg)
{
SqlSealogEvent sevent;
sevent.openConnection();
sevent.setExperienceLevel("ADVANCED");
sevent.setTarget("LOGONLY");
sevent.setMessageText((char*)msg);
sevent.sendEvent(eventId, SQ_LOG_INFO);
sevent.closeConnection();
}
// events that correspond to error messages from CommonLogger or one of its
// subclasses
void SQLMXLoggingArea::logCommonLoggerErrorEvent(ULng32 eventId,
const char *msg)
{
SqlSealogEvent sevent;
sevent.openConnection();
sevent.setExperienceLevel("ADVANCED");
sevent.setTarget("DBADMIN");
sevent.setMessageText((char*)msg);
sevent.sendEvent(eventId, SQ_LOG_ERR);
sevent.closeConnection();
}
// events that correspond to fatal error messages from CommonLogger or one of
// its subclasses
void SQLMXLoggingArea::logCommonLoggerFailureEvent(ULng32 eventId,
const char *msg)
{
SqlSealogEvent sevent;
sevent.openConnection();
sevent.setExperienceLevel("ADVANCED");
sevent.setTarget("DBADMIN");
sevent.setMessageText((char*)msg);
sevent.sendEvent(eventId, SQ_LOG_ERR);
sevent.closeConnection();
}
void SQLMXLoggingArea::logMVQRInfoEvent(const char *msg)
{
logSQLMXEventForError(SQLMX_MVQR_INFO, "ADVANCED", "INFRM", "LOGONLY", msg);
}
// events that correspond to error messages in an MVQR process (qms, qmm, qmp)
void SQLMXLoggingArea::logMVQRErrorEvent(const char *msg)
{
logSQLMXEventForError(SQLMX_MVQR_ERROR, "ADVANCED", "MAJOR", "DBADMIN", msg);
}
// events that correspond to fatal error messages in an MVQR process (qms, qmm, qmp)
void SQLMXLoggingArea::logMVQRFailureEvent(const char *msg)
{
logSQLMXEventForError(SQLMX_MVQR_FAILURE, "ADVANCED", "MAJOR", "DBADMIN", msg);
}
void SQLMXLoggingArea::logMVRefreshInfoEvent(const char *msg)
{
bool lockedMutex = lockMutex();
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
sevent.setMessageText((char *)msg);
// set the event id and severity and send the event
sevent.sendEvent(SQEV_MVREFRESH_INFO, SQ_LOG_INFO);
// close the connection.
sevent.closeConnection();
if (lockedMutex)
unlockMutex();
}
void SQLMXLoggingArea::logMVRefreshErrorEvent(const char *msg)
{
bool lockedMutex = lockMutex();
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
sevent.setMessageText((char *)msg);
// set the event id and severity and send the event
sevent.sendEvent(SQEV_MVREFRESH_ERROR, SQ_LOG_ERR);
// close the connection.
sevent.closeConnection();
if (lockedMutex)
unlockMutex();
}
void SQLMXLoggingArea::logCliReclaimSpaceEvent(Lng32 freeSize, Lng32 totalSize,
Lng32 totalContexts, Lng32 totalStatements)
{
bool lockedMutex = lockMutex();
SqlSealogEvent sevent;
// Open a new connection
sevent.openConnection();
// set the required parameters
sevent.setInt0(freeSize);
sevent.setInt1(totalSize);
sevent.setInt2(totalContexts);
sevent.setInt3(totalStatements);
// set the event id and severity and send the event
sevent.sendEvent(SQEV_SQL_CLI_RECLAIM_OCCURED, SQ_LOG_INFO);
// close the connection.
sevent.closeConnection();
if (lockedMutex)
unlockMutex();
}
Int16 SqlSealogEvent::openConnection()
{
#ifdef SP_DIS
return 0;
#else
if (SQLMXLoggingArea::establishedAMQPConnection())
return 0;
Int32 qpidNodePort = atoi(getenv("QPID_NODE_PORT"));
common::event_header * eventHeader = sqlInfoEvent_.mutable_header();
common::info_header * infoHeader = eventHeader->mutable_header();
Int32 rc = createAMQPConnection("127.0.0.1",qpidNodePort);
if (rc)
//add trace log
return rc;
SQLMXLoggingArea::establishedAMQPConnection_ = TRUE;
return rc;
#endif
}
//set methods
void SqlSealogEvent::setQueryId(char *queryId)
{
#ifndef SP_DIS
sqlInfoEvent_.set_query_id(queryId ? queryId:"(not available)");
#endif
}
void SqlSealogEvent::setMessageText(char *messageText)
{
#ifndef SP_DIS
sqlInfoEvent_.set_message_text(messageText ? messageText: "(not available)");
#endif
}
void SqlSealogEvent::setExperienceLevel(const char *el)
{
#ifndef SP_DIS
sqlInfoEvent_.set_experience_level(el?el:"ADVANCED");
#endif
}
void SqlSealogEvent::setTarget(const char *target)
{
#ifndef SP_DIS
sqlInfoEvent_.set_target(target?target:"LOGONLY");
#endif
}
void SqlSealogEvent::setFileName(char *fn)
{
#ifndef SP_DIS
sqlInfoEvent_.set_file_name(fn?fn:"(not available)");
#endif
}
void SqlSealogEvent::setLineNumber(Lng32 ln)
{
#ifndef SP_DIS
sqlInfoEvent_.set_line_number((ln>0)?ln:0);
#endif
}
void SqlSealogEvent::setExplainSeqNum(Lng32 esn)
{
#ifndef SP_DIS
sqlInfoEvent_.set_explain_seq_num(esn);
#endif
}
void SqlSealogEvent::setError1(Lng32 e1)
{
#ifndef SP_DIS
sqlInfoEvent_.set_error1(e1);
#endif
}
void SqlSealogEvent::setError2(Lng32 e2)
{
#ifndef SP_DIS
sqlInfoEvent_.set_error2(e2);
#endif
}
void SqlSealogEvent::setError3(Lng32 e3)
{
#ifndef SP_DIS
sqlInfoEvent_.set_error3(e3);
#endif
}
void SqlSealogEvent::setInt0(Lng32 i0)
{
#ifndef SP_DIS
sqlInfoEvent_.set_int0(i0);
#endif
}
void SqlSealogEvent::setInt1(Lng32 i1)
{
#ifndef SP_DIS
sqlInfoEvent_.set_int1(i1);
#endif
}
void SqlSealogEvent::setInt2(Lng32 i2)
{
#ifndef SP_DIS
sqlInfoEvent_.set_int2(i2);
#endif
}
void SqlSealogEvent::setInt3(Lng32 i3)
{
#ifndef SP_DIS
sqlInfoEvent_.set_int3(i3);
#endif
}
void SqlSealogEvent::setInt4(Lng32 i4)
{
#ifndef SP_DIS
sqlInfoEvent_.set_int4(i4);
#endif
}
void SqlSealogEvent::setString0(char *string0)
{
#ifndef SP_DIS
sqlInfoEvent_.set_string0(string0 ? string0 : "");
#endif
}
void SqlSealogEvent::setString1(char *string1)
{
#ifndef SP_DIS
sqlInfoEvent_.set_string1(string1?string1:"");
#endif
}
void SqlSealogEvent::setString2(char *string2)
{
#ifndef SP_DIS
sqlInfoEvent_.set_string2(string2 ? string2 : "");
#endif
}
void SqlSealogEvent::setString3(char *string3)
{
#ifndef SP_DIS
sqlInfoEvent_.set_string3(string3 ? string3 : "");
#endif
}
void SqlSealogEvent::setString4(char *string4)
{
#ifndef SP_DIS
sqlInfoEvent_.set_string4(string4 ? string4 : "");
#endif
}
void SqlSealogEvent::setInt64_0(Int64 i64_0)
{
#ifndef SP_DIS
sqlInfoEvent_.set_int64_0(i64_0);
#endif
}
void SqlSealogEvent::setInt64_1(Int64 i64_1)
{
#ifndef SP_DIS
sqlInfoEvent_.set_int64_1(i64_1);
#endif
}
void SqlSealogEvent::setInt64_2(Int64 i64_2)
{
#ifndef SP_DIS
sqlInfoEvent_.set_int64_2(i64_2);
#endif
}
// Pass in an event id (from the sq_sql_eventids.h file).
// Pass in a severity from the sealog header file (one of the following values):
// SQ_LOG_CRIT
// SQ_LOG_ALERT
// SQ_LOG_ERR
// SQ_LOG_INFO
// This method sets the event id and severity and serializes
// any other event tokens into a string form and sends the buffer
// to sealog
Int16 SqlSealogEvent::sendEvent(Int16 eventId, Lng32 slSeverity)
{
Int32 rc = 0;
#ifndef SP_DIS
char eventidStr[10]=" ";
Lng32 eventidLen = 0;
str_sprintf(eventidStr,"10%d%06d",SQEVL_SQL,eventId);
str_strip_blanks(eventidStr,eventidLen);
Lng32 eventIdVal = (Lng32)str_atoi(eventidStr,eventidLen);
common::event_header * eventHeader = sqlInfoEvent_.mutable_header();
common::info_header * infoHeader = eventHeader->mutable_header();
rc = initAMQPInfoHeader(infoHeader, SQEVL_SQL);
if (rc)
//add trace log
return rc;
sqlInfoEvent_.mutable_header()->set_event_id(eventIdVal);
sqlInfoEvent_.mutable_header()->set_event_severity(slSeverity);
setExperienceLevel("ADVANCED");
setTarget("LOGONLY");
AMQPRoutingKey routingKey(SP_EVENT, SP_SQLPACKAGE, SP_INSTANCE,
SP_PUBLIC, SP_GPBPROTOCOL, "info_event");
try {
rc = sendAMQPMessage(true, sqlInfoEvent_.SerializeAsString(), SP_CONTENT_TYPE_APP, routingKey);
} catch(...) {
rc = -1;
}
#endif
return rc;
}
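// Illustrative call sequence for this class (the message text below is a made-up
// example; the event id and severity constants are the same ones used by
// logMVRefreshInfoEvent above):
//   SqlSealogEvent ev;
//   ev.openConnection();
//   ev.setMessageText((char *)"refresh completed");
//   ev.sendEvent(SQEV_MVREFRESH_INFO, SQ_LOG_INFO);
//   ev.closeConnection();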
Int16 SqlSealogEvent::closeConnection()
{
#ifdef SP_DIS
return 0;
#else
Int32 rc = closeAMQPConnection();
SQLMXLoggingArea::establishedAMQPConnection_ = FALSE;
if (rc)
// add trace log
return rc;
return rc;
#endif
}
void SQLMXLoggingArea::logSortDiskInfo(char *diskname, short percentfree, short diskerror)
{
//
//TBD or rewrite needed
//** use event id SQEV_SQL_SRT_INFO **
}
//LCOV_EXCL_START
// ss_cc_changes: these are stubs that need to be removed by components.
// Excluding these from code coverage
static void check_assert_bug_catcher()
{
}
void SQLMXLoggingArea::logUtilErrorsEvent (const char *utilName,
const Int32 numOfErrors,
const Lng32 errorCode,
const char *msg1,
const char *msg2,
const char *msg3,
const char *msg4,
const char *msg5)
{
/* TBD
tokenClass operation_tok(
STRING,
TKN_UTIL_NAME,
(void*)utilName
);
tokenClass errorCode_tok(
LONGINT32,
TKN_PM_SQLCODE,
(void*)&errorCode
);
  // There must be at least one error message (msg1) to emit.
tokenClass msg1_tok(STRING,
TKN_PM_ERRTEXT,
(void*)msg1
);
tokenClass msg2_tok(STRING,
TKN_PM_ERRTEXT,
(void*)msg2
);
tokenClass msg3_tok(STRING,
TKN_PM_ERRTEXT,
(void*)msg3
);
tokenClass msg4_tok(STRING,
TKN_PM_ERRTEXT,
(void*)msg4
);
tokenClass msg5_tok(STRING,
TKN_PM_ERRTEXT,
(void*)msg5
);
switch (numOfErrors)
{
case (1):
logAnMxEvent(SQLMX_UTIL_OP_ERROR,
SQLMXLoggingArea::SoftwareFailure,
&operation_tok,
&errorCode_tok,
&msg1_tok,
0
);
break;
case (2):
logAnMxEvent(SQLMX_UTIL_OP_ERROR,
SQLMXLoggingArea::SoftwareFailure,
&operation_tok,
&errorCode_tok,
&msg1_tok,
&msg2_tok,
0
);
break;
case (3):
logAnMxEvent(SQLMX_UTIL_OP_ERROR,
SQLMXLoggingArea::SoftwareFailure,
&operation_tok,
&errorCode_tok,
&msg1_tok,
&msg2_tok,
&msg3_tok,
0
);
break;
case (4):
logAnMxEvent(SQLMX_UTIL_OP_ERROR,
SQLMXLoggingArea::SoftwareFailure,
&operation_tok,
&errorCode_tok,
&msg1_tok,
&msg2_tok,
&msg3_tok,
&msg4_tok,
0
);
break;
case (5):
logAnMxEvent(SQLMX_UTIL_OP_ERROR,
SQLMXLoggingArea::SoftwareFailure,
&operation_tok,
&errorCode_tok,
&msg1_tok,
&msg2_tok,
&msg3_tok,
&msg4_tok,
&msg5_tok,
0
);
break;
default:
// should not get here.
break;
}
end TBD */
}
void SQLMXLoggingArea::logUtilOperationStatusEvent(ULng32 eventId,
const char *utilName,
const char *objType,
const char *objAnsiName,
const char *utilStatus)
{
/* TBD
tokenClass utilName_tok(
STRING,
TKN_UTIL_NAME,
(void*)utilName
);
tokenClass objType_tok(
STRING,
TKN_PM_OBJ_TYPE,
(void*)objType
);
tokenClass objAnsiName_tok(
STRING,
TKN_PM_ANSI_NAME,
(void*)objAnsiName
);
tokenClass utilStatus_tok(
STRING,
TKN_UTIL_STATUS,
(void*)utilStatus
);
logAnMxEvent(eventId,
SQLMXLoggingArea::Informational,
&utilName_tok,
&objType_tok,
&objAnsiName_tok,
&utilStatus_tok,
0
);
end TBD */
}
void SQLMXLoggingArea::logPMEvent(ULng32 eventId)
{
// TBD
}
// These aren't currently used
#if 0
void SQLMXLoggingArea::logPMOperationStatusEvent(ULng32 eventId,
const char *operation,
const char *objType,
const char *objAnsiName)
{
}
void SQLMXLoggingArea::logPMDataCopyStatusEvent(ULng32 eventId,
TInt64 elapsedTime,
TInt64 totalCopyCount,
const char *unitName)
{
}
void SQLMXLoggingArea::logPMEventWithGuardianName(ULng32 eventId,
const char *location)
{
}
void SQLMXLoggingArea::logPMEventWithDumpFileName(ULng32 eventId,
const char *location)
{
}
void SQLMXLoggingArea::logPMEventWithInterval(ULng32 eventId,
TInt64 interval)
{
}
void SQLMXLoggingArea::logPMErrorsEvent (const char *operation,
const Int32 numOfErrors,
const Lng32 errorCode,
const char *msg1,
const char *msg2,
const char *msg3,
const char *msg4,
const char *msg5)
{
}
void SQLMXLoggingArea::logPMAudInitEvent(ULng32 eventId,
TInt64 interval)
{
}
void SQLMXLoggingArea::logPMAudStartEvent(ULng32 eventId,
const short audNum)
{
}
void SQLMXLoggingArea::logPMAudDoneEvent(ULng32 eventId,
const short audNum,
TInt64 interval)
{
}
#endif // #if 0
// LCOV_EXCL_STOP
| 1 | 10,134 | This file could also be cleaned up/deleted. But lets do this separately after ensuring all the event logging has been ported to the current logmxevent_traf.cpp file completely. | apache-trafodion | cpp |
@@ -7,12 +7,13 @@ module RSpec::Core::Formatters
it 'produces the expected full output' do
output = run_example_specs_with_formatter('failures')
expect(output).to eq(<<-EOS.gsub(/^\s+\|/, ''))
- |./spec/rspec/core/resources/formatter_specs.rb:4:is marked as pending but passes
- |./spec/rspec/core/resources/formatter_specs.rb:36:fails
- |./spec/rspec/core/resources/formatter_specs.rb:40:fails twice
- |./spec/rspec/core/resources/formatter_specs.rb:47:fails with a backtrace that has no file
- |./spec/rspec/core/resources/formatter_specs.rb:53:fails with a backtrace containing an erb file
- |./spec/rspec/core/resources/formatter_specs.rb:71:raises
+ |./spec/rspec/core/resources/formatter_specs.rb:4:Expected example to fail since it is pending, but it passed.
+ |/home/roadster/dev/oss/rspec-core/spec/rspec/core/resources/formatter_specs.rb:37:expected: 2 got: 1 (compared using ==)
+ |/home/roadster/dev/oss/rspec-core/spec/rspec/core/resources/formatter_specs.rb:41:expected: 2 got: 1 (compared using ==)
+ |/home/roadster/dev/oss/rspec-core/spec/rspec/core/resources/formatter_specs.rb:42:expected: 4 got: 3 (compared using ==)
+ |/home/roadster/dev/oss/rspec-core/spec/rspec/core/resources/formatter_specs.rb:50:foo
+ |/foo.html.erb:1:Exception
+ |./spec/rspec/core/resources/formatter_specs.rb:71:boom
EOS
end
end | 1 | require 'rspec/core/formatters/failure_list_formatter'
module RSpec::Core::Formatters
RSpec.describe FailureListFormatter do
include FormatterSupport
it 'produces the expected full output' do
output = run_example_specs_with_formatter('failures')
expect(output).to eq(<<-EOS.gsub(/^\s+\|/, ''))
|./spec/rspec/core/resources/formatter_specs.rb:4:is marked as pending but passes
|./spec/rspec/core/resources/formatter_specs.rb:36:fails
|./spec/rspec/core/resources/formatter_specs.rb:40:fails twice
|./spec/rspec/core/resources/formatter_specs.rb:47:fails with a backtrace that has no file
|./spec/rspec/core/resources/formatter_specs.rb:53:fails with a backtrace containing an erb file
|./spec/rspec/core/resources/formatter_specs.rb:71:raises
EOS
end
end
end
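# Illustrative note on the heredoc above: the `gsub(/^\s+\|/, '')` strips the leading
# indentation up to and including the `|` margin marker, so a heredoc line such as
#   "      |./spec/rspec/core/resources/formatter_specs.rb:4:..."
# is compared as
#   "./spec/rspec/core/resources/formatter_specs.rb:4:..."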
| 1 | 18,054 | :thinking:, the line number here is the line of the example (`example.location`), that's probably why I finally chose to display the example title because it's what can be found on this line, and saying that the error is from there is confusing. But presenting the actual failures is indeed better, so maybe we can get the failure actual line number from the exception backtrace and then output two distinct lines for failures like this one? | rspec-rspec-core | rb |
@@ -35,7 +35,7 @@ class WebDriver(ChromiumDriver):
def __init__(self, executable_path="chromedriver", port=DEFAULT_PORT,
options=None, service_args=None,
desired_capabilities=None, service_log_path=DEFAULT_SERVICE_LOG_PATH,
- chrome_options=None, service=None, keep_alive=True):
+ chrome_options=None, service=None, keep_alive=True, create_no_window=False):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates new instance of chrome driver. | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
from selenium.webdriver.chromium.webdriver import ChromiumDriver
from .options import Options
from .service import Service
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
DEFAULT_PORT = 0
DEFAULT_SERVICE_LOG_PATH = None
class WebDriver(ChromiumDriver):
"""
Controls the ChromeDriver and allows you to drive the browser.
You will need to download the ChromeDriver executable from
http://chromedriver.storage.googleapis.com/index.html
"""
def __init__(self, executable_path="chromedriver", port=DEFAULT_PORT,
options=None, service_args=None,
desired_capabilities=None, service_log_path=DEFAULT_SERVICE_LOG_PATH,
chrome_options=None, service=None, keep_alive=True):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates new instance of chrome driver.
:Args:
- executable_path - Deprecated: path to the executable. If the default is used it assumes the executable is in the $PATH
- port - Deprecated: port you would like the service to run, if left as 0, a free port will be found.
- options - this takes an instance of ChromeOptions
- service_args - Deprecated: List of args to pass to the driver service
- desired_capabilities - Deprecated: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- service_log_path - Deprecated: Where to log information from the driver.
- keep_alive - Whether to configure ChromeRemoteConnection to use HTTP keep-alive.
"""
if executable_path != 'chromedriver':
warnings.warn('executable_path has been deprecated, please pass in a Service object',
DeprecationWarning, stacklevel=2)
if chrome_options:
warnings.warn('use options instead of chrome_options',
DeprecationWarning, stacklevel=2)
options = chrome_options
if service is None:
service = Service(executable_path, port, service_args, service_log_path)
super(WebDriver, self).__init__(DesiredCapabilities.CHROME['browserName'], "goog",
port, options,
service_args, desired_capabilities,
service_log_path, service, keep_alive)
def create_options(self):
return Options()
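    # Illustrative, non-deprecated construction; the chromedriver path and the Chrome
    # switch below are placeholders, not values required by this class:
    #
    #   from selenium.webdriver.chrome.service import Service
    #   from selenium.webdriver.chrome.options import Options
    #
    #   options = Options()
    #   options.add_argument("--window-size=1280,800")
    #   driver = WebDriver(service=Service("/usr/local/bin/chromedriver"), options=options)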
| 1 | 17,875 | We shouldn't add this as a new `kwarg` here. This should all be done on the options class | SeleniumHQ-selenium | js |
@@ -134,6 +134,7 @@ module Beaker
:openstack_keyname => ENV['OS_KEYNAME'],
:openstack_network => ENV['OS_NETWORK'],
:openstack_region => ENV['OS_REGION'],
+ :openstack_volume_support => ENV['OS_VOL_SUPPORT'] || true,
:jenkins_build_url => nil,
:validate => true,
:configure => true, | 1 | module Beaker
module Options
#A class representing the environment variables and preset argument values to be incorporated
#into the Beaker options Object.
class Presets
# This is a constant that describes the variables we want to collect
# from the environment. The keys correspond to the keys in
      # `presets` (flattened). The values are an optional array of
# environment variable names to look for. The array structure allows
# us to define multiple environment variables for the same
# configuration value. They are checked in the order they are arrayed
# so that preferred and "fallback" values work as expected.
#
# 'JOB_NAME' and 'BUILD_URL' envs are supplied by Jenkins
# https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project
ENVIRONMENT_SPEC = {
:home => 'HOME',
:project => ['BEAKER_PROJECT', 'BEAKER_project', 'JOB_NAME'],
:department => ['BEAKER_DEPARTMENT', 'BEAKER_department'],
:jenkins_build_url => ['BEAKER_BUILD_URL', 'BUILD_URL'],
:created_by => ['BEAKER_CREATED_BY'],
:consoleport => ['BEAKER_CONSOLEPORT', 'consoleport'],
:is_pe => ['BEAKER_IS_PE', 'IS_PE'],
:pe_dir => ['BEAKER_PE_DIR', 'pe_dist_dir'],
:puppet_agent_version => ['BEAKER_PUPPET_AGENT_VERSION'],
:puppet_agent_sha => ['BEAKER_PUPPET_AGENT_SHA'],
:puppet_collection => ['BEAKER_PUPPET_COLLECTION'],
:pe_version_file => ['BEAKER_PE_VERSION_FILE', 'pe_version_file'],
:pe_ver => ['BEAKER_PE_VER', 'pe_ver'],
:forge_host => ['BEAKER_FORGE_HOST', 'forge_host'],
:package_proxy => ['BEAKER_PACKAGE_PROXY'],
:release_apt_repo_url => ['BEAKER_RELEASE_APT_REPO', 'RELEASE_APT_REPO'],
:release_yum_repo_url => ['BEAKER_RELEASE_YUM_REPO', 'RELEASE_YUM_REPO'],
:dev_builds_url => ['BEAKER_DEV_BUILDS_URL', 'DEV_BUILDS_URL'],
:vbguest_plugin => ['BEAKER_VB_GUEST_PLUGIN', 'BEAKER_vb_guest_plugin'],
:test_tag_and => ['BEAKER_TAG', 'BEAKER_TEST_TAG_AND'],
:test_tag_or => ['BEAKER_TEST_TAG_OR'],
:test_tag_exclude => ['BEAKER_EXCLUDE_TAG', 'BEAKER_TEST_TAG_EXCLUDE'],
:run_in_parallel => ['BEAKER_RUN_IN_PARALLEL'],
}
# Select all environment variables whose name matches provided regex
# @return [Hash] Hash of environment variables
def select_env_by_regex regex
envs = Beaker::Options::OptionsHash.new
ENV.each_pair do | k, v |
if k.to_s =~ /#{regex}/
envs[k] = v
end
end
envs
end
# Takes an environment_spec and searches the processes environment variables accordingly
#
# @param [Hash{Symbol=>Array,String}] env_var_spec the spec of what env vars to search for
#
# @return [Hash] Found environment values
def collect_env_vars( env_var_spec )
env_var_spec.inject({}) do |memo, key_value|
key, value = key_value[0], key_value[1]
set_env_var = Array(value).detect {|possible_variable| ENV[possible_variable] }
memo[key] = ENV[set_env_var] if set_env_var
memo
end
end
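      # Illustration with a hypothetical environment: if ENV['JOB_NAME'] is 'nightly' and
      # neither BEAKER_PROJECT nor BEAKER_project is set, then
      #   collect_env_vars(:project => ['BEAKER_PROJECT', 'BEAKER_project', 'JOB_NAME'])
      #   #=> { :project => 'nightly' }
      # because the candidate variable names are checked in order and the first one that
      # is set wins.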
# Takes a hash where the values are found environment configuration values
# and formats them to appropriate Beaker configuration values
#
# @param [Hash{Symbol=>String}] found_env_vars Environment variables to munge
#
# @return [Hash] Environment config values formatted appropriately
def format_found_env_vars( found_env_vars )
found_env_vars[:consoleport] &&= found_env_vars[:consoleport].to_i
if found_env_vars[:is_pe]
is_pe_val = found_env_vars[:is_pe]
type = case is_pe_val
when /yes|true/ then 'pe'
when /no|false/ then 'foss'
else
raise "Invalid value for one of #{ENVIRONMENT_SPEC[:is_pe].join(' ,')}: #{is_pe_val}"
end
found_env_vars[:type] = type
end
if found_env_vars[:run_in_parallel]
found_env_vars[:run_in_parallel] = found_env_vars[:run_in_parallel].split(',')
end
found_env_vars[:pe_version_file_win] = found_env_vars[:pe_version_file]
found_env_vars
end
# Generates an OptionsHash of the environment variables of interest to Beaker
#
# @return [OptionsHash] The supported environment variables in an OptionsHash,
# empty or nil environment variables are removed from the OptionsHash
def calculate_env_vars
found = Beaker::Options::OptionsHash.new
found = found.merge(format_found_env_vars( collect_env_vars( ENVIRONMENT_SPEC )))
found[:answers] = select_env_by_regex('\\Aq_')
found.delete_if {|key, value| value.nil? or value.empty? }
found
end
# Return an OptionsHash of environment variables used in this run of Beaker
#
# @return [OptionsHash] The supported environment variables in an OptionsHash,
# empty or nil environment variables are removed from the OptionsHash
def env_vars
@env ||= calculate_env_vars
end
# Generates an OptionsHash of preset values for arguments supported by Beaker
#
# @return [OptionsHash] The supported arguments in an OptionsHash
def presets
h = Beaker::Options::OptionsHash.new
h.merge({
:project => 'Beaker',
:department => 'unknown',
:created_by => ENV['USER'] || ENV['USERNAME'] || 'unknown',
:host_tags => {},
:openstack_api_key => ENV['OS_PASSWORD'],
:openstack_username => ENV['OS_USERNAME'],
:openstack_auth_url => "#{ENV['OS_AUTH_URL']}/tokens",
:openstack_tenant => ENV['OS_TENANT_NAME'],
:openstack_keyname => ENV['OS_KEYNAME'],
:openstack_network => ENV['OS_NETWORK'],
:openstack_region => ENV['OS_REGION'],
:jenkins_build_url => nil,
:validate => true,
:configure => true,
:log_level => 'info',
:trace_limit => 10,
:"master-start-curl-retries" => 120,
:masterless => false,
:options_file => nil,
:type => 'pe',
:provision => true,
:preserve_hosts => 'never',
:root_keys => false,
:quiet => false,
:project_root => File.expand_path(File.join(File.dirname(__FILE__), "../")),
:xml_dir => 'junit',
:xml_file => 'beaker_junit.xml',
:xml_time => 'beaker_times.xml',
:xml_time_enabled => false,
:xml_stylesheet => 'junit.xsl',
:default_log_prefix => 'beaker_logs',
:log_dir => 'log',
:log_sut_event => 'sut.log',
:color => true,
:dry_run => false,
:test_tag_and => '',
:test_tag_or => '',
:test_tag_exclude => '',
:timeout => 900, # 15 minutes
:fail_mode => 'slow',
:accept_all_exit_codes => false,
:timesync => false,
:disable_iptables => false,
:set_env => true,
:disable_updates => true,
:repo_proxy => false,
:package_proxy => false,
:add_el_extras => false,
:epel_url => "http://dl.fedoraproject.org/pub/epel",
:consoleport => 443,
:pe_dir => '/opt/enterprise/dists',
:pe_version_file => 'LATEST',
:pe_version_file_win => 'LATEST-win',
:host_env => {},
:host_name_prefix => nil,
:ssh_env_file => '~/.ssh/environment',
:profile_d_env_file => '/etc/profile.d/beaker_env.sh',
:dot_fog => File.join(ENV['HOME'], '.fog'),
:ec2_yaml => 'config/image_templates/ec2.yaml',
:help => false,
:collect_perf_data => 'none',
:puppetdb_port_ssl => 8081,
:puppetdb_port_nonssl => 8080,
:puppetserver_port => 8140,
:nodeclassifier_port => 4433,
:cache_files_locally => false,
:aws_keyname_modifier => rand(10 ** 10).to_s.rjust(10,'0'), # 10 digit random number string
:run_in_parallel => [],
:ssh => {
:config => false,
:paranoid => false,
:auth_methods => ["publickey"],
:port => 22,
:forward_agent => true,
:keys => ["#{ENV['HOME']}/.ssh/id_rsa"],
:user_known_hosts_file => "#{ENV['HOME']}/.ssh/known_hosts",
:keepalive => true
}
})
end
end
end
end
| 1 | 14,817 | Should this be `OS_VOLUME_SUPPORT` to match the symbol key names? | voxpupuli-beaker | rb |
@@ -38,6 +38,8 @@
#include <flux/core.h>
#include "heaptrace.h"
+static flux_msg_handler_t **handlers = NULL;
+
static void start_cb (flux_t *h, flux_msg_handler_t *mh,
const flux_msg_t *msg, void *arg)
{ | 1 | /*****************************************************************************\
* Copyright (c) 2014 Lawrence Livermore National Security, LLC. Produced at
* the Lawrence Livermore National Laboratory (cf, AUTHORS, DISCLAIMER.LLNS).
* LLNL-CODE-658032 All rights reserved.
*
* This file is part of the Flux resource manager framework.
* For details, see https://github.com/flux-framework.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the license, or (at your option)
* any later version.
*
* Flux is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
* See also: http://www.gnu.org/licenses/
\*****************************************************************************/
#if HAVE_CONFIG_H
#include "config.h"
#endif
#if WITH_TCMALLOC
#if HAVE_GPERFTOOLS_HEAP_PROFILER_H
#include <gperftools/heap-profiler.h>
#elif HAVE_GOOGLE_HEAP_PROFILER_H
#include <google/heap-profiler.h>
#else
#error gperftools headers not configured
#endif
#endif /* WITH_TCMALLOC */
#include <flux/core.h>
#include "heaptrace.h"
static void start_cb (flux_t *h, flux_msg_handler_t *mh,
const flux_msg_t *msg, void *arg)
{
const char *filename;
if (flux_request_unpack (msg, NULL, "{s:s}", "filename", &filename) < 0)
goto error;
#if WITH_TCMALLOC
if (IsHeapProfilerRunning ()) {
errno = EINVAL;
goto error;
}
HeapProfilerStart (filename);
#else
errno = ENOSYS;
goto error;
#endif
if (flux_respond (h, msg, 0, NULL) < 0)
FLUX_LOG_ERROR (h);
return;
error:
if (flux_respond (h, msg, errno, NULL) < 0)
FLUX_LOG_ERROR (h);
}
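/* Illustration: a matching "heaptrace.start" request payload is a JSON object such as
 *   { "filename": "/tmp/flux-heap" }      (the path is only a placeholder)
 * which flux_request_unpack() above extracts via the "{s:s}" format string.
 */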
static void dump_cb (flux_t *h, flux_msg_handler_t *mh,
const flux_msg_t *msg, void *arg)
{
const char *reason;
if (flux_request_unpack (msg, NULL, "{s:s}", "reason", &reason) < 0)
goto error;
#if WITH_TCMALLOC
if (!IsHeapProfilerRunning ()) {
errno = EINVAL;
goto error;
}
HeapProfilerDump (reason);
#else
errno = ENOSYS;
goto error;
#endif
if (flux_respond (h, msg, 0, NULL) < 0)
FLUX_LOG_ERROR (h);
return;
error:
if (flux_respond (h, msg, errno, NULL) < 0)
FLUX_LOG_ERROR (h);
}
static void stop_cb (flux_t *h, flux_msg_handler_t *mh,
const flux_msg_t *msg, void *arg)
{
if (flux_request_decode (msg, NULL, NULL) < 0)
goto error;
#if WITH_TCMALLOC
if (!IsHeapProfilerRunning ()) {
errno = EINVAL;
goto error;
}
HeapProfilerStop();
#else
errno = ENOSYS;
goto error;
#endif /* WITH_TCMALLOC */
if (flux_respond (h, msg, 0, NULL) < 0)
FLUX_LOG_ERROR (h);
return;
error:
if (flux_respond (h, msg, errno, NULL) < 0)
FLUX_LOG_ERROR (h);
}
static struct flux_msg_handler_spec handlers[] = {
{ FLUX_MSGTYPE_REQUEST, "heaptrace.start", start_cb, 0, NULL },
{ FLUX_MSGTYPE_REQUEST, "heaptrace.dump", dump_cb, 0, NULL },
{ FLUX_MSGTYPE_REQUEST, "heaptrace.stop", stop_cb, 0, NULL },
FLUX_MSGHANDLER_TABLE_END,
};
static void heaptrace_finalize (void *arg)
{
flux_msg_handler_delvec (handlers);
}
int heaptrace_initialize (flux_t *h)
{
char *dummy = "hello";
if (flux_msg_handler_addvec (h, handlers, NULL) < 0)
return -1;
flux_aux_set (h, "flux::heaptrace", dummy, heaptrace_finalize);
return 0;
}
/*
* vi:tabstop=4 shiftwidth=4 expandtab
*/
| 1 | 19,606 | In C, file scope variables are already initialized to 0 so the "= NULL" is redundant. Not a big deal. | flux-framework-flux-core | c |
@@ -2,7 +2,7 @@
var nodeName = node.nodeName.toUpperCase(),
nodeType = node.type,
- doc = document;
+ doc = axe.commons.dom.getRootNode(node);
if (node.getAttribute('aria-disabled') === 'true' || axe.commons.dom.findUp(node, '[aria-disabled="true"]')) {
return false; | 1 | /* global document */
var nodeName = node.nodeName.toUpperCase(),
nodeType = node.type,
doc = document;
if (node.getAttribute('aria-disabled') === 'true' || axe.commons.dom.findUp(node, '[aria-disabled="true"]')) {
return false;
}
if (nodeName === 'INPUT') {
return ['hidden', 'range', 'color', 'checkbox', 'radio', 'image'].indexOf(nodeType) === -1 && !node.disabled;
}
if (nodeName === 'SELECT') {
return !!node.options.length && !node.disabled;
}
if (nodeName === 'TEXTAREA') {
return !node.disabled;
}
if (nodeName === 'OPTION') {
return false;
}
if (nodeName === 'BUTTON' && node.disabled || axe.commons.dom.findUp(node, 'button[disabled]')) {
return false;
}
if (nodeName === 'FIELDSET' && node.disabled || axe.commons.dom.findUp(node, 'fieldset[disabled]')) {
return false;
}
// check if the element is a label or label descendant for a disabled control
var nodeParentLabel = axe.commons.dom.findUp(node, 'label');
if (nodeName === 'LABEL' || nodeParentLabel) {
var relevantNode = node;
if (nodeParentLabel) {
relevantNode = nodeParentLabel;
}
// explicit label of disabled input
var candidate = relevantNode.htmlFor && doc.getElementById(relevantNode.htmlFor);
if (candidate && candidate.disabled) {
return false;
}
var candidate = node.querySelector('input:not([type="hidden"]):not([type="image"])' +
':not([type="button"]):not([type="submit"]):not([type="reset"]), select, textarea');
if (candidate && candidate.disabled) {
return false;
}
}
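// Illustration with hypothetical markup: for
//   <label for="age">Age</label> <input id="age" type="text" disabled>
// the branch above resolves the label's control through htmlFor and, because that
// control is disabled, returns false for the label.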
// label of disabled control associated w/ aria-labelledby
if (node.id) {
var candidate = doc.querySelector('[aria-labelledby~=' + axe.commons.utils.escapeSelector(node.id) + ']');
if (candidate && candidate.disabled) {
return false;
}
}
if (axe.commons.text.visible(node, false, true) === '') {
return false;
}
var range = document.createRange(),
childNodes = node.childNodes,
length = childNodes.length,
child, index;
for (index = 0; index < length; index++) {
child = childNodes[index];
if (child.nodeType === 3 && axe.commons.text.sanitize(child.nodeValue) !== '') {
range.selectNodeContents(child);
}
}
var rects = range.getClientRects();
length = rects.length;
for (index = 0; index < length; index++) {
//check to see if the rectangle impinges
if (axe.commons.dom.visuallyOverlaps(rects[index], node)) {
return true;
}
}
return false;
| 1 | 11,262 | on line 40, the `relevantNode` can change and therefore the `doc` might change too for the lookup on line 43. I think this code should be moved to where the `doc` is actually being used | dequelabs-axe-core | js |
@@ -57,6 +57,7 @@ namespace OpenTelemetry.Internal
{
throw new ArgumentOutOfRangeException(
nameof(milliseconds),
+ milliseconds,
string.Format(CultureInfo.InvariantCulture, "milliseconds must be between {0} and {1}", MinMilliseconds, MaxMilliseconds));
}
| 1 | // <copyright file="DateTimeOffsetExtensions.net452.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
#if NET452
using System;
using System.Globalization;
namespace OpenTelemetry.Internal
{
internal static class DateTimeOffsetExtensions
{
private const int DaysPerYear = 365;
private const int DaysPer4Years = (DaysPerYear * 4) + 1; // 1461
private const int DaysPer100Years = (DaysPer4Years * 25) - 1; // 36524
private const int DaysPer400Years = (DaysPer100Years * 4) + 1; // 146097
private const int DaysTo1970 = (DaysPer400Years * 4) + (DaysPer100Years * 3) + (DaysPer4Years * 17) + DaysPerYear; // 719,162
private const int DaysTo10000 = (DaysPer400Years * 25) - 366; // 3652059
private const long TicksPerMillisecond = 10000;
private const long TicksPerSecond = TicksPerMillisecond * 1000;
private const long TicksPerMinute = TicksPerSecond * 60;
private const long TicksPerHour = TicksPerMinute * 60;
private const long TicksPerDay = TicksPerHour * 24;
private const long UnixEpochTicks = TimeSpan.TicksPerDay * DaysTo1970; // 621,355,968,000,000,000
private const long UnixEpochSeconds = UnixEpochTicks / TimeSpan.TicksPerSecond; // 62,135,596,800
private const long UnixEpochMilliseconds = UnixEpochTicks / TimeSpan.TicksPerMillisecond; // 62,135,596,800,000
private const long MinTicks = 0;
private const long MaxTicks = (DaysTo10000 * TicksPerDay) - 1;
public static long ToUnixTimeMilliseconds(this DateTimeOffset dateTimeOffset)
{
// Truncate sub-millisecond precision before offsetting by the Unix Epoch to avoid
// the last digit being off by one for dates that result in negative Unix times
long milliseconds = dateTimeOffset.Ticks / TimeSpan.TicksPerMillisecond;
return milliseconds - UnixEpochMilliseconds;
}
public static DateTimeOffset FromUnixTimeMilliseconds(long milliseconds)
{
const long MinMilliseconds = (MinTicks / TimeSpan.TicksPerMillisecond) - UnixEpochMilliseconds;
const long MaxMilliseconds = (MaxTicks / TimeSpan.TicksPerMillisecond) - UnixEpochMilliseconds;
if (milliseconds < MinMilliseconds || milliseconds > MaxMilliseconds)
{
throw new ArgumentOutOfRangeException(
nameof(milliseconds),
string.Format(CultureInfo.InvariantCulture, "milliseconds must be between {0} and {1}", MinMilliseconds, MaxMilliseconds));
}
long ticks = (milliseconds * TimeSpan.TicksPerMillisecond) + UnixEpochTicks;
return new DateTimeOffset(ticks, TimeSpan.Zero);
}
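        // Illustration: FromUnixTimeMilliseconds(0) yields 1970-01-01T00:00:00 +00:00, and
        // ToUnixTimeMilliseconds(new DateTimeOffset(1970, 1, 1, 0, 0, 1, TimeSpan.Zero))
        // yields 1000, i.e. one second past the Unix epoch.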
public static long ToUnixTimeSeconds(this DateTimeOffset dateTimeOffset)
{
long seconds = dateTimeOffset.Ticks / TimeSpan.TicksPerSecond;
return seconds - UnixEpochSeconds;
}
}
}
#endif
| 1 | 17,486 | nit: just to be similar to others, can you change to interpolation? | open-telemetry-opentelemetry-dotnet | .cs |
@@ -111,6 +111,9 @@ type ControllerOptions struct {
// CertificateRequest -> Order. Slice of string literals that are
// treated as prefixes for annotation keys.
CopiedAnnotationPrefixes []string
+
+ //Return full Certchain including root cert for k8s CSRs
+ FullCertChain bool
}
const ( | 1 | /*
Copyright 2020 The cert-manager Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"net"
"strings"
"time"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/util/sets"
cm "github.com/jetstack/cert-manager/pkg/apis/certmanager"
challengescontroller "github.com/jetstack/cert-manager/pkg/controller/acmechallenges"
orderscontroller "github.com/jetstack/cert-manager/pkg/controller/acmeorders"
shimgatewaycontroller "github.com/jetstack/cert-manager/pkg/controller/certificate-shim/gateways"
shimingresscontroller "github.com/jetstack/cert-manager/pkg/controller/certificate-shim/ingresses"
cracmecontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/acme"
crapprovercontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/approver"
crcacontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/ca"
crselfsignedcontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/selfsigned"
crvaultcontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/vault"
crvenaficontroller "github.com/jetstack/cert-manager/pkg/controller/certificaterequests/venafi"
"github.com/jetstack/cert-manager/pkg/controller/certificates/issuing"
"github.com/jetstack/cert-manager/pkg/controller/certificates/keymanager"
certificatesmetricscontroller "github.com/jetstack/cert-manager/pkg/controller/certificates/metrics"
"github.com/jetstack/cert-manager/pkg/controller/certificates/readiness"
"github.com/jetstack/cert-manager/pkg/controller/certificates/requestmanager"
"github.com/jetstack/cert-manager/pkg/controller/certificates/revisionmanager"
"github.com/jetstack/cert-manager/pkg/controller/certificates/trigger"
csracmecontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/acme"
csrcacontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/ca"
csrselfsignedcontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/selfsigned"
csrvaultcontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/vault"
csrvenaficontroller "github.com/jetstack/cert-manager/pkg/controller/certificatesigningrequests/venafi"
clusterissuerscontroller "github.com/jetstack/cert-manager/pkg/controller/clusterissuers"
issuerscontroller "github.com/jetstack/cert-manager/pkg/controller/issuers"
"github.com/jetstack/cert-manager/pkg/feature"
logf "github.com/jetstack/cert-manager/pkg/logs"
"github.com/jetstack/cert-manager/pkg/util"
utilfeature "github.com/jetstack/cert-manager/pkg/util/feature"
)
type ControllerOptions struct {
APIServerHost string
Kubeconfig string
KubernetesAPIQPS float32
KubernetesAPIBurst int
ClusterResourceNamespace string
Namespace string
LeaderElect bool
LeaderElectionNamespace string
LeaderElectionLeaseDuration time.Duration
LeaderElectionRenewDeadline time.Duration
LeaderElectionRetryPeriod time.Duration
controllers []string
ACMEHTTP01SolverImage string
ACMEHTTP01SolverResourceRequestCPU string
ACMEHTTP01SolverResourceRequestMemory string
ACMEHTTP01SolverResourceLimitsCPU string
ACMEHTTP01SolverResourceLimitsMemory string
ClusterIssuerAmbientCredentials bool
IssuerAmbientCredentials bool
// Default issuer/certificates details consumed by ingress-shim
DefaultIssuerName string
DefaultIssuerKind string
DefaultIssuerGroup string
DefaultAutoCertificateAnnotations []string
// Allows specifying a list of custom nameservers to perform DNS checks on.
DNS01RecursiveNameservers []string
// Allows controlling if recursive nameservers are only used for all checks.
// Normally authoritative nameservers are used for checking propagation.
DNS01RecursiveNameserversOnly bool
EnableCertificateOwnerRef bool
MaxConcurrentChallenges int
// The host and port address, separated by a ':', that the Prometheus server
// should expose metrics on.
MetricsListenAddress string
// EnablePprof controls whether net/http/pprof handlers are registered with
// the HTTP listener.
EnablePprof bool
DNS01CheckRetryPeriod time.Duration
// Annotations copied Certificate -> CertificateRequest,
// CertificateRequest -> Order. Slice of string literals that are
// treated as prefixes for annotation keys.
CopiedAnnotationPrefixes []string
}
const (
defaultAPIServerHost = ""
defaultKubeconfig = ""
defaultKubernetesAPIQPS float32 = 20
defaultKubernetesAPIBurst = 50
defaultClusterResourceNamespace = "kube-system"
defaultNamespace = ""
defaultLeaderElect = true
defaultLeaderElectionNamespace = "kube-system"
defaultLeaderElectionLeaseDuration = 60 * time.Second
defaultLeaderElectionRenewDeadline = 40 * time.Second
defaultLeaderElectionRetryPeriod = 15 * time.Second
defaultClusterIssuerAmbientCredentials = true
defaultIssuerAmbientCredentials = false
defaultTLSACMEIssuerName = ""
defaultTLSACMEIssuerKind = "Issuer"
defaultTLSACMEIssuerGroup = cm.GroupName
defaultEnableCertificateOwnerRef = false
defaultDNS01RecursiveNameserversOnly = false
defaultMaxConcurrentChallenges = 60
defaultPrometheusMetricsServerAddress = "0.0.0.0:9402"
defaultDNS01CheckRetryPeriod = 10 * time.Second
)
var (
defaultACMEHTTP01SolverImage = fmt.Sprintf("quay.io/jetstack/cert-manager-acmesolver:%s", util.AppVersion)
defaultACMEHTTP01SolverResourceRequestCPU = "10m"
defaultACMEHTTP01SolverResourceRequestMemory = "64Mi"
defaultACMEHTTP01SolverResourceLimitsCPU = "100m"
defaultACMEHTTP01SolverResourceLimitsMemory = "64Mi"
defaultAutoCertificateAnnotations = []string{"kubernetes.io/tls-acme"}
allControllers = []string{
issuerscontroller.ControllerName,
clusterissuerscontroller.ControllerName,
certificatesmetricscontroller.ControllerName,
shimingresscontroller.ControllerName,
shimgatewaycontroller.ControllerName,
orderscontroller.ControllerName,
challengescontroller.ControllerName,
cracmecontroller.CRControllerName,
crapprovercontroller.ControllerName,
crcacontroller.CRControllerName,
crselfsignedcontroller.CRControllerName,
crvaultcontroller.CRControllerName,
crvenaficontroller.CRControllerName,
// certificate controllers
trigger.ControllerName,
issuing.ControllerName,
keymanager.ControllerName,
requestmanager.ControllerName,
readiness.ControllerName,
revisionmanager.ControllerName,
}
defaultEnabledControllers = []string{
issuerscontroller.ControllerName,
clusterissuerscontroller.ControllerName,
certificatesmetricscontroller.ControllerName,
shimingresscontroller.ControllerName,
orderscontroller.ControllerName,
challengescontroller.ControllerName,
cracmecontroller.CRControllerName,
crapprovercontroller.ControllerName,
crcacontroller.CRControllerName,
crselfsignedcontroller.CRControllerName,
crvaultcontroller.CRControllerName,
crvenaficontroller.CRControllerName,
// certificate controllers
trigger.ControllerName,
issuing.ControllerName,
keymanager.ControllerName,
requestmanager.ControllerName,
readiness.ControllerName,
revisionmanager.ControllerName,
}
experimentalCertificateSigningRequestControllers = []string{
csracmecontroller.CSRControllerName,
csrcacontroller.CSRControllerName,
csrselfsignedcontroller.CSRControllerName,
csrvenaficontroller.CSRControllerName,
csrvaultcontroller.CSRControllerName,
}
// Annotations that will be copied from Certificate to CertificateRequest and to Order.
// By default, copy all annotations except for the ones applied by kubectl, fluxcd, argocd.
defaultCopiedAnnotationPrefixes = []string{
"*",
"-kubectl.kubernetes.io/",
"-fluxcd.io/",
"-argocd.argoproj.io/",
}
)
func NewControllerOptions() *ControllerOptions {
return &ControllerOptions{
APIServerHost: defaultAPIServerHost,
ClusterResourceNamespace: defaultClusterResourceNamespace,
KubernetesAPIQPS: defaultKubernetesAPIQPS,
KubernetesAPIBurst: defaultKubernetesAPIBurst,
Namespace: defaultNamespace,
LeaderElect: defaultLeaderElect,
LeaderElectionNamespace: defaultLeaderElectionNamespace,
LeaderElectionLeaseDuration: defaultLeaderElectionLeaseDuration,
LeaderElectionRenewDeadline: defaultLeaderElectionRenewDeadline,
LeaderElectionRetryPeriod: defaultLeaderElectionRetryPeriod,
controllers: defaultEnabledControllers,
ClusterIssuerAmbientCredentials: defaultClusterIssuerAmbientCredentials,
IssuerAmbientCredentials: defaultIssuerAmbientCredentials,
DefaultIssuerName: defaultTLSACMEIssuerName,
DefaultIssuerKind: defaultTLSACMEIssuerKind,
DefaultIssuerGroup: defaultTLSACMEIssuerGroup,
DefaultAutoCertificateAnnotations: defaultAutoCertificateAnnotations,
DNS01RecursiveNameservers: []string{},
DNS01RecursiveNameserversOnly: defaultDNS01RecursiveNameserversOnly,
EnableCertificateOwnerRef: defaultEnableCertificateOwnerRef,
MetricsListenAddress: defaultPrometheusMetricsServerAddress,
DNS01CheckRetryPeriod: defaultDNS01CheckRetryPeriod,
EnablePprof: false,
}
}
func (s *ControllerOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.APIServerHost, "master", defaultAPIServerHost, ""+
"Optional apiserver host address to connect to. If not specified, autoconfiguration "+
"will be attempted.")
fs.StringVar(&s.Kubeconfig, "kubeconfig", defaultKubeconfig, ""+
"Paths to a kubeconfig. Only required if out-of-cluster.")
fs.Float32Var(&s.KubernetesAPIQPS, "kube-api-qps", defaultKubernetesAPIQPS, "indicates the maximum queries-per-second requests to the Kubernetes apiserver")
fs.IntVar(&s.KubernetesAPIBurst, "kube-api-burst", defaultKubernetesAPIBurst, "the maximum burst queries-per-second of requests sent to the Kubernetes apiserver")
fs.StringVar(&s.ClusterResourceNamespace, "cluster-resource-namespace", defaultClusterResourceNamespace, ""+
"Namespace to store resources owned by cluster scoped resources such as ClusterIssuer in. "+
"This must be specified if ClusterIssuers are enabled.")
fs.StringVar(&s.Namespace, "namespace", defaultNamespace, ""+
"If set, this limits the scope of cert-manager to a single namespace and ClusterIssuers are disabled. "+
"If not specified, all namespaces will be watched")
fs.BoolVar(&s.LeaderElect, "leader-elect", true, ""+
"If true, cert-manager will perform leader election between instances to ensure no more "+
"than one instance of cert-manager operates at a time")
fs.StringVar(&s.LeaderElectionNamespace, "leader-election-namespace", defaultLeaderElectionNamespace, ""+
"Namespace used to perform leader election. Only used if leader election is enabled")
fs.DurationVar(&s.LeaderElectionLeaseDuration, "leader-election-lease-duration", defaultLeaderElectionLeaseDuration, ""+
"The duration that non-leader candidates will wait after observing a leadership "+
"renewal until attempting to acquire leadership of a led but unrenewed leader "+
"slot. This is effectively the maximum duration that a leader can be stopped "+
"before it is replaced by another candidate. This is only applicable if leader "+
"election is enabled.")
fs.DurationVar(&s.LeaderElectionRenewDeadline, "leader-election-renew-deadline", defaultLeaderElectionRenewDeadline, ""+
"The interval between attempts by the acting master to renew a leadership slot "+
"before it stops leading. This must be less than or equal to the lease duration. "+
"This is only applicable if leader election is enabled.")
fs.DurationVar(&s.LeaderElectionRetryPeriod, "leader-election-retry-period", defaultLeaderElectionRetryPeriod, ""+
"The duration the clients should wait between attempting acquisition and renewal "+
"of a leadership. This is only applicable if leader election is enabled.")
fs.StringSliceVar(&s.controllers, "controllers", []string{"*"}, fmt.Sprintf(""+
"A list of controllers to enable. '--controllers=*' enables all "+
"on-by-default controllers, '--controllers=foo' enables just the controller "+
"named 'foo', '--controllers=*,-foo' disables the controller named "+
"'foo'.\nAll controllers: %s",
strings.Join(allControllers, ", ")))
fs.StringVar(&s.ACMEHTTP01SolverImage, "acme-http01-solver-image", defaultACMEHTTP01SolverImage, ""+
"The docker image to use to solve ACME HTTP01 challenges. You most likely will not "+
"need to change this parameter unless you are testing a new feature or developing cert-manager.")
fs.StringVar(&s.ACMEHTTP01SolverResourceRequestCPU, "acme-http01-solver-resource-request-cpu", defaultACMEHTTP01SolverResourceRequestCPU, ""+
"Defines the resource request CPU size when spawning new ACME HTTP01 challenge solver pods.")
fs.StringVar(&s.ACMEHTTP01SolverResourceRequestMemory, "acme-http01-solver-resource-request-memory", defaultACMEHTTP01SolverResourceRequestMemory, ""+
"Defines the resource request Memory size when spawning new ACME HTTP01 challenge solver pods.")
fs.StringVar(&s.ACMEHTTP01SolverResourceLimitsCPU, "acme-http01-solver-resource-limits-cpu", defaultACMEHTTP01SolverResourceLimitsCPU, ""+
"Defines the resource limits CPU size when spawning new ACME HTTP01 challenge solver pods.")
fs.StringVar(&s.ACMEHTTP01SolverResourceLimitsMemory, "acme-http01-solver-resource-limits-memory", defaultACMEHTTP01SolverResourceLimitsMemory, ""+
"Defines the resource limits Memory size when spawning new ACME HTTP01 challenge solver pods.")
fs.BoolVar(&s.ClusterIssuerAmbientCredentials, "cluster-issuer-ambient-credentials", defaultClusterIssuerAmbientCredentials, ""+
"Whether a cluster-issuer may make use of ambient credentials for issuers. 'Ambient Credentials' are credentials drawn from the environment, metadata services, or local files which are not explicitly configured in the ClusterIssuer API object. "+
"When this flag is enabled, the following sources for credentials are also used: "+
"AWS - All sources the Go SDK defaults to, notably including any EC2 IAM roles available via instance metadata.")
fs.BoolVar(&s.IssuerAmbientCredentials, "issuer-ambient-credentials", defaultIssuerAmbientCredentials, ""+
"Whether an issuer may make use of ambient credentials. 'Ambient Credentials' are credentials drawn from the environment, metadata services, or local files which are not explicitly configured in the Issuer API object. "+
"When this flag is enabled, the following sources for credentials are also used: "+
"AWS - All sources the Go SDK defaults to, notably including any EC2 IAM roles available via instance metadata.")
fs.StringSliceVar(&s.DefaultAutoCertificateAnnotations, "auto-certificate-annotations", defaultAutoCertificateAnnotations, ""+
"The annotation consumed by the ingress-shim controller to indicate a ingress is requesting a certificate")
fs.StringVar(&s.DefaultIssuerName, "default-issuer-name", defaultTLSACMEIssuerName, ""+
"Name of the Issuer to use when the tls is requested but issuer name is not specified on the ingress resource.")
fs.StringVar(&s.DefaultIssuerKind, "default-issuer-kind", defaultTLSACMEIssuerKind, ""+
"Kind of the Issuer to use when the tls is requested but issuer kind is not specified on the ingress resource.")
fs.StringVar(&s.DefaultIssuerGroup, "default-issuer-group", defaultTLSACMEIssuerGroup, ""+
"Group of the Issuer to use when the tls is requested but issuer group is not specified on the ingress resource.")
fs.StringSliceVar(&s.DNS01RecursiveNameservers, "dns01-recursive-nameservers",
[]string{}, "A list of comma separated dns server endpoints used for "+
"DNS01 check requests. This should be a list containing host and "+
"port, for example 8.8.8.8:53,8.8.4.4:53")
fs.BoolVar(&s.DNS01RecursiveNameserversOnly, "dns01-recursive-nameservers-only",
defaultDNS01RecursiveNameserversOnly,
"When true, cert-manager will only ever query the configured DNS resolvers "+
"to perform the ACME DNS01 self check. This is useful in DNS constrained "+
"environments, where access to authoritative nameservers is restricted. "+
"Enabling this option could cause the DNS01 self check to take longer "+
"due to caching performed by the recursive nameservers.")
fs.StringSliceVar(&s.DNS01RecursiveNameservers, "dns01-self-check-nameservers",
[]string{}, "A list of comma separated dns server endpoints used for "+
"DNS01 check requests. This should be a list containing host and port, "+
"for example 8.8.8.8:53,8.8.4.4:53")
fs.MarkDeprecated("dns01-self-check-nameservers", "Deprecated in favour of dns01-recursive-nameservers")
fs.BoolVar(&s.EnableCertificateOwnerRef, "enable-certificate-owner-ref", defaultEnableCertificateOwnerRef, ""+
"Whether to set the certificate resource as an owner of secret where the tls certificate is stored. "+
"When this flag is enabled, the secret will be automatically removed when the certificate resource is deleted.")
	fs.StringSliceVar(&s.CopiedAnnotationPrefixes, "copied-annotation-prefixes", defaultCopiedAnnotationPrefixes, "Specify which annotations should/shouldn't be copied "+
		"from Certificate to CertificateRequest and Order, as well as from CertificateSigningRequest to Order, by passing a list of annotation key prefixes. "+
		"A prefix starting with a dash (-) specifies an annotation that shouldn't be copied. Example: '*,-kubectl.kubernetes.io/' - all annotations "+
		"will be copied apart from the ones where the key is prefixed with 'kubectl.kubernetes.io/'.")
fs.IntVar(&s.MaxConcurrentChallenges, "max-concurrent-challenges", defaultMaxConcurrentChallenges, ""+
"The maximum number of challenges that can be scheduled as 'processing' at once.")
fs.DurationVar(&s.DNS01CheckRetryPeriod, "dns01-check-retry-period", defaultDNS01CheckRetryPeriod, ""+
"The duration the controller should wait between checking if a ACME dns entry exists."+
"This should be a valid duration string, for example 180s or 1h")
fs.StringVar(&s.MetricsListenAddress, "metrics-listen-address", defaultPrometheusMetricsServerAddress, ""+
"The host and port that the metrics endpoint should listen on.")
fs.BoolVar(&s.EnablePprof, "enable-profiling", false, ""+
"Enable profiling for controller.")
}
func (o *ControllerOptions) Validate() error {
switch o.DefaultIssuerKind {
case "Issuer":
case "ClusterIssuer":
default:
return fmt.Errorf("invalid default issuer kind: %v", o.DefaultIssuerKind)
}
if o.KubernetesAPIBurst <= 0 {
return fmt.Errorf("invalid value for kube-api-burst: %v must be higher than 0", o.KubernetesAPIBurst)
}
if o.KubernetesAPIQPS <= 0 {
return fmt.Errorf("invalid value for kube-api-qps: %v must be higher than 0", o.KubernetesAPIQPS)
}
if float32(o.KubernetesAPIBurst) < o.KubernetesAPIQPS {
return fmt.Errorf("invalid value for kube-api-burst: %v must be higher or equal to kube-api-qps: %v", o.KubernetesAPIQPS, o.KubernetesAPIQPS)
}
for _, server := range o.DNS01RecursiveNameservers {
// ensure all servers have a port number
_, _, err := net.SplitHostPort(server)
if err != nil {
return fmt.Errorf("invalid DNS server (%v): %v", err, server)
}
}
errs := []error{}
allControllersSet := sets.NewString(allControllers...)
for _, controller := range o.controllers {
if controller == "*" {
continue
}
controller = strings.TrimPrefix(controller, "-")
if !allControllersSet.Has(controller) {
errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", controller))
}
}
if len(errs) > 0 {
return fmt.Errorf("validation failed for '--controllers': %v", errs)
}
return nil
}
func (o *ControllerOptions) EnabledControllers() sets.String {
var disabled []string
enabled := sets.NewString()
for _, controller := range o.controllers {
switch {
case controller == "*":
enabled = enabled.Insert(defaultEnabledControllers...)
case strings.HasPrefix(controller, "-"):
disabled = append(disabled, strings.TrimPrefix(controller, "-"))
default:
enabled = enabled.Insert(controller)
}
}
enabled = enabled.Delete(disabled...)
if utilfeature.DefaultFeatureGate.Enabled(feature.ExperimentalCertificateSigningRequestControllers) {
logf.Log.Info("enabling all experimental certificatesigningrequest controllers")
enabled = enabled.Insert(experimentalCertificateSigningRequestControllers...)
}
return enabled
}
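// Illustration: with --controllers=*,-foo (the same shape used in the flag's help text),
// the "*" entry inserts the default controller set and the "-foo" entry is collected
// into the disabled list and deleted from the set afterwards.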
| 1 | 28,671 | Would this make more sense as an option on an Issuer resource (specifically on the CA issuer) rather than as a flag? It's not ideal that we have a flag that _looks_ like it could be 'global' but is actually not IMO. Also, if Istio is reading the CSR object, is it possible for it to read the CA from the CSR itself too and append the two instead? Not sure if we ever decided whether to use annotations for this purpose @JoshVanL? | jetstack-cert-manager | go |
@@ -10,6 +10,7 @@ class TargetType(Enum):
PANDAS = 'pandas'
FILE = 'file'
+SERVER_TIME_F = "%Y-%m-%dT%H:%M:%S"
DATEF = '%F'
TIMEF = '%T'
DTIMEF = '%s %s' % (DATEF, TIMEF) | 1 | """
Constants
"""
from enum import Enum
class TargetType(Enum):
"""
Enums for target types
"""
PANDAS = 'pandas'
FILE = 'file'
DATEF = '%F'
TIMEF = '%T'
DTIMEF = '%s %s' % (DATEF, TIMEF)
LATEST_TAG = 'latest'
PACKAGE_DIR_NAME = 'quilt_packages'
# reserved words in build.yml
RESERVED = {
'file': 'file',
'transform': 'transform'
}
# SHA-2 Family
HASH_TYPE = 'sha256'
# RSA digital signature key Size
RSA_BITS = 2048
# TODO nan probably not a safe choice and may pollute number cols with strs
kwargs = {'keep_default_na': False, 'na_values': ['nan']}
# Supported build targets and file types
# BUILD[target][file_extension]
# file_extension should be lowercase
TARGET = {
'pandas': {
'csv': {
'attr': 'read_csv',
'failover' : {'engine' : 'python'},
'kwargs': kwargs
},
'ssv': {
'attr': 'read_csv',
'failover' : {'engine' : 'python'},
'kwargs': dict(kwargs, sep=';')
},
'tsv': {
'attr': 'read_csv',
'failover' : {'engine' : 'python'},
'kwargs': dict(kwargs, sep='\t')
},
'xls': {
'attr': 'read_excel',
# TODO set sheetname='None' to get all sheets?
# Currently defaults to sheetname=0, which imports first sheet only
'kwargs': kwargs
},
'xlsx': {
'attr': 'read_excel',
# see comments under 'xls'
'kwargs': kwargs
}
}
}
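# Illustration with a hypothetical caller: a build step can dispatch on this table, e.g.
#   spec = TARGET['pandas']['tsv']
#   df = getattr(pandas, spec['attr'])(path, **spec['kwargs'])  # pandas.read_csv(..., sep='\t')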
| 1 | 14,973 | Because this acts as a coordination point between client and server, it should go in core.py instead of const.py (so eventually the server could use it to guarantee that it delivers dates in the expected format). | quiltdata-quilt | py |
@@ -212,8 +212,11 @@ func (r *createOrUpdate) Apply(obj *unstructured.Unstructured, subresources ...s
return
}
resource, err = r.options.Getter.Get(obj.GetName(), metav1.GetOptions{})
- if err != nil && apierrors.IsNotFound(errors.Cause(err)) {
- return r.options.Creator.Create(obj, subresources...)
+ if err != nil {
+ if apierrors.IsNotFound(errors.Cause(err)) {
+ return r.options.Creator.Create(obj, subresources...)
+ }
+ return nil, err
}
return r.options.Updater.Update(resource, obj, subresources...)
} | 1 | /*
Copyright 2018 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO
// Move this file to pkg/k8sresource/v1alpha1
package v1alpha1
import (
"strings"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// ResourceCreator abstracts creating an unstructured instance in kubernetes
// cluster
type ResourceCreator interface {
Create(obj *unstructured.Unstructured, subresources ...string) (*unstructured.Unstructured, error)
}
// ResourceGetter abstracts fetching an unstructured instance from kubernetes
// cluster
type ResourceGetter interface {
Get(name string, options metav1.GetOptions, subresources ...string) (*unstructured.Unstructured, error)
}
// ResourceLister abstracts fetching an unstructured list of instances from kubernetes
// cluster
type ResourceLister interface {
List(options metav1.ListOptions) (*unstructured.UnstructuredList, error)
}
// ResourceUpdater abstracts updating an unstructured instance found in
// kubernetes cluster
type ResourceUpdater interface {
Update(oldobj, newobj *unstructured.Unstructured, subresources ...string) (u *unstructured.Unstructured, err error)
}
// ResourceApplier abstracts applying an unstructured instance that may or may
// not be available in kubernetes cluster
type ResourceApplier interface {
Apply(obj *unstructured.Unstructured, subresources ...string) (*unstructured.Unstructured, error)
}
// ResourceDeleter abstracts deleting an unstructured instance that is available in kubernetes cluster
type ResourceDeleter interface {
Delete(obj *unstructured.Unstructured, subresources ...string) error
}
type resource struct {
gvr schema.GroupVersionResource // identify a resource
namespace string // namespace where this resource is to be operated at
}
// Resource returns a new resource instance
func Resource(gvr schema.GroupVersionResource, namespace string) *resource {
return &resource{gvr: gvr, namespace: namespace}
}
// Create creates a new resource in kubernetes cluster
func (r *resource) Create(obj *unstructured.Unstructured, subresources ...string) (u *unstructured.Unstructured, err error) {
if obj == nil {
err = errors.Errorf("nil resource instance: failed to create resource '%s' at '%s'", r.gvr, r.namespace)
return
}
dynamic, err := Dynamic().Provide()
if err != nil {
err = errors.Wrapf(err, "failed to create resource '%s' '%s' at '%s'", r.gvr, obj.GetName(), r.namespace)
return
}
u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).Create(obj, metav1.CreateOptions{}, subresources...)
if err != nil {
err = errors.Wrapf(err, "failed to create resource '%s' '%s' at '%s'", r.gvr, obj.GetName(), r.namespace)
return
}
return
}
// Delete deletes an existing resource in the kubernetes cluster
func (r *resource) Delete(obj *unstructured.Unstructured, subresources ...string) error {
if obj == nil {
return errors.Errorf("nil resource instance: failed to delete resource '%s' at '%s'", r.gvr, r.namespace)
}
dynamic, err := Dynamic().Provide()
if err != nil {
return errors.Wrapf(err, "failed to delete resource '%s' '%s' at '%s'", r.gvr, obj.GetName(), r.namespace)
}
err = dynamic.Resource(r.gvr).Namespace(r.namespace).Delete(obj.GetName(), &metav1.DeleteOptions{})
if err != nil {
return errors.Wrapf(err, "failed to delete resource '%s' '%s' at '%s'", r.gvr, obj.GetName(), r.namespace)
}
return nil
}
// Get returns a specific resource from kubernetes cluster
func (r *resource) Get(name string, opts metav1.GetOptions, subresources ...string) (u *unstructured.Unstructured, err error) {
if len(strings.TrimSpace(name)) == 0 {
err = errors.Errorf("missing resource name: failed to get resource '%s' at '%s'", r.gvr, r.namespace)
return
}
dynamic, err := Dynamic().Provide()
if err != nil {
err = errors.Wrapf(err, "failed to get resource '%s' '%s' at '%s'", r.gvr, name, r.namespace)
return
}
u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).Get(name, opts, subresources...)
if err != nil {
err = errors.Wrapf(err, "failed to get resource '%s' '%s' at '%s'", r.gvr, name, r.namespace)
return
}
return
}
// Update updates the resource at kubernetes cluster
func (r *resource) Update(oldobj, newobj *unstructured.Unstructured, subresources ...string) (u *unstructured.Unstructured, err error) {
if oldobj == nil {
err = errors.Errorf("nil old resource instance: failed to update resource '%s' at '%s'", r.gvr, r.namespace)
return
}
if newobj == nil {
err = errors.Errorf("nil new resource instance: failed to update resource '%s' at '%s'", r.gvr, r.namespace)
return
}
dynamic, err := Dynamic().Provide()
if err != nil {
err = errors.Wrapf(err, "failed to update resource '%s' '%s' at '%s'", r.gvr, oldobj.GetName(), r.namespace)
return
}
resourceVersion := oldobj.GetResourceVersion()
newobj.SetResourceVersion(resourceVersion)
u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).Update(newobj, metav1.UpdateOptions{}, subresources...)
if err != nil {
err = errors.Wrapf(err, "failed to update resource '%s' '%s' at '%s'", r.gvr, oldobj.GetName(), r.namespace)
return
}
return
}
// List returns a list of a specific resource from the kubernetes cluster
func (r *resource) List(opts metav1.ListOptions) (u *unstructured.UnstructuredList, err error) {
dynamic, err := Dynamic().Provide()
if err != nil {
err = errors.Wrapf(err, "failed to list resource '%s' at '%s'", r.gvr, r.namespace)
return
}
u, err = dynamic.Resource(r.gvr).Namespace(r.namespace).List(opts)
if err != nil {
err = errors.Wrapf(err, "failed to list resource '%s' at '%s'", r.gvr, r.namespace)
return
}
return
}
// ResourceApplyOptions is a utility instance used during the resource's apply
// operation
type ResourceApplyOptions struct {
Getter ResourceGetter
Creator ResourceCreator
Updater ResourceUpdater
}
// createOrUpdate is a resource that is suitable to be executed as an apply
// operation
type createOrUpdate struct {
*resource
options ResourceApplyOptions // options used during resource's apply operation
}
// CreateOrUpdate returns a new instance of createOrUpdate resource
func CreateOrUpdate(gvr schema.GroupVersionResource, namespace string) *createOrUpdate {
resource := Resource(gvr, namespace)
options := ResourceApplyOptions{Getter: resource, Creator: resource, Updater: resource}
return &createOrUpdate{resource: resource, options: options}
}
// Apply applies a resource to the kubernetes cluster. In other words, it
// creates a new resource if it does not exist or updates the existing resource.
func (r *createOrUpdate) Apply(obj *unstructured.Unstructured, subresources ...string) (resource *unstructured.Unstructured, err error) {
if r.options.Getter == nil {
err = errors.New("nil resource getter instance: failed to apply resource")
return
}
if r.options.Creator == nil {
err = errors.New("nil resource creator instance: failed to apply resource")
return
}
if r.options.Updater == nil {
err = errors.New("nil resource updater instance: failed to apply resource")
return
}
if obj == nil {
err = errors.New("nil resource instance: failed to apply resource")
return
}
resource, err = r.options.Getter.Get(obj.GetName(), metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(errors.Cause(err)) {
return r.options.Creator.Create(obj, subresources...)
}
return r.options.Updater.Update(resource, obj, subresources...)
}
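// A rough usage sketch of the apply flow above; the GroupVersionResource,
// namespace and object below are assumed for illustration and do not come
// from this file:
//
//   gvr := schema.GroupVersionResource{Group: "openebs.io", Version: "v1alpha1", Resource: "castemplates"}
//   applied, err := CreateOrUpdate(gvr, "default").Apply(obj)
//
// A NotFound error from the Getter triggers a Create, while an existing
// resource is Updated in place.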
// ResourceDeleteOptions is a utility instance used during the resource's delete operations
type ResourceDeleteOptions struct {
Deleter ResourceDeleter
}
// Delete is a resource that is suitable to be executed as a Delete operation
type Delete struct {
*resource
options ResourceDeleteOptions
}
// DeleteResource returns a new instance of delete resource
func DeleteResource(gvr schema.GroupVersionResource, namespace string) *Delete {
resource := Resource(gvr, namespace)
options := ResourceDeleteOptions{Deleter: resource}
return &Delete{resource: resource, options: options}
}
// Delete deletes a resource from a kubernetes cluster
func (d *Delete) Delete(obj *unstructured.Unstructured, subresources ...string) error {
if d.options.Deleter == nil {
return errors.New("nil resource deleter instance: failed to delete resource")
} else if obj == nil {
return errors.New("nil resource instance: failed to delete resource")
}
return d.options.Deleter.Delete(obj, subresources...)
}
// ResourceListOptions is a utility instance used during the resource's list operations
type ResourceListOptions struct {
Lister ResourceLister
}
// List is a resource that is suitable to be executed as a List operation
type List struct {
*resource
options ResourceListOptions
}
// ListResource returns a new instance of list resource
func ListResource(gvr schema.GroupVersionResource, namespace string) *List {
resource := Resource(gvr, namespace)
options := ResourceListOptions{Lister: resource}
return &List{resource: resource, options: options}
}
// List lists a resource from a kubernetes cluster
func (l *List) List(options metav1.ListOptions) (u *unstructured.UnstructuredList, err error) {
if l.options.Lister == nil {
err = errors.New("nil resource lister instance: failed to list resource")
return
}
return l.options.Lister.List(options)
}
// ResourceGetOptions is a utility instance used during the resource's get operations
type ResourceGetOptions struct {
Getter ResourceGetter
}
// Get is a resource that is suitable to be executed as a Get operation
type Get struct {
*resource
options ResourceGetOptions
}
// GetResource returns a new instance of get resource
func GetResource(gvr schema.GroupVersionResource, namespace string) *Get {
resource := Resource(gvr, namespace)
options := ResourceGetOptions{Getter: resource}
return &Get{resource: resource, options: options}
}
// Get gets a resource from a kubernetes cluster
func (g *Get) Get(name string, opts metav1.GetOptions, subresources ...string) (u *unstructured.Unstructured, err error) {
if g.options.Getter == nil {
err = errors.New("nil resource getter instance: failed to get resource")
return
}
return g.options.Getter.Get(name, opts, subresources...)
}
| 1 | 12,865 | expected statement, found 'else' (and 1 more errors) | openebs-maya | go |
@@ -16,6 +16,17 @@ func FakeID(b byte, public bool) ID {
return ID{bytes}
}
+// FakeIDRandomOrBust creates a fake public or private TLF ID whose first
+// byte is the given byte and whose remaining bytes are randomly generated.
+func FakeIDRandomOrBust(b byte, public bool) ID {
+ id, err := MakeRandomID(public)
+ if err != nil {
+ panic(err)
+ }
+ id.id[0] = b
+ return id
+}
+
// FakeIDByte returns the byte used to create a fake TLF ID with
// FakeID.
func FakeIDByte(id ID) byte { | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package tlf
// FakeID creates a fake public or private TLF ID from the given
// byte.
func FakeID(b byte, public bool) ID {
bytes := [idByteLen]byte{b}
if public {
bytes[idByteLen-1] = pubIDSuffix
} else {
bytes[idByteLen-1] = idSuffix
}
return ID{bytes}
}
// FakeIDByte returns the byte used to create a fake TLF ID with
// FakeID.
func FakeIDByte(id ID) byte {
return id.id[0]
}
| 1 | 16,741 | IMO this doesn't need to be in the `kbfs` repo. And I don't like setting the byte. We can always compare with the randomly generated `TlfID`. | keybase-kbfs | go |
@@ -0,0 +1,7 @@
+package constants
+
+const (
+ DeviceControllerModuleName = "devicecontroller"
+ CloudHubControllerModuleName = "cloudhub"
+ EdgeControllerModuleName = "edgecontroller"
+) | 1 | 1 | 14,442 | What's the difference from ModuleName in `pkg/apis/meta/v1alpha1/types.go`? | kubeedge-kubeedge | go |
|
@@ -8,11 +8,12 @@ using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
-using System.Reflection.Metadata;
-using System.Reflection.PortableExecutable;
using System.Text;
using Newtonsoft.Json.Linq;
using Newtonsoft.Json;
+using System.Security.Cryptography;
+using System.Reflection.PortableExecutable;
+using System.Reflection.Metadata;
namespace Microsoft.DotNet.Build.Tasks
{ | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
using Microsoft.Build.Framework;
using Microsoft.Build.Utilities;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Reflection.Metadata;
using System.Reflection.PortableExecutable;
using System.Text;
using Newtonsoft.Json.Linq;
using Newtonsoft.Json;
namespace Microsoft.DotNet.Build.Tasks
{
/// <summary>
/// Resolves the assets out of packages in the project.lock.json
/// </summary>
public sealed class PrereleaseResolveNuGetPackageAssets : Task
{
internal const string NuGetPackageIdMetadata = "NuGetPackageId";
internal const string NuGetPackageVersionMetadata = "NuGetPackageVersion";
internal const string ReferenceImplementationMetadata = "Implementation";
internal const string ReferenceImageRuntimeMetadata = "ImageRuntime";
internal const string ReferenceWinMDFileMetadata = "WinMDFile";
internal const string ReferenceWinMDFileTypeMetadata = "WinMDFileType";
internal const string WinMDFileTypeManaged = "Managed";
internal const string WinMDFileTypeNative = "Native";
internal const string NuGetAssetTypeCompile = "compile";
internal const string NuGetAssetTypeNative = "native";
internal const string NuGetAssetTypeRuntime = "runtime";
internal const string NuGetAssetTypeResource = "resource";
private readonly List<ITaskItem> _analyzers = new List<ITaskItem>();
private readonly List<ITaskItem> _copyLocalItems = new List<ITaskItem>();
private readonly List<ITaskItem> _references = new List<ITaskItem>();
private readonly List<ITaskItem> _referencedPackages = new List<ITaskItem>();
#region UnitTestSupport
private readonly DirectoryExists _directoryExists = new DirectoryExists(Directory.Exists);
private readonly FileExists _fileExists = new FileExists(File.Exists);
private readonly TryGetRuntimeVersion _tryGetRuntimeVersion = new TryGetRuntimeVersion(TryGetRuntimeVersion);
internal PrereleaseResolveNuGetPackageAssets(DirectoryExists directoryExists, FileExists fileExists, TryGetRuntimeVersion tryGetRuntimeVersion)
: this()
{
if (directoryExists != null)
{
_directoryExists = directoryExists;
}
if (fileExists != null)
{
_fileExists = fileExists;
}
if (tryGetRuntimeVersion != null)
{
_tryGetRuntimeVersion = tryGetRuntimeVersion;
}
}
#endregion
/// <summary>
/// Creates a new <see cref="PrereleaseResolveNuGetPackageAssets"/>.
/// </summary>
public PrereleaseResolveNuGetPackageAssets()
{
}
/// <summary>
/// The full paths to resolved analyzers.
/// </summary>
[Output]
public ITaskItem[] ResolvedAnalyzers
{
get { return _analyzers.ToArray(); }
}
/// <summary>
/// The full paths to resolved run-time resources.
/// </summary>
[Output]
public ITaskItem[] ResolvedCopyLocalItems
{
get { return _copyLocalItems.ToArray(); }
}
/// <summary>
/// The full paths to resolved build-time dependencies. Contains standard metadata for Reference items.
/// </summary>
[Output]
public ITaskItem[] ResolvedReferences
{
get { return _references.ToArray(); }
}
/// <summary>
/// The names of NuGet packages directly referenced by this project.
/// </summary>
[Output]
public ITaskItem[] ReferencedPackages
{
get { return _referencedPackages.ToArray(); }
}
/// <summary>
/// The target monikers to use when selecting assets from packages. The first one found in the lock file is used.
/// </summary>
[Required]
public ITaskItem[] TargetMonikers
{
get; set;
}
[Required]
public string ProjectLockFile
{
get; set;
}
public string NuGetPackagesDirectory
{
get; set;
}
public string RuntimeIdentifier
{
get; set;
}
public bool AllowFallbackOnTargetSelection
{
get; set;
}
public string ProjectLanguage
{
get; set;
}
public bool IncludeFrameworkReferences
{
get; set;
}
/// <summary>
/// Performs the NuGet package resolution.
/// </summary>
public override bool Execute()
{
Log.TaskResources = Strings.ResourceManager;
try
{
ExecuteCore();
return true;
}
catch (ExceptionFromResource e)
{
Log.LogErrorFromResources(e.ResourceName, e.MessageArgs);
return false;
}
catch (Exception e)
{
Log.LogErrorFromException(e);
return false;
}
}
private void ExecuteCore()
{
if (!_fileExists(ProjectLockFile))
{
throw new ExceptionFromResource("LockFileNotFound", ProjectLockFile);
}
JObject lockFile;
using (var streamReader = new StreamReader(new FileStream(ProjectLockFile, FileMode.Open, FileAccess.Read, FileShare.Read)))
{
lockFile = JObject.Load(new JsonTextReader(streamReader));
}
GetReferences(lockFile);
GetCopyLocalItems(lockFile);
GetAnalyzers(lockFile);
GetReferencedPackages(lockFile);
}
private void GetReferences(JObject lockFile)
{
var target = GetTargetOrAttemptFallback(lockFile, needsRuntimeIdentifier: false);
var frameworkReferences = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
var fileNamesOfRegularReferences = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
foreach (var package in target)
{
var packageNameParts = package.Key.Split('/');
var packageName = packageNameParts[0];
var packageVersion = packageNameParts[1];
Log.LogMessageFromResources(MessageImportance.Low, "ResolvedReferencesFromPackage", packageName);
foreach (var referenceItem in CreateItems(packageName, packageVersion, package.Value, NuGetAssetTypeCompile))
{
_references.Add(referenceItem);
fileNamesOfRegularReferences.Add(Path.GetFileNameWithoutExtension(referenceItem.ItemSpec));
}
if (IncludeFrameworkReferences)
{
var frameworkAssembliesArray = package.Value["frameworkAssemblies"] as JArray;
if (frameworkAssembliesArray != null)
{
foreach (var frameworkAssembly in frameworkAssembliesArray.OfType<JToken>())
{
frameworkReferences.Add((string)frameworkAssembly);
}
}
}
}
foreach (var frameworkReference in frameworkReferences.Except(fileNamesOfRegularReferences, StringComparer.OrdinalIgnoreCase))
{
_references.Add(new TaskItem(frameworkReference));
}
}
private void GetCopyLocalItems(JObject lockFile)
{
// If we have no runtime identifier, we're not copying implementations
if (string.IsNullOrEmpty(RuntimeIdentifier))
{
return;
}
// We'll use as a fallback just the target moniker if the user didn't have the right runtime identifier in their lock file.
var target = GetTargetOrAttemptFallback(lockFile, needsRuntimeIdentifier: true);
HashSet<string> candidateNativeImplementations = new HashSet<string>(StringComparer.OrdinalIgnoreCase);
List<ITaskItem> runtimeWinMDItems = new List<ITaskItem>();
foreach (var package in target)
{
var packageNameParts = package.Key.Split('/');
var packageName = packageNameParts[0];
var packageVersion = packageNameParts[1];
Log.LogMessageFromResources(MessageImportance.Low, "ResolvedReferencesFromPackage", packageName);
foreach(var nativeItem in CreateItems(packageName, packageVersion, package.Value, NuGetAssetTypeNative))
{
if (Path.GetExtension(nativeItem.ItemSpec).Equals(".dll", StringComparison.OrdinalIgnoreCase))
{
candidateNativeImplementations.Add(Path.GetFileNameWithoutExtension(nativeItem.ItemSpec));
}
_copyLocalItems.Add(nativeItem);
}
foreach (var runtimeItem in CreateItems(packageName, packageVersion, package.Value, NuGetAssetTypeRuntime))
{
if (Path.GetExtension(runtimeItem.ItemSpec).Equals(".winmd", StringComparison.OrdinalIgnoreCase))
{
runtimeWinMDItems.Add(runtimeItem);
}
_copyLocalItems.Add(runtimeItem);
}
foreach (var resourceItem in CreateItems(packageName, packageVersion, package.Value, NuGetAssetTypeResource))
{
_copyLocalItems.Add(resourceItem);
}
}
SetWinMDMetadata(runtimeWinMDItems, candidateNativeImplementations);
}
private void GetAnalyzers(JObject lockFile)
{
// For analyzers, analyzers could be provided in runtime implementation packages. This might be reasonable -- imagine a gatekeeper
// scenario where somebody has a library but on .NET Native might have some specific restrictions that need to be enforced.
var target = GetTargetOrAttemptFallback(lockFile, needsRuntimeIdentifier: !string.IsNullOrEmpty(RuntimeIdentifier));
var libraries = (JObject)lockFile["libraries"];
foreach (var package in target.Children())
{
var name = (package is JProperty) ? ((JProperty)package).Name : null;
var packageNameParts = name != null ? name.Split('/') : null;
if (packageNameParts == null)
{
continue;
}
var packageId = packageNameParts[0];
var packageVersion = packageNameParts[1];
var librariesPackage = libraries[name];
foreach (var file in librariesPackage["files"].Children()
.Select(x => x.ToString())
.Where(x => x.StartsWith("analyzers")))
{
if (Path.GetExtension(file).Equals(".dll", StringComparison.OrdinalIgnoreCase))
{
string path;
if (TryGetFile(packageId, packageVersion, file, out path))
{
var analyzer = new TaskItem(path);
analyzer.SetMetadata(NuGetPackageIdMetadata, packageId);
analyzer.SetMetadata(NuGetPackageVersionMetadata, packageVersion);
_analyzers.Add(analyzer);
}
}
}
}
}
private void SetWinMDMetadata(IEnumerable<ITaskItem> runtimeWinMDs, ICollection<string> candidateImplementations)
{
foreach(var winMD in runtimeWinMDs.Where(w => _fileExists(w.ItemSpec)))
{
string imageRuntimeVersion = _tryGetRuntimeVersion(winMD.ItemSpec);
if (String.IsNullOrEmpty(imageRuntimeVersion))
continue;
// RAR sets ImageRuntime for everything but the only dependencies we're aware of are
// for WinMDs
winMD.SetMetadata(ReferenceImageRuntimeMetadata, imageRuntimeVersion);
bool isWinMD, isManaged;
TryParseRuntimeVersion(imageRuntimeVersion, out isWinMD, out isManaged);
if (isWinMD)
{
winMD.SetMetadata(ReferenceWinMDFileMetadata, "true");
if (isManaged)
{
winMD.SetMetadata(ReferenceWinMDFileTypeMetadata, WinMDFileTypeManaged);
}
else
{
winMD.SetMetadata(ReferenceWinMDFileTypeMetadata, WinMDFileTypeNative);
// Normally RAR will expect the native DLL to be next to the WinMD, but that doesn't
// work well for nuget packages since compile time assets cannot be architecture specific.
// We also explicitly set all compile time assets to not copy local so we need to
// make sure that this metadata is set on the runtime asset.
// Examine all runtime assets that are native winmds and add Implementation metadata
// We intentionally permit this to cross package boundaries to support cases where
// folks want to split their architecture specific implementations into runtime
// specific packages.
// Sample layout
// lib\netcore50\Contoso.Controls.winmd
// lib\netcore50\Contoso.Controls.xml
// runtimes\win10-arm\native\Contoso.Controls.dll
// runtimes\win10-x64\native\Contoso.Controls.dll
// runtimes\win10-x86\native\Contoso.Controls.dll
string fileName = Path.GetFileNameWithoutExtension(winMD.ItemSpec);
// determine if we have a Native WinMD that could be satisfied by this native dll.
if (candidateImplementations.Contains(fileName))
{
winMD.SetMetadata(ReferenceImplementationMetadata, fileName + ".dll");
}
}
}
}
}
private bool TryGetFile(string packageName, string packageVersion, string file, out string path)
{
if (IsFileValid(file, "C#", "VB"))
{
path = GetPath(packageName, packageVersion, file);
return true;
}
else if (IsFileValid(file, "VB", "C#"))
{
path = GetPath(packageName, packageVersion, file);
return true;
}
path = null;
return false;
}
private bool IsFileValid(string file, string expectedLanguage, string unExpectedLanguage)
{
if(ProjectLanguage == null)
{
throw new ExceptionFromResource("NoProgrammingLanguageSpecified");
}
var expectedProjectLanguage = expectedLanguage;
expectedLanguage = expectedLanguage == "C#" ? "cs" : expectedLanguage;
unExpectedLanguage = unExpectedLanguage == "C#" ? "cs" : unExpectedLanguage;
return (ProjectLanguage.Equals(expectedProjectLanguage, StringComparison.OrdinalIgnoreCase)) &&
(file.Split('/').Any(x => x.Equals(ProjectLanguage, StringComparison.OrdinalIgnoreCase)) ||
!file.Split('/').Any(x => x.Equals(unExpectedLanguage, StringComparison.OrdinalIgnoreCase)));
}
private string GetPath(string packageName, string packageVersion, string file)
{
return Path.Combine(GetNuGetPackagePath(packageName, packageVersion), file.Replace('/', '\\'));
}
/// <summary>
/// Fetches the right target from the targets section in a lock file, or attempts to find a "best match" if allowed. The "best match" logic
/// is there to allow a design time build for the IDE to generally work even if something isn't quite right. Throws an exception
/// if either the preferred isn't there and fallbacks aren't allowed, or fallbacks are allowed but nothing at all could be found.
/// </summary>
/// <param name="lockFile">The lock file JSON.</param>
/// <param name="needsRuntimeIdentifier">Whether we must find targets that include the runtime identifier or one without the runtime identifier.</param>
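/// <remarks>
/// Illustrative example only (the moniker and runtime identifier are assumed, not taken from
/// this repository): with TargetMonikers = { ".NETCoreApp,Version=v1.0" } and
/// RuntimeIdentifier = "win7-x64", a lookup that needs the runtime identifier first tries the
/// ".NETCoreApp,Version=v1.0/win7-x64" key under "targets", and only then applies the fallback
/// rules described above.
/// </remarks>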
private JObject GetTargetOrAttemptFallback(JObject lockFile, bool needsRuntimeIdentifier)
{
var targets = (JObject)lockFile["targets"];
foreach (var preferredTargetMoniker in TargetMonikers)
{
var preferredTargetMonikerWithOptionalRuntimeIdentifier = GetTargetMonikerWithOptionalRuntimeIdentifier(preferredTargetMoniker, needsRuntimeIdentifier);
var target = (JObject)targets[preferredTargetMonikerWithOptionalRuntimeIdentifier];
if (target != null)
{
return target;
}
}
var preferredForErrorMessages = GetTargetMonikerWithOptionalRuntimeIdentifier(TargetMonikers.First(), needsRuntimeIdentifier);
if (!AllowFallbackOnTargetSelection)
{
// If we're not falling back then abort the build
throw new ExceptionFromResource("MissingEntryInLockFile", preferredForErrorMessages);
}
// We are allowing fallback, so we'll still give a warning but allow us to continue
// In production ResolveNuGetPackageAssets, this call is LogWarningFromResources.
// In our current use in dotnet\buildtools, we rely on the fallback behavior, so we just log
// this as a message.
Log.LogMessageFromResources("MissingEntryInLockFile", preferredForErrorMessages);
foreach (var fallback in TargetMonikers)
{
var target = (JObject)targets[GetTargetMonikerWithOptionalRuntimeIdentifier(fallback, needsRuntimeIdentifier: false)];
if (target != null)
{
return target;
}
}
// Anything goes
var enumerableTargets = targets.Cast<KeyValuePair<string, JToken>>();
var firstTarget = (JObject)enumerableTargets.FirstOrDefault().Value;
if (firstTarget == null)
{
throw new ExceptionFromResource("NoTargetsInLockFile");
}
return firstTarget;
}
private string GetTargetMonikerWithOptionalRuntimeIdentifier(ITaskItem preferredTargetMoniker, bool needsRuntimeIdentifier)
{
return needsRuntimeIdentifier ? preferredTargetMoniker.ItemSpec + "/" + RuntimeIdentifier : preferredTargetMoniker.ItemSpec;
}
private IEnumerable<ITaskItem> CreateItems(string packageId, string packageVersion, JToken packageObject, string key)
{
var values = packageObject[key] as JObject;
var items = new List<ITaskItem>();
if (values == null)
{
return items;
}
var nugetPackage = GetNuGetPackagePath(packageId, packageVersion);
foreach (string file in values.Properties().Select(p => p.Name))
{
if (Path.GetFileName(file) == "_._")
{
continue;
}
var sanitizedFile = file.Replace('/', '\\');
var nugetPath = Path.Combine(nugetPackage, sanitizedFile);
var item = new TaskItem(nugetPath);
item.SetMetadata(NuGetPackageIdMetadata, packageId);
item.SetMetadata(NuGetPackageVersionMetadata, packageVersion);
item.SetMetadata("Private", "false");
string targetPath = TryGetTargetPath(sanitizedFile);
if (targetPath != null)
{
var destinationSubDirectory = Path.GetDirectoryName(targetPath);
if (!string.IsNullOrEmpty(destinationSubDirectory))
{
item.SetMetadata("DestinationSubDirectory", destinationSubDirectory + "\\");
}
item.SetMetadata("TargetPath", targetPath);
}
items.Add(item);
}
return items;
}
private static string TryGetTargetPath(string file)
{
var foldersAndFile = file.Split('\\').ToArray();
#if TODO // Not sure if we support culture specific directories yet...
for (int i = foldersAndFile.Length - 1; i > -1; i--)
{
if (CultureStringUtilities.IsValidCultureString(foldersAndFile[i]))
{
return Path.Combine(foldersAndFile.Skip(i).ToArray());
}
}
#endif
// There is no culture-specific directory, so it'll go in the root
return null;
}
private void GetReferencedPackages(JObject lockFile)
{
var projectFileDependencyGroups = (JObject)lockFile["projectFileDependencyGroups"];
// find whichever target we will have selected
var actualTarget = GetTargetOrAttemptFallback(lockFile, needsRuntimeIdentifier: false)?.Parent as JProperty;
string targetMoniker = null;
if (actualTarget != null)
{
targetMoniker = actualTarget.Name.Split('/').FirstOrDefault();
}
foreach (var dependencyGroup in projectFileDependencyGroups.Values<JProperty>())
{
if (dependencyGroup.Name.Length == 0 || dependencyGroup.Name == targetMoniker)
{
foreach (var packageDependency in dependencyGroup.Value.Values<string>())
{
int firstSpace = packageDependency.IndexOf(' ');
if (firstSpace > -1)
{
_referencedPackages.Add(new TaskItem(packageDependency.Substring(0, firstSpace)));
}
}
}
}
}
private sealed class ExceptionFromResource : Exception
{
public string ResourceName { get; private set; }
public object[] MessageArgs { get; private set; }
public ExceptionFromResource(string resourceName, params object[] messageArgs)
{
ResourceName = resourceName;
MessageArgs = messageArgs;
}
}
private string GetNuGetPackagePath(string packageId, string packageVersion)
{
string packagesFolder = GetNuGetPackagesPath();
string packagePath = Path.Combine(packagesFolder, packageId, packageVersion);
if (!_directoryExists(packagePath))
{
throw new ExceptionFromResource("PackageFolderNotFound", packageId, packageVersion, packagesFolder);
}
return packagePath;
}
private string GetNuGetPackagesPath()
{
if (!string.IsNullOrEmpty(NuGetPackagesDirectory))
{
return NuGetPackagesDirectory;
}
string packagesFolder = Environment.GetEnvironmentVariable("NUGET_PACKAGES");
if (!string.IsNullOrEmpty(packagesFolder))
{
return packagesFolder;
}
return string.Empty;
}
/// <summary>
/// Parse the imageRuntimeVersion from COR header
/// </summary>
private void TryParseRuntimeVersion(string imageRuntimeVersion, out bool isWinMD, out bool isManaged)
{
if (!String.IsNullOrEmpty(imageRuntimeVersion))
{
isWinMD = imageRuntimeVersion.IndexOf("WindowsRuntime", StringComparison.OrdinalIgnoreCase) >= 0;
isManaged = imageRuntimeVersion.IndexOf("CLR", StringComparison.OrdinalIgnoreCase) >= 0;
}
else
{
isWinMD = isManaged = false;
}
}
/// <summary>
/// Given a path get the CLR runtime version of the file
/// </summary>
/// <param name="path">path to the file</param>
/// <returns>The CLR runtime version or empty if the path does not exist.</returns>
private static string TryGetRuntimeVersion(string path)
{
try
{
using (FileStream stream = File.OpenRead(path))
using (PEReader peReader = new PEReader(stream))
{
return peReader.GetMetadataReader().MetadataVersion;
}
}
catch (Exception)
{
return string.Empty;
}
}
}
}
| 1 | 8,542 | We shouldn't be changing this task as we are trying to break our dependency on it and switch to using the shipped version. | dotnet-buildtools | .cs |
@@ -737,8 +737,10 @@ analyze_clean_call(dcontext_t *dcontext, clean_call_info_t *cci, instr_t *where,
* unless multiple regs are able to be skipped.
* XXX: This should probably be in arch-specific clean_call_opt.c.
*/
+
if ((cci->num_simd_skip == 0 /* save all xmms */ &&
- cci->num_regs_skip == 0 /* save all regs */ &&
+ /* save all regs except 2, because XSP and XBP are commonly callee saved */
+ cci->num_regs_skip <= 2 &&
!cci->skip_save_flags) ||
always_out_of_line)
cci->out_of_line_swap = true; | 1 | /* **********************************************************
* Copyright (c) 2016 ARM Limited. All rights reserved.
* Copyright (c) 2010-2014 Google, Inc. All rights reserved.
* Copyright (c) 2010 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2000-2010 VMware, Inc. All rights reserved.
* Copyright (c) 2003-2007 Determina Corp.
* Copyright (c) 2001-2003 Massachusetts Institute of Technology
* Copyright (c) 2000-2001 Hewlett-Packard Company
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of ARM Limited nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL ARM LIMITED OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* file "clean_call_opt_shared.c" */
#include "../globals.h"
#include "arch.h"
#include "instrument.h"
#include "../hashtable.h"
#include "disassemble.h"
#include "instr_create.h"
#include "clean_call_opt.h"
/* make code more readable by shortening long lines
* we mark everything we add as a meta-instr to avoid hitting
* client asserts on setting translation fields
*/
#define POST instrlist_meta_postinsert
#define PRE instrlist_meta_preinsert
/****************************************************************************
* clean call callee info table for i#42 and i#43
*/
#ifdef CLIENT_INTERFACE
/* hashtable for storing analyzed callee info */
static generic_table_t *callee_info_table;
/* we only free callee info at exit, when callee_info_table_exit is true. */
static bool callee_info_table_exit = false;
#define INIT_HTABLE_SIZE_CALLEE 6 /* should remain small */
static void
callee_info_init(callee_info_t *ci)
{
uint i;
memset(ci, 0, sizeof(*ci));
ci->bailout = true;
/* to be conservative */
ci->has_locals = true;
ci->write_flags = true;
ci->read_flags = true;
ci->tls_used = true;
/* We use a loop here and memset in analyze_callee_regs_usage later.
* We could reverse the logic and use memset to set the value below,
* but then later in analyze_callee_regs_usage, we have to use the loop.
*/
/* assuming all xmm registers are used */
ci->num_simd_used = NUM_SIMD_REGS;
for (i = 0; i < NUM_SIMD_REGS; i++)
ci->simd_used[i] = true;
for (i = 0; i < NUM_GP_REGS; i++)
ci->reg_used[i] = true;
ci->spill_reg = DR_REG_INVALID;
}
static void
callee_info_free(dcontext_t *dcontext, callee_info_t *ci)
{
if (ci->ilist != NULL) {
ASSERT(ci->opt_inline);
instrlist_clear_and_destroy(GLOBAL_DCONTEXT, ci->ilist);
}
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ci, callee_info_t,
ACCT_CLEANCALL, PROTECTED);
}
static callee_info_t *
callee_info_create(app_pc start, uint num_args)
{
callee_info_t *info;
info = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, callee_info_t,
ACCT_CLEANCALL, PROTECTED);
callee_info_init(info);
info->start = start;
info->num_args = num_args;
return info;
}
void
callee_info_reserve_slot(callee_info_t *ci, slot_kind_t kind, reg_id_t value)
{
if (ci->slots_used < BUFFER_SIZE_ELEMENTS(ci->scratch_slots)) {
if (kind == SLOT_REG)
value = dr_reg_fixer[value];
ci->scratch_slots[ci->slots_used].kind = kind;
ci->scratch_slots[ci->slots_used].value = value;
} else {
LOG(THREAD_GET, LOG_CLEANCALL, 2,
"CLEANCALL: unable to fulfill callee_info_reserve_slot for "
"kind %d value %d\n", kind, value);
}
/* We check if slots_used > CLEANCALL_NUM_INLINE_SLOTS to detect failure. */
ci->slots_used++;
}
opnd_t
callee_info_slot_opnd(callee_info_t *ci, slot_kind_t kind, reg_id_t value)
{
uint i;
if (kind == SLOT_REG)
value = dr_reg_fixer[value];
for (i = 0; i < BUFFER_SIZE_ELEMENTS(ci->scratch_slots); i++) {
if (ci->scratch_slots[i].kind == kind &&
ci->scratch_slots[i].value == value) {
int disp = (int)offsetof(unprotected_context_t,
inline_spill_slots[i]);
return opnd_create_base_disp(ci->spill_reg, DR_REG_NULL, 0, disp,
OPSZ_PTR);
}
}
ASSERT_MESSAGE(CHKLVL_ASSERTS, "Tried to find scratch slot for value "
"without calling callee_info_reserve_slot for it", false);
return opnd_create_null();
}
static void
callee_info_table_init(void)
{
callee_info_table =
generic_hash_create(GLOBAL_DCONTEXT,
INIT_HTABLE_SIZE_CALLEE,
80 /* load factor: not perf-critical */,
HASHTABLE_SHARED | HASHTABLE_PERSISTENT,
(void(*)(dcontext_t*, void*)) callee_info_free
_IF_DEBUG("callee-info table"));
}
static void
callee_info_table_destroy(void)
{
callee_info_table_exit = true;
generic_hash_destroy(GLOBAL_DCONTEXT, callee_info_table);
}
static callee_info_t *
callee_info_table_lookup(void *callee)
{
callee_info_t *ci;
TABLE_RWLOCK(callee_info_table, read, lock);
ci = generic_hash_lookup(GLOBAL_DCONTEXT, callee_info_table,
(ptr_uint_t)callee);
TABLE_RWLOCK(callee_info_table, read, unlock);
/* We only delete the callee_info from the callee_info_table
* when destroying the table on exit, so we can keep the ci
* without holding the lock.
*/
return ci;
}
static callee_info_t *
callee_info_table_add(callee_info_t *ci)
{
callee_info_t *info;
TABLE_RWLOCK(callee_info_table, write, lock);
info = generic_hash_lookup(GLOBAL_DCONTEXT, callee_info_table,
(ptr_uint_t)ci->start);
if (info == NULL) {
info = ci;
generic_hash_add(GLOBAL_DCONTEXT, callee_info_table,
(ptr_uint_t)ci->start, (void *)ci);
} else {
/* Have one in the table, free the new one and use existing one.
* We cannot free the existing one in the table as it might be used by
* other thread without holding the lock.
* Since we assume callee should never be changed, they should have
* the same content of ci.
*/
callee_info_free(GLOBAL_DCONTEXT, ci);
}
TABLE_RWLOCK(callee_info_table, write, unlock);
return info;
}
/****************************************************************************/
/* clean call optimization code */
/* The max number of instructions the callee can have for inline. */
#define MAX_NUM_INLINE_INSTRS 20
/* Decode instruction from callee and return the next_pc to be decoded. */
static app_pc
decode_callee_instr(dcontext_t *dcontext, callee_info_t *ci, app_pc instr_pc)
{
instrlist_t *ilist = ci->ilist;
instr_t *instr;
app_pc next_pc = NULL;
instr = instr_create(GLOBAL_DCONTEXT);
instrlist_append(ilist, instr);
ci->num_instrs++;
TRY_EXCEPT(dcontext, {
next_pc = decode(GLOBAL_DCONTEXT, instr_pc, instr);
}, { /* EXCEPT */
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: crash on decoding callee instruction at: "PFX"\n",
instr_pc);
ASSERT_CURIOSITY(false && "crashed while decoding clean call");
ci->bailout = true;
return NULL;
});
if (!instr_valid(instr)) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: decoding invalid instruction at: "PFX"\n", instr_pc);
ci->bailout = true;
return NULL;
}
instr_set_translation(instr, instr_pc);
DOLOG(3, LOG_CLEANCALL, {
disassemble_with_bytes(dcontext, instr_pc, THREAD);
});
return next_pc;
}
/* check newly decoded instruction from callee */
static app_pc
check_callee_instr(dcontext_t *dcontext, callee_info_t *ci, app_pc next_pc)
{
instrlist_t *ilist = ci->ilist;
instr_t *instr;
app_pc cur_pc, tgt_pc;
if (next_pc == NULL)
return NULL;
instr = instrlist_last(ilist);
cur_pc = instr_get_app_pc(instr);
ASSERT(next_pc == cur_pc + instr_length(dcontext, instr));
if (!instr_is_cti(instr)) {
/* special instructions, bail out. */
if (instr_is_syscall(instr) || instr_is_interrupt(instr)) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: bail out on syscall or interrupt at: "PFX"\n",
cur_pc);
ci->bailout = true;
return NULL;
}
return next_pc;
} else { /* cti instruction */
if (instr_is_mbr(instr)) {
/* check if instr is return, and if return is the last instr. */
if (!instr_is_return(instr) || ci->fwd_tgt > cur_pc) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: bail out on indirect branch at: "PFX"\n",
cur_pc);
ci->bailout = true;
}
return NULL;
} else if (instr_is_call(instr)) {
tgt_pc = opnd_get_pc(instr_get_target(instr));
/* remove and destroy the call instruction */
ci->bailout = true;
instrlist_remove(ilist, instr);
instr_destroy(GLOBAL_DCONTEXT, instr);
instr = NULL;
ci->num_instrs--;
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: callee calls out at: "PFX" to "PFX"\n",
cur_pc, tgt_pc);
/* check special PIC code:
* 1. call next_pc; pop r1;
* or
* 2. call pic_func;
* and in pic_func: mov [%xsp] %r1; ret;
*/
if (INTERNAL_OPTION(opt_cleancall) >= 1)
return check_callee_instr_level2(dcontext, ci, next_pc, cur_pc, tgt_pc);
} else { /* ubr or cbr */
tgt_pc = opnd_get_pc(instr_get_target(instr));
if (tgt_pc < cur_pc) { /* backward branch */
if (tgt_pc < ci->start) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: bail out on out-of-range branch at: "PFX
"to "PFX"\n", cur_pc, tgt_pc);
ci->bailout = true;
return NULL;
} else if (ci->bwd_tgt == NULL || tgt_pc < ci->bwd_tgt) {
ci->bwd_tgt = tgt_pc;
}
} else { /* forward branch */
if (ci->fwd_tgt == NULL || tgt_pc > ci->fwd_tgt) {
ci->fwd_tgt = tgt_pc;
}
}
}
}
return next_pc;
}
static void
check_callee_ilist(dcontext_t *dcontext, callee_info_t *ci)
{
instrlist_t *ilist = ci->ilist;
instr_t *cti, *tgt, *ret;
app_pc tgt_pc;
if (!ci->bailout) {
/* no target pc of any branch is in the middle of an instruction,
* replace target pc with target instr
*/
ret = instrlist_last(ilist);
/* must be RETURN, otherwise, bugs in decode_callee_ilist */
ASSERT(instr_is_return(ret));
for (cti = instrlist_first(ilist);
cti != ret;
cti = instr_get_next(cti)) {
if (!instr_is_cti(cti))
continue;
ASSERT(!instr_is_mbr(cti));
tgt_pc = opnd_get_pc(instr_get_target(cti));
for (tgt = instrlist_first(ilist);
tgt != NULL;
tgt = instr_get_next(tgt)) {
if (tgt_pc == instr_get_app_pc(tgt))
break;
}
if (tgt == NULL) {
/* cannot find a target instruction, bail out */
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: bail out on strange internal branch at: "PFX
"to "PFX"\n", instr_get_app_pc(cti), tgt_pc);
ci->bailout = true;
break;
}
}
/* remove RETURN as we do not need it any more */
instrlist_remove(ilist, ret);
instr_destroy(GLOBAL_DCONTEXT, ret);
}
if (ci->bailout) {
instrlist_clear_and_destroy(GLOBAL_DCONTEXT, ilist);
ci->ilist = NULL;
}
}
static void
decode_callee_ilist(dcontext_t *dcontext, callee_info_t *ci)
{
app_pc cur_pc;
ci->ilist = instrlist_create(GLOBAL_DCONTEXT);
cur_pc = ci->start;
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: decoding callee starting at: "PFX"\n", ci->start);
ci->bailout = false;
while (cur_pc != NULL) {
cur_pc = decode_callee_instr(dcontext, ci, cur_pc);
cur_pc = check_callee_instr(dcontext, ci, cur_pc);
}
check_callee_ilist(dcontext, ci);
}
/* Pick a register to use as a base register pointing to our spill slots.
* We can't use a register that is:
* - DR_XSP (need a valid stack in case of fault)
* - DR_XAX (could be used for args or aflags)
* - REGPARM_0 on X64 (RDI on Lin and RCX on Win; for N>1 args, avoid REGPARM_<=N)
* - used by the callee
*/
static void
analyze_callee_pick_spill_reg(dcontext_t *dcontext, callee_info_t *ci)
{
uint i;
for (i = 0; i < NUM_GP_REGS; i++) {
reg_id_t reg = DR_REG_START_GPR + (reg_id_t)i;
if (reg == DR_REG_XSP
IF_X86(|| reg == DR_REG_XAX)
IF_X86_64(|| reg == REGPARM_0))
continue;
if (!ci->reg_used[i]) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: picking spill reg %s for callee "PFX"\n",
reg_names[reg], ci->start);
ci->spill_reg = reg;
return;
}
}
/* This won't happen unless someone increases CLEANCALL_NUM_INLINE_SLOTS or
* handles calls with more arguments. There are at least 8 GPRs, 4 spills,
* and 3 other regs we can't touch, so one will be available.
*/
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: failed to pick spill reg for callee "PFX"\n", ci->start);
/* Fail to inline by setting ci->spill_reg == DR_REG_INVALID. */
ci->spill_reg = DR_REG_INVALID;
}
static void
analyze_callee_inline(dcontext_t *dcontext, callee_info_t *ci)
{
bool opt_inline = true;
/* a set of condition checks */
if (INTERNAL_OPTION(opt_cleancall) < 2) {
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee "PFX" cannot be inlined: opt_cleancall: %d.\n",
ci->start, INTERNAL_OPTION(opt_cleancall));
opt_inline = false;
}
if (ci->num_instrs > MAX_NUM_INLINE_INSTRS) {
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee "PFX" cannot be inlined: num of instrs: %d.\n",
ci->start, ci->num_instrs);
opt_inline = false;
}
if (ci->bwd_tgt != NULL || ci->fwd_tgt != NULL) {
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee "PFX" cannot be inlined: has control flow.\n",
ci->start);
opt_inline = false;
}
if (ci->num_simd_used != 0) {
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee "PFX" cannot be inlined: uses XMM.\n",
ci->start);
opt_inline = false;
}
if (ci->tls_used) {
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee "PFX" cannot be inlined: accesses TLS.\n",
ci->start);
opt_inline = false;
}
if (ci->spill_reg == DR_REG_INVALID) {
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee "PFX" cannot be inlined:"
" unable to pick spill reg.\n", ci->start);
opt_inline = false;
}
if (!SCRATCH_ALWAYS_TLS() || ci->slots_used > CLEANCALL_NUM_INLINE_SLOTS) {
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee "PFX" cannot be inlined:"
" not enough scratch slots.\n", ci->start);
opt_inline = false;
}
if (!opt_inline) {
instrlist_clear_and_destroy(GLOBAL_DCONTEXT, ci->ilist);
ci->ilist = NULL;
return;
}
/* Check if possible for inline, and convert memory references */
if (!check_callee_ilist_inline(dcontext, ci))
opt_inline = false;
if (opt_inline) {
ci->opt_inline = true;
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee "PFX" can be inlined.\n", ci->start);
} else {
/* not inline callee, so ilist is not needed. */
LOG(THREAD, LOG_CLEANCALL, 1,
"CLEANCALL: callee "PFX" cannot be inlined.\n", ci->start);
instrlist_clear_and_destroy(GLOBAL_DCONTEXT, ci->ilist);
ci->ilist = NULL;
}
}
static void
analyze_callee_ilist(dcontext_t *dcontext, callee_info_t *ci)
{
ASSERT(!ci->bailout && ci->ilist != NULL);
/* Remove frame setup and reg pushes before analyzing reg usage. */
if (INTERNAL_OPTION(opt_cleancall) >= 1) {
analyze_callee_save_reg(dcontext, ci);
}
analyze_callee_regs_usage(dcontext, ci);
if (INTERNAL_OPTION(opt_cleancall) < 1) {
instrlist_clear_and_destroy(GLOBAL_DCONTEXT, ci->ilist);
ci->ilist = NULL;
} else {
analyze_callee_tls(dcontext, ci);
analyze_callee_pick_spill_reg(dcontext, ci);
analyze_callee_inline(dcontext, ci);
}
}
static void
analyze_clean_call_regs(dcontext_t *dcontext, clean_call_info_t *cci)
{
uint i, num_regparm;
callee_info_t *info = cci->callee_info;
/* 1. xmm registers */
for (i = 0; i < NUM_SIMD_REGS; i++) {
if (info->simd_used[i]) {
cci->simd_skip[i] = false;
} else {
LOG(THREAD, LOG_CLEANCALL, 3,
"CLEANCALL: if inserting clean call "PFX
", skip saving XMM%d.\n", info->start, i);
cci->simd_skip[i] = true;
cci->num_simd_skip++;
}
}
if (INTERNAL_OPTION(opt_cleancall) > 2 && cci->num_simd_skip != NUM_SIMD_REGS)
cci->should_align = false;
/* 2. general purpose registers */
/* set regs not to be saved for clean call */
for (i = 0; i < NUM_GP_REGS; i++) {
if (info->reg_used[i]) {
cci->reg_skip[i] = false;
} else {
LOG(THREAD, LOG_CLEANCALL, 3,
"CLEANCALL: if inserting clean call "PFX
", skip saving reg %s.\n",
info->start, reg_names[DR_REG_START_GPR + (reg_id_t)i]);
cci->reg_skip[i] = true;
cci->num_regs_skip++;
}
}
/* we need to save/restore rax if saving aflags because rax is used */
if (!cci->skip_save_flags && cci->reg_skip[0]) {
LOG(THREAD, LOG_CLEANCALL, 3,
"CLEANCALL: if inserting clean call "PFX
", cannot skip saving reg xax.\n", info->start);
cci->reg_skip[0] = false;
cci->num_regs_skip++;
}
/* i#987: args are passed via regs in 64-bit, which will clobber those regs,
* so we should not skip any regs that are used for arg passing.
* XXX: we do not support args passing via XMMs,
* see docs for dr_insert_clean_call
* XXX: we can eliminate the arg passing instead since it is not used
* if marked for skip. However, we have to handle cases like some args
* are used and some are not.
*/
num_regparm = cci->num_args < NUM_REGPARM ? cci->num_args : NUM_REGPARM;
for (i = 0; i < num_regparm; i++) {
if (cci->reg_skip[regparms[i] - DR_REG_START_GPR]) {
LOG(THREAD, LOG_CLEANCALL, 3,
"CLEANCALL: if inserting clean call "PFX
", cannot skip saving reg %s due to param passing.\n",
info->start, reg_names[regparms[i]]);
cci->reg_skip[regparms[i] - DR_REG_START_GPR] = false;
cci->num_regs_skip--;
/* We cannot call callee_info_reserve_slot for reserving slot
* on inlining the callee here, because we are in clean call
* analysis not callee analysis.
* Also the reg for arg passing should be first handled in
* analyze_callee_regs_usage on callee_info creation.
* If we still reach here, it means the number args changes
* for the same clean call, so we will not inline it and do not
* need call callee_info_reserve_slot either.
*/
}
}
}
static void
analyze_clean_call_args(dcontext_t *dcontext,
clean_call_info_t *cci,
opnd_t *args)
{
uint i, j, num_regparm;
num_regparm = cci->num_args < NUM_REGPARM ? cci->num_args : NUM_REGPARM;
/* If a param uses a reg, DR needs to restore the register value, which assumes
* the full context switch with priv_mcontext_t layout,
* in which case we need to keep the priv_mcontext_t layout.
*/
cci->save_all_regs = false;
for (i = 0; i < cci->num_args; i++) {
if (opnd_is_reg(args[i]))
cci->save_all_regs = true;
for (j = 0; j < num_regparm; j++) {
if (opnd_uses_reg(args[i], regparms[j]))
cci->save_all_regs = true;
}
}
/* We only set cci->reg_skip all to false later if we fail to inline. We
* only need to preserve the layout if we're not inlining.
*/
}
static bool
analyze_clean_call_inline(dcontext_t *dcontext, clean_call_info_t *cci)
{
callee_info_t *info = cci->callee_info;
bool opt_inline = true;
if (INTERNAL_OPTION(opt_cleancall) <= 1) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: fail inlining clean call "PFX", opt_cleancall %d.\n",
info->start, INTERNAL_OPTION(opt_cleancall));
opt_inline = false;
}
if (cci->num_args > 1) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: fail inlining clean call "PFX", number of args %d > 1.\n",
info->start, cci->num_args);
opt_inline = false;
}
if (cci->num_args > info->num_args) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: fail inlining clean call "PFX
", number of args increases.\n",
info->start);
opt_inline = false;
}
if (cci->save_fpstate) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: fail inlining clean call "PFX", saving fpstate.\n",
info->start);
opt_inline = false;
}
if (!info->opt_inline) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: fail inlining clean call "PFX", complex callee.\n",
info->start);
opt_inline = false;
}
if (info->slots_used > CLEANCALL_NUM_INLINE_SLOTS) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: fail inlining clean call "PFX", used %d slots, "
"> %d available slots.\n",
info->start, info->slots_used, CLEANCALL_NUM_INLINE_SLOTS);
opt_inline = false;
}
if (!opt_inline) {
if (cci->save_all_regs) {
LOG(THREAD, LOG_CLEANCALL, 2,
"CLEANCALL: inserting clean call "PFX
", save all regs in priv_mcontext_t layout.\n",
info->start);
cci->num_regs_skip = 0;
memset(cci->reg_skip, 0, sizeof(bool) * NUM_GP_REGS);
cci->should_align = true;
} else {
uint i;
for (i = 0; i < NUM_GP_REGS; i++) {
if (!cci->reg_skip[i] && info->callee_save_regs[i]) {
cci->reg_skip[i] = true;
cci->num_regs_skip++;
}
}
}
if (cci->num_simd_skip == NUM_SIMD_REGS) {
STATS_INC(cleancall_simd_skipped);
}
if (cci->skip_save_flags) {
STATS_INC(cleancall_aflags_save_skipped);
}
if (cci->skip_clear_flags) {
STATS_INC(cleancall_aflags_clear_skipped);
}
} else {
cci->ilist = instrlist_clone(dcontext, info->ilist);
}
return opt_inline;
}
bool
analyze_clean_call(dcontext_t *dcontext, clean_call_info_t *cci, instr_t *where,
void *callee, bool save_fpstate, bool always_out_of_line,
uint num_args, opnd_t *args)
{
callee_info_t *ci;
/* by default, no inline optimization */
bool should_inline = false;
CLIENT_ASSERT(callee != NULL, "Clean call target is NULL");
/* 1. init clean_call_info */
clean_call_info_init(cci, callee, save_fpstate, num_args);
/* 2. check runtime optimization options */
if (INTERNAL_OPTION(opt_cleancall) > 0) {
/* 3. search if callee was analyzed before */
ci = callee_info_table_lookup(callee);
/* 4. this callee is not seen before */
if (ci == NULL) {
STATS_INC(cleancall_analyzed);
LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: analyze callee "PFX"\n", callee);
/* 4.1. create func_info */
ci = callee_info_create((app_pc)callee, num_args);
/* 4.2. decode the callee */
decode_callee_ilist(dcontext, ci);
/* 4.3. analyze the instrlist */
if (ci->bailout) {
callee_info_init(ci);
ci->start = (app_pc)callee;
} else
analyze_callee_ilist(dcontext, ci);
/* 4.4. add info into callee list */
ci = callee_info_table_add(ci);
}
cci->callee_info = ci;
if (!ci->bailout) {
/* 5. aflags optimization analysis */
analyze_clean_call_aflags(dcontext, cci, where);
/* 6. register optimization analysis */
analyze_clean_call_regs(dcontext, cci);
/* 7. check arguments */
analyze_clean_call_args(dcontext, cci, args);
/* 8. inline optimization analysis */
should_inline = analyze_clean_call_inline(dcontext, cci);
}
}
# ifdef X86
/* 9. derived fields */
/* XXX: For x64, skipping a single reg or flags still results in a huge
* code sequence to put in place: we may want to still use out-of-line
* unless multiple regs are able to be skipped.
* XXX: This should probably be in arch-specific clean_call_opt.c.
*/
if ((cci->num_simd_skip == 0 /* save all xmms */ &&
cci->num_regs_skip == 0 /* save all regs */ &&
!cci->skip_save_flags) ||
always_out_of_line)
cci->out_of_line_swap = true;
# endif
return should_inline;
}
void
insert_inline_clean_call(dcontext_t *dcontext, clean_call_info_t *cci,
instrlist_t *ilist, instr_t *where, opnd_t *args)
{
instrlist_t *callee = cci->ilist;
instr_t *instr;
ASSERT(cci->ilist != NULL);
ASSERT(SCRATCH_ALWAYS_TLS());
/* 0. update stats */
STATS_INC(cleancall_inlined);
/* 1. save registers */
insert_inline_reg_save(dcontext, cci, ilist, where, args);
/* 2. setup parameters */
insert_inline_arg_setup(dcontext, cci, ilist, where, args);
/* 3. inline clean call ilist */
instr = instrlist_first(callee);
while (instr != NULL) {
instrlist_remove(callee, instr);
/* XXX: if client has a xl8 handler we assume it will handle any faults
* in the callee (which should already have a translation set to the
* callee): and if not we assume there will be no such faults.
* We can't have a translation with no handler.
*/
if (IF_CLIENT_INTERFACE_ELSE(!dr_xl8_hook_exists(), true))
instr_set_translation(instr, NULL);
instrlist_meta_preinsert(ilist, where, instr);
instr = instrlist_first(callee);
}
instrlist_destroy(dcontext, callee);
cci->ilist = NULL;
/* 4. restore registers */
insert_inline_reg_restore(dcontext, cci, ilist, where);
/* XXX: the inlined code looks like this
* mov %rax -> %gs:0x00
* mov %rdi -> %gs:0x01
* mov $0x00000003 -> %edi
* mov <rel> 0x0000000072200c00 -> %rax
* movsxd %edi -> %rdi
* add %rdi (%rax) -> (%rax)
* mov %gs:0x00 -> %rax
* mov %gs:0x01 -> %rdi
* ...
* we can do some constant propagation optimization here,
* leave it for higher optimization level.
*/
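/* XXX: sketch only (not generated by this code): constant propagation could
 * fold the immediate argument straight into the callee's memory operand,
 * e.g. something along the lines of
 * add $0x00000003 <rel> 0x0000000072200c00 -> <rel> 0x0000000072200c00
 * which would also make the register spills and restores above unnecessary.
 */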
}
void
clean_call_opt_init(void)
{
callee_info_init(&default_callee_info);
callee_info_table_init();
}
void
clean_call_opt_exit(void)
{
callee_info_table_destroy();
}
#else /* CLIENT_INTERFACE */
/* Stub implementation ifndef CLIENT_INTERFACE. Initializes cci and returns
* false for no inlining. We use dr_insert_clean_call internally, but we don't
* need it to do inlining.
*/
bool
analyze_clean_call(dcontext_t *dcontext, clean_call_info_t *cci, instr_t *where,
void *callee, bool save_fpstate, bool always_out_of_line,
uint num_args, opnd_t *args)
{
CLIENT_ASSERT(callee != NULL, "Clean call target is NULL");
/* 1. init clean_call_info */
clean_call_info_init(cci, callee, save_fpstate, num_args);
return false;
}
#endif /* CLIENT_INTERFACE */
| 1 | 11,614 | Shouldn't we have a low bar for generating out-of-line context switch, i.e., if we need save more than n (3?) simd or m (4) gprs we should go out-of-line? And it should be || instead &&. It seems the bar is still very high after this change. | DynamoRIO-dynamorio | c |
@@ -23,6 +23,7 @@ import (
"go.opentelemetry.io/otel/api/kv"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/api/metric/registry"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
)
// This file contains the forwarding implementation of metric.Provider | 1 | // Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"context"
"sync"
"sync/atomic"
"unsafe"
"go.opentelemetry.io/otel/api/kv"
"go.opentelemetry.io/otel/api/metric"
"go.opentelemetry.io/otel/api/metric/registry"
)
// This file contains the forwarding implementation of metric.Provider
// used as the default global instance. Metric events using instruments
// provided by this implementation are no-ops until the first Meter
// implementation is set as the global provider.
//
// The implementation here uses Mutexes to maintain a list of active
// Meters in the Provider and Instruments in each Meter, under the
// assumption that these interfaces are not performance-critical.
//
// We have the invariant that setDelegate() will be called before a
// new metric.Provider implementation is registered as the global
// provider. Mutexes in the Provider and Meters ensure that each
// instrument has a delegate before the global provider is set.
//
// Bound instrument operations are implemented by delegating to the
// instrument after it is registered, with a sync.Once initializer to
// protect against races with Release().
//
// Metric uniqueness checking is implemented by calling the exported
// methods of the api/metric/registry package.
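//
// A rough usage sketch of the delegation behaviour; the global.Meter and
// global.SetMeterProvider entry points are assumed to be the public wrappers
// around this package and are shown for illustration only:
//
//   meter := global.Meter("example")                      // forwarding meter, no SDK installed yet
//   counter := metric.Must(meter).NewInt64Counter("hits") // placeholder instrument
//   counter.Add(ctx, 1)                                   // no-op until a delegate exists
//   global.SetMeterProvider(sdkProvider)                  // existing instruments now delegate to the SDK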
type meterProvider struct {
delegate metric.Provider
// lock protects `delegate` and `meters`.
lock sync.Mutex
// meters maintains a unique entry for every named Meter
// that has been registered through the global instance.
meters map[string]*meterEntry
}
type meterImpl struct {
delegate unsafe.Pointer // (*metric.MeterImpl)
lock sync.Mutex
syncInsts []*syncImpl
asyncInsts []*asyncImpl
}
type meterEntry struct {
unique metric.MeterImpl
impl meterImpl
}
type instrument struct {
descriptor metric.Descriptor
}
type syncImpl struct {
delegate unsafe.Pointer // (*metric.SyncImpl)
instrument
}
type asyncImpl struct {
delegate unsafe.Pointer // (*metric.AsyncImpl)
instrument
runner metric.AsyncRunner
}
// SyncImpler is implemented by all of the sync metric
// instruments.
type SyncImpler interface {
SyncImpl() metric.SyncImpl
}
// AsyncImpler is implemented by all of the async
// metric instruments.
type AsyncImpler interface {
AsyncImpl() metric.AsyncImpl
}
type syncHandle struct {
delegate unsafe.Pointer // (*metric.HandleImpl)
inst *syncImpl
labels []kv.KeyValue
initialize sync.Once
}
var _ metric.Provider = &meterProvider{}
var _ metric.MeterImpl = &meterImpl{}
var _ metric.InstrumentImpl = &syncImpl{}
var _ metric.BoundSyncImpl = &syncHandle{}
var _ metric.AsyncImpl = &asyncImpl{}
func (inst *instrument) Descriptor() metric.Descriptor {
return inst.descriptor
}
// Provider interface and delegation
func newMeterProvider() *meterProvider {
return &meterProvider{
meters: map[string]*meterEntry{},
}
}
func (p *meterProvider) setDelegate(provider metric.Provider) {
p.lock.Lock()
defer p.lock.Unlock()
p.delegate = provider
for name, entry := range p.meters {
entry.impl.setDelegate(name, provider)
}
p.meters = nil
}
func (p *meterProvider) Meter(name string) metric.Meter {
p.lock.Lock()
defer p.lock.Unlock()
if p.delegate != nil {
return p.delegate.Meter(name)
}
entry, ok := p.meters[name]
if !ok {
entry = &meterEntry{}
entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
p.meters[name] = entry
}
return metric.WrapMeterImpl(entry.unique, name)
}
// Meter interface and delegation
func (m *meterImpl) setDelegate(name string, provider metric.Provider) {
m.lock.Lock()
defer m.lock.Unlock()
d := new(metric.MeterImpl)
*d = provider.Meter(name).MeterImpl()
m.delegate = unsafe.Pointer(d)
for _, inst := range m.syncInsts {
inst.setDelegate(*d)
}
m.syncInsts = nil
for _, obs := range m.asyncInsts {
obs.setDelegate(*d)
}
m.asyncInsts = nil
}
func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) {
m.lock.Lock()
defer m.lock.Unlock()
if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
return (*meterPtr).NewSyncInstrument(desc)
}
inst := &syncImpl{
instrument: instrument{
descriptor: desc,
},
}
m.syncInsts = append(m.syncInsts, inst)
return inst, nil
}
// Synchronous delegation
func (inst *syncImpl) setDelegate(d metric.MeterImpl) {
implPtr := new(metric.SyncImpl)
var err error
*implPtr, err = d.NewSyncInstrument(inst.descriptor)
if err != nil {
// TODO: There is no standard way to deliver this error to the user.
// See https://github.com/open-telemetry/opentelemetry-go/issues/514
// Note that the default SDK will not generate any errors yet, this is
// only for added safety.
panic(err)
}
atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr))
}
func (inst *syncImpl) Implementation() interface{} {
if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
return (*implPtr).Implementation()
}
return inst
}
func (inst *syncImpl) Bind(labels []kv.KeyValue) metric.BoundSyncImpl {
if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
return (*implPtr).Bind(labels)
}
return &syncHandle{
inst: inst,
labels: labels,
}
}
func (bound *syncHandle) Unbind() {
bound.initialize.Do(func() {})
implPtr := (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
if implPtr == nil {
return
}
(*implPtr).Unbind()
}
// Async delegation
func (m *meterImpl) NewAsyncInstrument(
desc metric.Descriptor,
runner metric.AsyncRunner,
) (metric.AsyncImpl, error) {
m.lock.Lock()
defer m.lock.Unlock()
if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
return (*meterPtr).NewAsyncInstrument(desc, runner)
}
inst := &asyncImpl{
instrument: instrument{
descriptor: desc,
},
runner: runner,
}
m.asyncInsts = append(m.asyncInsts, inst)
return inst, nil
}
func (obs *asyncImpl) Implementation() interface{} {
if implPtr := (*metric.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil {
return (*implPtr).Implementation()
}
return obs
}
func (obs *asyncImpl) setDelegate(d metric.MeterImpl) {
implPtr := new(metric.AsyncImpl)
var err error
*implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner)
if err != nil {
// TODO: There is no standard way to deliver this error to the user.
// See https://github.com/open-telemetry/opentelemetry-go/issues/514
// Note that the default SDK will not generate any errors yet, this is
// only for added safety.
panic(err)
}
atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr))
}
// Metric updates
func (m *meterImpl) RecordBatch(ctx context.Context, labels []kv.KeyValue, measurements ...metric.Measurement) {
if delegatePtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil {
(*delegatePtr).RecordBatch(ctx, labels, measurements...)
}
}
func (inst *syncImpl) RecordOne(ctx context.Context, number metric.Number, labels []kv.KeyValue) {
if instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil {
(*instPtr).RecordOne(ctx, number, labels)
}
}
// Bound instrument initialization
func (bound *syncHandle) RecordOne(ctx context.Context, number metric.Number) {
instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate))
if instPtr == nil {
return
}
var implPtr *metric.BoundSyncImpl
bound.initialize.Do(func() {
implPtr = new(metric.BoundSyncImpl)
*implPtr = (*instPtr).Bind(bound.labels)
atomic.StorePointer(&bound.delegate, unsafe.Pointer(implPtr))
})
if implPtr == nil {
implPtr = (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
}
// This may still be nil if the instrument was created and bound
// without a delegate, and the instrument was later given a
// delegate and unbound.
if implPtr == nil {
return
}
(*implPtr).RecordOne(ctx, number)
}
func AtomicFieldOffsets() map[string]uintptr {
return map[string]uintptr{
"meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate),
"meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate),
"syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate),
"asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate),
"syncHandle.delegate": unsafe.Offsetof(syncHandle{}.delegate),
}
}
| 1 | 12,520 | Should the API have a dependency on the SDK? | open-telemetry-opentelemetry-go | go |
@@ -1475,7 +1475,7 @@ describe('Change Streams', function() {
}
});
- it('should resume piping of Change Streams when a resumable error is encountered', {
+ it.skip('should resume piping of Change Streams when a resumable error is encountered', {
metadata: {
requires: {
generators: true, | 1 | 'use strict';
const path = require('path');
const assert = require('assert');
const Transform = require('stream').Transform;
const MongoNetworkError = require('../../lib/core').MongoNetworkError;
const setupDatabase = require('./shared').setupDatabase;
const withClient = require('./shared').withClient;
const withCursor = require('./shared').withCursor;
const delay = require('./shared').delay;
const co = require('co');
const mock = require('mongodb-mock-server');
const chai = require('chai');
const expect = chai.expect;
const sinon = require('sinon');
const fs = require('fs');
const crypto = require('crypto');
const BSON = require('bson');
const Long = BSON.Long;
chai.use(require('chai-subset'));
function withChangeStream(dbName, collectionName, callback) {
if (arguments.length === 1) {
callback = dbName;
dbName = undefined;
} else if (arguments.length === 2) {
callback = collectionName;
collectionName = dbName;
dbName = undefined;
}
dbName = dbName || 'changestream_integration_test';
collectionName = collectionName || 'test';
return withClient((client, done) => {
const db = client.db(dbName);
db.createCollection(collectionName, { w: 'majority' }, (err, collection) => {
if (err) return done(err);
withCursor(
collection.watch(),
(cursor, done) => callback(collection, cursor, done),
err => collection.drop(dropErr => done(err || dropErr))
);
});
});
}
/**
* Triggers a fake resumable error on a change stream
*
* @param {ChangeStream} changeStream
* @param {number} [delay] optional delay before triggering error
* @param {Function} onClose callback invoked when the cursor is closed due to this error
*/
function triggerResumableError(changeStream, delay, onClose) {
if (arguments.length === 2) {
onClose = delay;
delay = undefined;
}
const stub = sinon.stub(changeStream.cursor, 'close');
stub.callsFake(function() {
stub.wrappedMethod.call(this);
stub.restore();
onClose();
});
function triggerError() {
changeStream.cursor.emit('error', new MongoNetworkError('fake error'));
}
if (delay != null) {
setTimeout(triggerError, delay);
return;
}
triggerError();
}
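// A minimal usage sketch for the helper above (`changeStream` and `done` are
// hypothetical stand-ins for values a test would provide):
//
//   waitForStarted(changeStream, () => {
//     triggerResumableError(changeStream, 250, () => {
//       // the cursor was closed by the fake resumable error; the change
//       // stream is expected to resume on a fresh cursor afterwards
//       done();
//     });
//   });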
/**
* Waits for a change stream to start
*
* @param {ChangeStream} changeStream
* @param {function} callback
*/
function waitForStarted(changeStream, callback) {
const timeout = setTimeout(() => {
throw new Error('Change stream never started');
}, 2000);
changeStream.cursor.once('init', () => {
clearTimeout(timeout);
callback();
});
}
/**
* Iterates the next discrete batch of a change stream non-eagerly. This
* will return `null` if the next batch is empty, rather than waiting forever
* for a non-empty batch.
*
* @param {ChangeStream} changeStream
* @param {function} callback
*/
function tryNext(changeStream, callback) {
let complete = false;
function done(err, result) {
if (complete) return;
// if the arity is 1 then this a callback for `more`
if (arguments.length === 1) {
result = err;
const batch = result.cursor.firstBatch || result.cursor.nextBatch;
if (batch.length === 0) {
complete = true;
callback(null, null);
}
return;
}
// otherwise, this a normal response to `next`
complete = true;
changeStream.removeListener('more', done);
if (err) return callback(err);
callback(err, result);
}
// race the two requests
changeStream.next(done);
changeStream.cursor.once('more', done);
}
/**
* Exhausts a change stream, aggregating all responses into a returned
* array of events until the first empty batch.
*
* @param {ChangeStream} changeStream
* @param {Function|Array} bag
* @param {Function} [callback]
*/
function exhaust(changeStream, bag, callback) {
if (typeof bag === 'function') {
callback = bag;
bag = [];
}
tryNext(changeStream, (err, doc) => {
if (err) return callback(err);
if (doc === null) return callback(undefined, bag);
bag.push(doc);
exhaust(changeStream, bag, callback);
});
}
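// Illustrative sketch of how the two helpers above compose (the identifiers
// are placeholders, not part of the suite):
//
//   exhaust(changeStream, (err, events) => {
//     if (err) return done(err);
//     // `events` holds every change received up to the first empty batch;
//     // an empty array means tryNext saw an empty batch immediately
//   });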
// Define the pipeline processing changes
const pipeline = [
{ $addFields: { addedField: 'This is a field added using $addFields' } },
{ $project: { documentKey: false } },
{ $addFields: { comment: 'The documentKey field has been projected out of this document.' } }
];
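// Note on the pipeline above: the first stage adds `addedField` to every
// change event, the second strips `documentKey`, and the third adds a
// `comment` field; several assertions below rely on `documentKey` being
// absent and `comment` being present on received changes.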
describe('Change Streams', function() {
before(function() {
return setupDatabase(this.configuration, ['integration_tests']);
});
beforeEach(function() {
const configuration = this.configuration;
const client = configuration.newClient();
return client
.connect()
.then(() => {
const db = client.db('integration_tests');
return db.createCollection('test');
})
.then(
() => client.close(),
() => client.close()
);
});
afterEach(() => mock.cleanup());
it('should close the listeners after the cursor is closed', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
let closed = false;
function close(err) {
if (closed) return;
closed = true;
done(err);
}
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const coll = client.db('integration_tests').collection('listenertest');
const changeStream = coll.watch();
this.defer(() => changeStream.close());
changeStream.on('change', () => {
const internalCursor = changeStream.cursor;
expect(internalCursor.listenerCount('data')).to.equal(1);
changeStream.close(err => {
expect(internalCursor.listenerCount('data')).to.equal(0);
close(err);
});
});
waitForStarted(changeStream, () => this.defer(coll.insertOne({ x: 1 })));
changeStream.on('error', err => close(err));
});
}
});
class EventCollector {
constructor(obj, events, options) {
this._events = [];
this._timeout = options && options.timeout ? options.timeout : 5000;
events.forEach(eventName => {
this._events[eventName] = [];
obj.on(eventName, event => this._events[eventName].push(event));
});
}
waitForEvent(eventName, count, callback) {
if (typeof count === 'function') {
callback = count;
count = 1;
}
waitForEventImpl(this, Date.now(), eventName, count, callback);
}
reset(eventName) {
if (eventName == null) {
Object.keys(this._events).forEach(eventName => {
this._events[eventName] = [];
});
return;
}
if (this._events[eventName] == null) {
throw new TypeError(`invalid event name "${eventName}" specified for reset`);
}
this._events[eventName] = [];
}
}
function waitForEventImpl(collector, start, eventName, count, callback) {
const events = collector._events[eventName];
if (events.length >= count) {
return callback(undefined, events);
}
if (Date.now() - start >= collector._timeout) {
return callback(new Error(`timed out waiting for event "${eventName}"`));
}
setTimeout(() => waitForEventImpl(collector, start, eventName, count, callback), 10);
}
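// Hypothetical usage sketch for EventCollector/waitForEvent (mirrors how the
// tests below use it; the identifiers are placeholders):
//
//   const collector = new EventCollector(changeStream, ['change', 'error']);
//   collector.waitForEvent('change', 2, (err, changes) => {
//     // `changes` contains the first two 'change' events, or `err` is a
//     // timeout error if they did not arrive within the collector timeout
//   });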
it('should create a ChangeStream on a collection and emit `change` events', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const collection = client.db('integration_tests').collection('docsDataEvent');
const changeStream = collection.watch(pipeline);
this.defer(() => changeStream.close());
const collector = new EventCollector(changeStream, ['init', 'change']);
waitForStarted(changeStream, () => {
// Trigger the first database event
collection.insertOne({ d: 4 }, err => {
expect(err).to.not.exist;
// Trigger the second database event
collection.updateOne({ d: 4 }, { $inc: { d: 2 } }, err => {
expect(err).to.not.exist;
collector.waitForEvent('change', 2, (err, changes) => {
expect(err).to.not.exist;
expect(changes).to.have.length(2);
expect(changes[0]).to.not.have.property('documentKey');
expect(changes[0]).to.containSubset({
operationType: 'insert',
fullDocument: { d: 4 },
ns: {
db: 'integration_tests',
coll: 'docsDataEvent'
},
comment: 'The documentKey field has been projected out of this document.'
});
expect(changes[1]).to.containSubset({
operationType: 'update',
updateDescription: {
updatedFields: { d: 6 }
}
});
done();
});
});
});
});
});
}
});
it(
'should create a ChangeStream on a collection and get change events through imperative callback form',
{
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const collection = client.db('integration_tests').collection('docsCallback');
const changeStream = collection.watch(pipeline);
this.defer(() => changeStream.close());
// Fetch the change notification
changeStream.hasNext((err, hasNext) => {
expect(err).to.not.exist;
assert.equal(true, hasNext);
changeStream.next((err, change) => {
expect(err).to.not.exist;
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.e, 5);
assert.equal(change.ns.db, 'integration_tests');
assert.equal(change.ns.coll, 'docsCallback');
assert.ok(!change.documentKey);
assert.equal(
change.comment,
'The documentKey field has been projected out of this document.'
);
// Trigger the second database event
collection.updateOne({ e: 5 }, { $inc: { e: 2 } }, err => {
expect(err).to.not.exist;
changeStream.hasNext((err, hasNext) => {
expect(err).to.not.exist;
assert.equal(true, hasNext);
changeStream.next((err, change) => {
expect(err).to.not.exist;
assert.equal(change.operationType, 'update');
done();
});
});
});
});
});
// Trigger the first database event
// NOTE: this needs to be triggered after the changeStream call so
// that the cursor is run
this.defer(collection.insertOne({ e: 5 }));
});
}
}
);
it('should support creating multiple simultaneous ChangeStreams', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const database = client.db('integration_tests');
const collection1 = database.collection('simultaneous1');
const collection2 = database.collection('simultaneous2');
const changeStream1 = collection1.watch([{ $addFields: { changeStreamNumber: 1 } }]);
this.defer(() => changeStream1.close());
const changeStream2 = collection2.watch([{ $addFields: { changeStreamNumber: 2 } }]);
this.defer(() => changeStream2.close());
const changeStream3 = collection2.watch([{ $addFields: { changeStreamNumber: 3 } }]);
this.defer(() => changeStream3.close());
setTimeout(() => {
this.defer(collection1.insert({ a: 1 }).then(() => collection2.insert({ a: 1 })));
}, 50);
Promise.resolve()
.then(() =>
Promise.all([changeStream1.hasNext(), changeStream2.hasNext(), changeStream3.hasNext()])
)
.then(function(hasNexts) {
// Check all the Change Streams have a next item
assert.ok(hasNexts[0]);
assert.ok(hasNexts[1]);
assert.ok(hasNexts[2]);
return Promise.all([changeStream1.next(), changeStream2.next(), changeStream3.next()]);
})
.then(function(changes) {
// Check the values of the change documents are correct
assert.equal(changes[0].operationType, 'insert');
assert.equal(changes[1].operationType, 'insert');
assert.equal(changes[2].operationType, 'insert');
assert.equal(changes[0].fullDocument.a, 1);
assert.equal(changes[1].fullDocument.a, 1);
assert.equal(changes[2].fullDocument.a, 1);
assert.equal(changes[0].ns.db, 'integration_tests');
assert.equal(changes[1].ns.db, 'integration_tests');
assert.equal(changes[2].ns.db, 'integration_tests');
assert.equal(changes[0].ns.coll, 'simultaneous1');
assert.equal(changes[1].ns.coll, 'simultaneous2');
assert.equal(changes[2].ns.coll, 'simultaneous2');
assert.equal(changes[0].changeStreamNumber, 1);
assert.equal(changes[1].changeStreamNumber, 2);
assert.equal(changes[2].changeStreamNumber, 3);
})
.then(
() => done(),
err => done(err)
);
});
}
});
it('should properly close ChangeStream cursor', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const database = client.db('integration_tests');
const changeStream = database.collection('changeStreamCloseTest').watch(pipeline);
this.defer(() => changeStream.close());
assert.equal(changeStream.isClosed(), false);
assert.equal(changeStream.cursor.isClosed(), false);
changeStream.close(err => {
expect(err).to.not.exist;
// Check the cursor is closed
assert.equal(changeStream.isClosed(), true);
assert.ok(!changeStream.cursor);
done();
});
});
}
});
it(
'should error when attempting to create a ChangeStream with a forbidden aggregation pipeline stage',
{
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const forbiddenStage = {};
const forbiddenStageName = '$alksdjfhlaskdfjh';
forbiddenStage[forbiddenStageName] = 2;
const database = client.db('integration_tests');
const changeStream = database.collection('forbiddenStageTest').watch([forbiddenStage]);
this.defer(() => changeStream.close());
changeStream.next(err => {
assert.ok(err);
assert.ok(err.message);
assert.ok(
err.message.indexOf(`Unrecognized pipeline stage name: '${forbiddenStageName}'`) > -1
);
done();
});
});
}
}
);
it('should cache the change stream resume token using imperative callback form', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const database = client.db('integration_tests');
const changeStream = database.collection('cacheResumeTokenCallback').watch(pipeline);
this.defer(() => changeStream.close());
// Trigger the first database event
waitForStarted(changeStream, () => {
this.defer(database.collection('cacheResumeTokenCallback').insert({ b: 2 }));
});
// Fetch the change notification
changeStream.hasNext(function(err, hasNext) {
expect(err).to.not.exist;
assert.equal(true, hasNext);
changeStream.next(function(err, change) {
expect(err).to.not.exist;
assert.deepEqual(changeStream.resumeToken, change._id);
done();
});
});
});
}
});
it('should cache the change stream resume token using promises', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: function() {
const configuration = this.configuration;
const client = configuration.newClient();
return client.connect().then(() => {
this.defer(() => client.close());
const database = client.db('integration_tests');
const changeStream = database.collection('cacheResumeTokenPromise').watch(pipeline);
this.defer(() => changeStream.close());
// trigger the first database event
waitForStarted(changeStream, () => {
this.defer(database.collection('cacheResumeTokenPromise').insert({ b: 2 }));
});
return changeStream
.hasNext()
.then(hasNext => {
assert.equal(true, hasNext);
return changeStream.next();
})
.then(change => {
assert.deepEqual(changeStream.resumeToken, change._id);
});
});
}
});
it('should cache the change stream resume token using event listeners', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db('integration_tests');
const changeStream = db.collection('cacheResumeTokenListener').watch(pipeline);
this.defer(() => changeStream.close());
const collector = new EventCollector(changeStream, ['change']);
waitForStarted(changeStream, () => {
// Trigger the first database event
db.collection('cacheResumeTokenListener').insert({ b: 2 }, (err, result) => {
expect(err).to.not.exist;
expect(result)
.property('insertedCount')
.to.equal(1);
collector.waitForEvent('change', (err, events) => {
expect(err).to.not.exist;
expect(changeStream)
.property('resumeToken')
.to.eql(events[0]._id);
done();
});
});
});
});
}
});
it(
'should error if resume token projected out of change stream document using imperative callback form',
{
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const database = client.db('integration_tests');
const changeStream = database
.collection('resumetokenProjectedOutCallback')
.watch([{ $project: { _id: false } }]);
this.defer(() => changeStream.close());
// Trigger the first database event
waitForStarted(changeStream, () => {
this.defer(database.collection('resumetokenProjectedOutCallback').insert({ b: 2 }));
});
// Fetch the change notification
changeStream.next(err => {
expect(err).to.exist;
done();
});
});
}
}
);
it('should error if resume token projected out of change stream document using event listeners', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db('integration_tests');
const collection = db.collection('resumetokenProjectedOutListener');
const changeStream = collection.watch([{ $project: { _id: false } }]);
this.defer(() => changeStream.close());
const collector = new EventCollector(changeStream, ['change', 'error']);
waitForStarted(changeStream, () => {
collection.insert({ b: 2 }, (err, result) => {
expect(err).to.not.exist;
expect(result)
.property('insertedCount')
.to.equal(1);
collector.waitForEvent('error', (err, events) => {
expect(err).to.not.exist;
expect(events).to.have.lengthOf.at.least(1);
done();
});
});
});
});
}
});
it('should invalidate change stream on collection rename using event listeners', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const database = client.db('integration_tests');
const changeStream = database
.collection('invalidateListeners')
.watch(pipeline, { batchSize: 1 });
this.defer(() => changeStream.close());
// Attach first event listener
changeStream.once('change', change => {
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.a, 1);
assert.equal(change.ns.db, 'integration_tests');
assert.equal(change.ns.coll, 'invalidateListeners');
assert.ok(!change.documentKey);
assert.equal(
change.comment,
'The documentKey field has been projected out of this document.'
);
// Attach second event listener
changeStream.on('change', change => {
if (change.operationType === 'invalidate') {
// now expect the server to close the stream
changeStream.once('close', () => done());
}
});
// Trigger the second database event
setTimeout(() => {
this.defer(
database.collection('invalidateListeners').rename('renamedDocs', { dropTarget: true })
);
}, 250);
});
// Trigger the first database event
waitForStarted(changeStream, () => {
this.defer(database.collection('invalidateListeners').insert({ a: 1 }));
});
});
}
});
it('should invalidate change stream on database drop using imperative callback form', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const database = client.db('integration_tests');
const changeStream = database.collection('invalidateCallback').watch(pipeline);
this.defer(() => changeStream.close());
// Trigger the first database event
waitForStarted(changeStream, () => {
this.defer(database.collection('invalidateCallback').insert({ a: 1 }));
});
changeStream.next((err, change) => {
expect(err).to.not.exist;
assert.equal(change.operationType, 'insert');
database.dropDatabase(err => {
expect(err).to.not.exist;
function completeStream() {
changeStream.hasNext(function(err, hasNext) {
expect(err).to.not.exist;
assert.equal(hasNext, false);
assert.equal(changeStream.isClosed(), true);
done();
});
}
function checkInvalidate() {
changeStream.next(function(err, change) {
expect(err).to.not.exist;
// Check that the cursor invalidation has occurred
if (change.operationType === 'invalidate') {
return completeStream();
}
checkInvalidate();
});
}
checkInvalidate();
});
});
});
}
});
it('should invalidate change stream on collection drop using promises', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
function checkInvalidate(changeStream) {
return changeStream.next().then(change => {
if (change.operationType === 'invalidate') {
return Promise.resolve();
}
return checkInvalidate(changeStream);
});
}
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const database = client.db('integration_tests');
const changeStream = database
.collection('invalidateCollectionDropPromises')
.watch(pipeline);
this.defer(() => changeStream.close());
// Trigger the first database event
waitForStarted(changeStream, () => {
this.defer(database.collection('invalidateCollectionDropPromises').insert({ a: 1 }));
});
return changeStream
.next()
.then(function(change) {
assert.equal(change.operationType, 'insert');
return database.dropCollection('invalidateCollectionDropPromises');
})
.then(() => checkInvalidate(changeStream))
.then(() => changeStream.hasNext())
.then(function(hasNext) {
assert.equal(hasNext, false);
assert.equal(changeStream.isClosed(), true);
done();
});
});
}
});
it.skip('should return MongoNetworkError after first retry attempt fails using promises', {
metadata: {
requires: {
generators: true,
topology: 'single',
mongodb: '>=3.6'
}
},
test: function(done) {
const configuration = this.configuration;
const ObjectId = configuration.require.ObjectId;
// Holds the mock server
let primaryServer = null;
// Default message fields
const defaultFields = {
setName: 'rs',
setVersion: 1,
electionId: new ObjectId(0),
maxBsonObjectSize: 16777216,
maxMessageSizeBytes: 48000000,
maxWriteBatchSize: 1000,
localTime: new Date(),
maxWireVersion: 4,
minWireVersion: 0,
ok: 1,
hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002']
};
co(function*() {
primaryServer = yield mock.createServer(32000, 'localhost');
primaryServer.setMessageHandler(request => {
const doc = request.document;
if (doc.ismaster) {
request.reply(
Object.assign(
{
ismaster: true,
secondary: false,
me: 'localhost:32000',
primary: 'localhost:32000',
tags: { loc: 'ny' }
},
defaultFields
)
);
} else {
// kill the connection, simulating a network error
request.connection.destroy();
}
});
});
const mockServerURL = 'mongodb://localhost:32000/';
const client = configuration.newClient(mockServerURL);
client.connect((err, client) => {
expect(err).to.not.exist;
const database = client.db('integration_tests');
const collection = database.collection('MongoNetworkErrorTestPromises');
const changeStream = collection.watch(pipeline);
return changeStream
.next()
.then(function() {
// We should never execute this line because calling changeStream.next() should throw an error
throw new Error(
'ChangeStream.next() returned a change document but it should have returned a MongoNetworkError'
);
})
.catch(err => {
assert.ok(
err instanceof MongoNetworkError,
'error was not instance of MongoNetworkError'
);
assert.ok(err.message);
assert.ok(err.message.indexOf('closed') > -1);
changeStream.close(err => {
expect(err).to.not.exist;
changeStream.close();
// running = false;
primaryServer.destroy();
client.close(() => mock.cleanup(() => done()));
});
})
.catch(err => done(err));
});
}
});
it.skip('should return MongoNetworkError after first retry attempt fails using callbacks', {
metadata: {
requires: {
generators: true,
topology: 'single',
mongodb: '>=3.6'
}
},
test: function(done) {
const configuration = this.configuration;
const ObjectId = configuration.require.ObjectId;
// Holds the mock server
let primaryServer = null;
// Default message fields
const defaultFields = {
setName: 'rs',
setVersion: 1,
electionId: new ObjectId(0),
maxBsonObjectSize: 16777216,
maxMessageSizeBytes: 48000000,
maxWriteBatchSize: 1000,
localTime: new Date(),
maxWireVersion: 4,
minWireVersion: 0,
ok: 1,
hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002']
};
// Die
let die = false;
co(function*() {
primaryServer = yield mock.createServer(32000, 'localhost');
primaryServer.setMessageHandler(request => {
const doc = request.document;
if (die) {
request.connection.destroy();
} else if (doc.ismaster) {
request.reply(
Object.assign(
{
ismaster: true,
secondary: false,
me: 'localhost:32000',
primary: 'localhost:32000',
tags: { loc: 'ny' }
},
defaultFields
)
);
} else if (doc.endSessions) {
request.reply({ ok: 1 });
}
// Do not respond to other requests
});
});
const client = configuration.newClient('mongodb://localhost:32000/', {
socketTimeoutMS: 500,
validateOptions: true
});
client.connect((err, client) => {
expect(err).to.not.exist;
const database = client.db('integration_tests');
const collection = database.collection('MongoNetworkErrorTestPromises');
const changeStream = collection.watch(pipeline);
changeStream.next(function(err, change) {
assert.ok(err instanceof MongoNetworkError);
assert.ok(err.message);
assert.ok(err.message.indexOf('timed out') > -1);
assert.equal(
change,
null,
'ChangeStream.next() returned a change document but it should have returned a MongoNetworkError'
);
changeStream.close(err => {
expect(err).to.not.exist;
changeStream.close();
client.close(() => mock.cleanup(() => done()));
});
});
});
}
});
it.skip('should resume Change Stream when a resumable error is encountered', {
metadata: {
requires: {
generators: true,
topology: 'single',
mongodb: '>=3.6'
}
},
test: function(done) {
const configuration = this.configuration;
const ObjectId = configuration.require.ObjectId;
const Timestamp = configuration.require.Timestamp;
const Long = configuration.require.Long;
// Holds the mock server
let primaryServer = null;
// Default message fields
const defaultFields = {
setName: 'rs',
setVersion: 1,
electionId: new ObjectId(0),
maxBsonObjectSize: 16777216,
maxMessageSizeBytes: 48000000,
maxWriteBatchSize: 1000,
localTime: new Date(),
maxWireVersion: 4,
minWireVersion: 0,
ok: 1,
hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002']
};
// Track how many getMore requests the mock server receives
let callsToGetMore = 0;
// Boot the mock
co(function*() {
primaryServer = yield mock.createServer(32000, 'localhost');
let counter = 0;
primaryServer.setMessageHandler(request => {
const doc = request.document;
// Create a server that responds to the ismaster and initial aggregation (so the client can connect), but not to subsequent getMore requests
if (doc.ismaster) {
request.reply(
Object.assign(
{
ismaster: true,
secondary: false,
me: 'localhost:32000',
primary: 'localhost:32000',
tags: { loc: 'ny' }
},
defaultFields
)
);
} else if (doc.getMore) {
callsToGetMore++;
} else if (doc.aggregate) {
var changeDoc = {
_id: {
ts: new Timestamp(4, 1501511802),
ns: 'integration_tests.docsDataEvent',
_id: new ObjectId('597f407a8fd4abb616feca93')
},
operationType: 'insert',
ns: {
db: 'integration_tests',
coll: 'docsDataEvent'
},
fullDocument: {
_id: new ObjectId('597f407a8fd4abb616feca93'),
a: 1,
counter: counter++
}
};
request.reply({
ok: 1,
cursor: {
id: new Long(1407, 1407),
firstBatch: [changeDoc]
}
});
} else if (doc.endSessions) {
request.reply({ ok: 1 });
}
});
});
let finalError = undefined;
const client = configuration.newClient('mongodb://localhost:32000/', {
socketTimeoutMS: 500,
validateOptions: true
});
client
.connect()
.then(client => {
const database = client.db('integration_tests');
const collection = database.collection('MongoNetworkErrorTestPromises');
const changeStream = collection.watch(pipeline);
return changeStream
.next()
.then(function(change) {
assert.ok(change);
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.counter, 0);
// Add a tag to the cursor
changeStream.cursor.track = 1;
return changeStream.next();
})
.then(function(change) {
assert.ok(change);
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.counter, 1);
// Check this cursor doesn't have the tag added earlier (therefore it is a new cursor)
assert.notEqual(changeStream.cursor.track, 1);
// Check that only one getMore call was made
assert.equal(callsToGetMore, 1);
return Promise.all([changeStream.close(), primaryServer.destroy()]).then(() =>
client.close()
);
});
})
.catch(err => (finalError = err))
.then(() => mock.cleanup())
.catch(err => (finalError = err))
.then(() => done(finalError));
}
});
it('should resume from point in time using user-provided resumeAfter', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function() {
const configuration = this.configuration;
const client = configuration.newClient();
return client.connect().then(client => {
this.defer(() => client.close());
const database = client.db('integration_tests');
const collection = database.collection('resumeAfterTest2');
let firstChangeStream, secondChangeStream;
let resumeToken;
const docs = [{ a: 0 }, { a: 1 }, { a: 2 }];
firstChangeStream = collection.watch(pipeline);
this.defer(() => firstChangeStream.close());
// Trigger the first database event
waitForStarted(firstChangeStream, () => {
this.defer(
collection
.insert(docs[0])
.then(() => collection.insertOne(docs[1]))
.then(() => collection.insertOne(docs[2]))
);
});
return firstChangeStream
.hasNext()
.then(hasNext => {
assert.equal(true, hasNext);
return firstChangeStream.next();
})
.then(change => {
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.a, docs[0].a);
// Save the resumeToken
resumeToken = change._id;
return firstChangeStream.next();
})
.then(change => {
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.a, docs[1].a);
return firstChangeStream.next();
})
.then(change => {
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.a, docs[2].a);
return firstChangeStream.close();
})
.then(() => {
secondChangeStream = collection.watch(pipeline, {
resumeAfter: resumeToken
});
this.defer(() => secondChangeStream.close());
return delay(200);
})
.then(() => secondChangeStream.hasNext())
.then(hasNext => {
assert.equal(true, hasNext);
return secondChangeStream.next();
})
.then(change => {
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.a, docs[1].a);
return secondChangeStream.next();
})
.then(change => {
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.a, docs[2].a);
return secondChangeStream.close();
});
});
}
});
it('should support full document lookup', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function() {
const configuration = this.configuration;
const client = configuration.newClient();
return client.connect().then(client => {
this.defer(() => client.close());
const database = client.db('integration_tests');
const collection = database.collection('fullDocumentLookup');
const changeStream = collection.watch(pipeline, {
fullDocument: 'updateLookup'
});
this.defer(() => changeStream.close());
waitForStarted(changeStream, () => {
this.defer(collection.insert({ f: 128 }));
});
return changeStream
.hasNext()
.then(function(hasNext) {
assert.equal(true, hasNext);
return changeStream.next();
})
.then(function(change) {
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.f, 128);
assert.equal(change.ns.db, database.databaseName);
assert.equal(change.ns.coll, collection.collectionName);
assert.ok(!change.documentKey);
assert.equal(
change.comment,
'The documentKey field has been projected out of this document.'
);
return collection.update({ f: 128 }, { $set: { c: 2 } });
})
.then(function() {
return changeStream.next();
})
.then(function(change) {
assert.equal(change.operationType, 'update');
// Check the correct fullDocument is present
assert.ok(change.fullDocument);
assert.equal(change.fullDocument.f, 128);
assert.equal(change.fullDocument.c, 2);
});
});
}
});
it('should support full document lookup with deleted documents', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function() {
const configuration = this.configuration;
const client = configuration.newClient();
return client.connect().then(client => {
this.defer(() => client.close());
const database = client.db('integration_tests');
const collection = database.collection('fullLookupTest');
const changeStream = collection.watch(pipeline, {
fullDocument: 'updateLookup'
});
this.defer(() => changeStream.close());
// Trigger the first database event
waitForStarted(changeStream, () => {
this.defer(collection.insert({ i: 128 }).then(() => collection.deleteOne({ i: 128 })));
});
return changeStream
.hasNext()
.then(function(hasNext) {
assert.equal(true, hasNext);
return changeStream.next();
})
.then(function(change) {
assert.equal(change.operationType, 'insert');
assert.equal(change.fullDocument.i, 128);
assert.equal(change.ns.db, database.databaseName);
assert.equal(change.ns.coll, collection.collectionName);
assert.ok(!change.documentKey);
assert.equal(
change.comment,
'The documentKey field has been projected out of this document.'
);
// Trigger the second database event
return collection.update({ i: 128 }, { $set: { c: 2 } });
})
.then(() => changeStream.hasNext())
.then(function(hasNext) {
assert.equal(true, hasNext);
return changeStream.next();
})
.then(function(change) {
assert.equal(change.operationType, 'delete');
assert.equal(change.lookedUpDocument, null);
});
});
}
});
it('should create Change Streams with correct read preferences', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function() {
const configuration = this.configuration;
const ReadPreference = configuration.require.ReadPreference;
const client = configuration.newClient();
return client.connect().then(client => {
this.defer(() => client.close());
// should get preference from database
const database = client.db('integration_tests', {
readPreference: ReadPreference.PRIMARY_PREFERRED
});
const changeStream0 = database.collection('docs0').watch(pipeline);
this.defer(() => changeStream0.close());
assert.deepEqual(
changeStream0.cursor.readPreference.preference,
ReadPreference.PRIMARY_PREFERRED
);
// should get preference from collection
const collection = database.collection('docs1', {
readPreference: ReadPreference.SECONDARY_PREFERRED
});
const changeStream1 = collection.watch(pipeline);
assert.deepEqual(
changeStream1.cursor.readPreference.preference,
ReadPreference.SECONDARY_PREFERRED
);
this.defer(() => changeStream1.close());
// should get preference from Change Stream options
const changeStream2 = collection.watch(pipeline, {
readPreference: ReadPreference.NEAREST
});
this.defer(() => changeStream2.close());
assert.deepEqual(changeStream2.cursor.readPreference.preference, ReadPreference.NEAREST);
});
}
});
it('should support piping of Change Streams', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
// The actual test we wish to run
test: function(done) {
const configuration = this.configuration;
const stream = require('stream');
const client = configuration.newClient();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const database = client.db('integration_tests');
const collection = database.collection('pipeTest');
const changeStream = collection.watch(pipeline);
this.defer(() => changeStream.close());
const outStream = new stream.PassThrough({ objectMode: true });
// Make a stream transforming to JSON and piping to the file
changeStream.stream({ transform: JSON.stringify }).pipe(outStream);
outStream
.on('data', data => {
try {
const parsedEvent = JSON.parse(data);
assert.equal(parsedEvent.fullDocument.a, 1);
done();
} catch (e) {
done(e);
}
})
.on('error', done);
waitForStarted(changeStream, () => {
this.defer(collection.insert({ a: 1 }));
});
});
}
});
it('should resume piping of Change Streams when a resumable error is encountered', {
metadata: {
requires: {
generators: true,
topology: 'single',
mongodb: '>=3.6'
}
},
test: function(done) {
const filename = path.join(__dirname, '_nodemongodbnative_resumepipe.txt');
this.defer(() => fs.unlinkSync(filename));
const configuration = this.configuration;
const ObjectId = configuration.require.ObjectId;
const Timestamp = configuration.require.Timestamp;
const Long = configuration.require.Long;
// Default message fields
const defaultFields = {
setName: 'rs',
setVersion: 1,
electionId: new ObjectId(0),
maxBsonObjectSize: 16777216,
maxMessageSizeBytes: 48000000,
maxWriteBatchSize: 1000,
localTime: new Date(),
maxWireVersion: 4,
minWireVersion: 0,
ok: 1,
hosts: ['localhost:32000', 'localhost:32001', 'localhost:32002']
};
mock.createServer(32000, 'localhost').then(primaryServer => {
this.defer(() => mock.cleanup());
let counter = 0;
primaryServer.setMessageHandler(request => {
const doc = request.document;
// Create a server that responds to the ismaster, the initial aggregation, and subsequent getMore requests
if (doc.ismaster) {
request.reply(
Object.assign(
{
ismaster: true,
secondary: false,
me: primaryServer.uri(),
primary: primaryServer.uri(),
tags: { loc: 'ny' }
},
defaultFields
)
);
} else if (doc.getMore) {
var changeDoc = {
cursor: {
id: new Long(1407, 1407),
nextBatch: [
{
_id: {
ts: new Timestamp(4, 1501511802),
ns: 'integration_tests.docsDataEvent',
_id: new ObjectId('597f407a8fd4abb616feca93')
},
operationType: 'insert',
ns: {
db: 'integration_tests',
coll: 'docsDataEvent'
},
fullDocument: {
_id: new ObjectId('597f407a8fd4abb616feca93'),
a: 1,
counter: counter++
}
}
]
},
ok: 1
};
request.reply(changeDoc, {
cursorId: new Long(1407, 1407)
});
} else if (doc.aggregate) {
changeDoc = {
_id: {
ts: new Timestamp(4, 1501511802),
ns: 'integration_tests.docsDataEvent',
_id: new ObjectId('597f407a8fd4abb616feca93')
},
operationType: 'insert',
ns: {
db: 'integration_tests',
coll: 'docsDataEvent'
},
fullDocument: {
_id: new ObjectId('597f407a8fd4abb616feca93'),
a: 1,
counter: counter++
}
};
request.reply({
ok: 1,
cursor: {
id: new Long(1407, 1407),
firstBatch: [changeDoc]
}
});
} else if (doc.endSessions) {
request.reply({ ok: 1 });
}
});
const client = configuration.newClient(`mongodb://${primaryServer.uri()}/`, {
socketTimeoutMS: 500,
validateOptions: true
});
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const database = client.db('integration_tests5');
const collection = database.collection('MongoNetworkErrorTestPromises');
const changeStream = collection.watch(pipeline);
const outStream = fs.createWriteStream(filename);
changeStream.stream({ transform: JSON.stringify }).pipe(outStream);
this.defer(() => changeStream.close());
// Listen for changes to the file
const watcher = fs.watch(filename, eventType => {
this.defer(() => watcher.close());
expect(eventType).to.equal('change');
const fileContents = fs.readFileSync(filename, 'utf8');
const parsedFileContents = JSON.parse(fileContents);
expect(parsedFileContents).to.have.nested.property('fullDocument.a', 1);
done();
});
});
});
}
});
it('should support piping of Change Streams through multiple pipes', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient(configuration.url(), {
poolSize: 1,
autoReconnect: false
});
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const cipher = crypto.createCipher('aes192', 'a password');
const decipher = crypto.createDecipher('aes192', 'a password');
const database = client.db('integration_tests');
const collection = database.collection('multiPipeTest');
const changeStream = collection.watch(pipeline);
this.defer(() => changeStream.close());
// Make a stream transforming to JSON and piping to the file
const basicStream = changeStream.pipe(
new Transform({
transform: (data, encoding, callback) => callback(null, JSON.stringify(data)),
objectMode: true
})
);
const pipedStream = basicStream.pipe(cipher).pipe(decipher);
let dataEmitted = '';
pipedStream.on('data', function(data) {
dataEmitted += data.toString();
// Work around poor compatibility with crypto cipher
changeStream.cursor.emit('end');
});
pipedStream.on('end', function() {
const parsedData = JSON.parse(dataEmitted.toString());
assert.equal(parsedData.operationType, 'insert');
assert.equal(parsedData.fullDocument.a, 1407);
basicStream.emit('close');
done();
});
pipedStream.on('error', err => {
done(err);
});
waitForStarted(changeStream, () => {
this.defer(collection.insert({ a: 1407 }));
});
});
}
});
it('should maintain change stream options on resume', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: function() {
const configuration = this.configuration;
const client = configuration.newClient();
const collectionName = 'resumeAfterKillCursor';
const changeStreamOptions = {
fullDocument: 'updateLookup',
collation: { maxVariable: 'punct' },
maxAwaitTimeMS: 20000,
batchSize: 200
};
return client.connect().then(() => {
this.defer(() => client.close());
const db = client.db('integration_tests');
const coll = db.collection(collectionName);
const changeStream = coll.watch([], changeStreamOptions);
this.defer(() => changeStream.close());
expect(changeStream.cursor.resumeOptions).to.containSubset(changeStreamOptions);
});
}
});
// 9. $changeStream stage for ChangeStream against a server >=4.0 and <4.0.7 that has not received
// any results yet MUST include a startAtOperationTime option when resuming a change stream.
it('should include a startAtOperationTime field when resuming if no changes have been received', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=4.0 <4.0.7' } },
test: function(done) {
const configuration = this.configuration;
const ObjectId = configuration.require.ObjectId;
const Timestamp = configuration.require.Timestamp;
const Long = configuration.require.Long;
const OPERATION_TIME = new Timestamp(4, 1501511802);
const makeIsMaster = server => ({
__nodejs_mock_server__: true,
ismaster: true,
secondary: false,
me: server.uri(),
primary: server.uri(),
tags: { loc: 'ny' },
setName: 'rs',
setVersion: 1,
electionId: new ObjectId(0),
maxBsonObjectSize: 16777216,
maxMessageSizeBytes: 48000000,
maxWriteBatchSize: 1000,
localTime: new Date(),
maxWireVersion: 7,
minWireVersion: 0,
ok: 1,
hosts: [server.uri()],
operationTime: OPERATION_TIME,
$clusterTime: {
clusterTime: OPERATION_TIME
}
});
const AGGREGATE_RESPONSE = {
ok: 1,
cursor: {
firstBatch: [],
id: new Long('9064341847921713401'),
ns: 'test.test'
},
operationTime: OPERATION_TIME,
$clusterTime: {
clusterTime: OPERATION_TIME
}
};
const CHANGE_DOC = {
_id: {
ts: OPERATION_TIME,
ns: 'integration_tests.docsDataEvent',
_id: new ObjectId('597f407a8fd4abb616feca93')
},
operationType: 'insert',
ns: {
db: 'integration_tests',
coll: 'docsDataEvent'
},
fullDocument: {
_id: new ObjectId('597f407a8fd4abb616feca93'),
a: 1,
counter: 0
}
};
const GET_MORE_RESPONSE = {
ok: 1,
cursor: {
nextBatch: [CHANGE_DOC],
id: new Long('9064341847921713401'),
ns: 'test.test'
},
cursorId: new Long('9064341847921713401')
};
const dbName = 'integration_tests';
const collectionName = 'resumeWithStartAtOperationTime';
const connectOptions = {
validateOptions: true,
monitorCommands: true
};
let getMoreCounter = 0;
let changeStream;
let server;
let client;
let finish = err => {
finish = () => {};
Promise.resolve()
.then(() => changeStream && changeStream.close())
.then(() => client && client.close())
.then(() => done(err));
};
function primaryServerHandler(request) {
try {
const doc = request.document;
if (doc.ismaster) {
return request.reply(makeIsMaster(server));
} else if (doc.aggregate) {
return request.reply(AGGREGATE_RESPONSE);
} else if (doc.getMore) {
if (getMoreCounter++ === 0) {
request.reply({ ok: 0 });
return;
}
request.reply(GET_MORE_RESPONSE);
} else if (doc.endSessions) {
request.reply({ ok: 1 });
} else if (doc.killCursors) {
request.reply({ ok: 1 });
}
} catch (e) {
finish(e);
}
}
const started = [];
mock
.createServer()
.then(_server => (server = _server))
.then(() => server.setMessageHandler(primaryServerHandler))
.then(() => (client = configuration.newClient(`mongodb://${server.uri()}`, connectOptions)))
.then(() => client.connect())
.then(() => {
client.on('commandStarted', e => {
if (e.commandName === 'aggregate') {
started.push(e);
}
});
})
.then(() => client.db(dbName))
.then(db => db.collection(collectionName))
.then(col => col.watch(pipeline))
.then(_changeStream => (changeStream = _changeStream))
.then(() => changeStream.next())
.then(() => {
const first = started[0].command;
expect(first).to.have.nested.property('pipeline[0].$changeStream');
const firstStage = first.pipeline[0].$changeStream;
expect(firstStage).to.not.have.property('resumeAfter');
expect(firstStage).to.not.have.property('startAtOperationTime');
const second = started[1].command;
expect(second).to.have.nested.property('pipeline[0].$changeStream');
const secondStage = second.pipeline[0].$changeStream;
expect(secondStage).to.not.have.property('resumeAfter');
expect(secondStage).to.have.property('startAtOperationTime');
expect(secondStage.startAtOperationTime.equals(OPERATION_TIME)).to.be.ok;
})
.then(
() => finish(),
err => finish(err)
);
}
});
it('should emit close event after error event', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
const closeSpy = sinon.spy();
client.connect((err, client) => {
expect(err).to.not.exist;
this.defer(() => client.close());
const db = client.db('integration_tests');
const coll = db.collection('event_test');
// This will cause an error because the _id will be projected out, which causes the following error:
// "A change stream document has been received that lacks a resume token (_id)."
const changeStream = coll.watch([{ $project: { _id: false } }]);
changeStream.on('close', closeSpy);
changeStream.on('change', changeDoc => {
expect(changeDoc).to.be.null;
});
changeStream.on('error', err => {
expect(err).to.exist;
changeStream.close(() => {
expect(closeSpy).property('calledOnce').to.be.true;
done();
});
});
// Trigger the first database event
waitForStarted(changeStream, () => {
this.defer(coll.insertOne({ a: 1 }));
});
});
}
});
describe('should properly handle a changeStream event being processed mid-close', function() {
let client, coll, changeStream;
function write() {
return Promise.resolve()
.then(() => coll.insertOne({ a: 1 }))
.then(() => coll.insertOne({ b: 2 }));
}
function lastWrite() {
return coll.insertOne({ c: 3 });
}
beforeEach(function() {
client = this.configuration.newClient();
return client.connect().then(_client => {
client = _client;
coll = client.db(this.configuration.db).collection('tester');
changeStream = coll.watch();
});
});
afterEach(function() {
return Promise.resolve()
.then(() => {
if (changeStream && !changeStream.isClosed()) {
return changeStream.close();
}
})
.then(() => {
if (client) {
return client.close();
}
})
.then(() => {
coll = undefined;
changeStream = undefined;
client = undefined;
});
});
it('when invoked with promises', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: function() {
const test = this;
function read() {
return Promise.resolve()
.then(() => changeStream.next())
.then(() => changeStream.next())
.then(() => {
test.defer(lastWrite());
const nextP = changeStream.next();
return changeStream.close().then(() => nextP);
});
}
return Promise.all([read(), write()]).then(
() => Promise.reject(new Error('Expected operation to fail with error')),
err => expect(err.message).to.equal('ChangeStream is closed')
);
}
});
it('when invoked with callbacks', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: function(done) {
changeStream.next(() => {
changeStream.next(() => {
this.defer(lastWrite());
changeStream.next(err => {
try {
expect(err)
.property('message')
.to.equal('ChangeStream is closed');
done();
} catch (e) {
done(e);
}
});
// explicitly close the change stream after the write has begun
this.defer(changeStream.close());
});
});
this.defer(write().catch(() => {}));
}
});
it('when invoked using eventEmitter API', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: function(done) {
let closed = false;
const close = _err => {
if (closed) {
return;
}
closed = true;
return done(_err);
};
let counter = 0;
changeStream.on('change', () => {
counter += 1;
if (counter === 2) {
changeStream.close();
setTimeout(() => close());
} else if (counter >= 3) {
close(new Error('should not have received more than 2 events'));
}
});
changeStream.on('error', err => close(err));
waitForStarted(changeStream, () =>
write()
.then(() => lastWrite())
.catch(() => {})
);
}
});
});
describe('resumeToken', function() {
class MockServerManager {
constructor(config, commandIterators) {
this.config = config;
this.cmdList = new Set(['ismaster', 'endSessions', 'aggregate', 'getMore']);
this.database = 'test_db';
this.collection = 'test_coll';
this.ns = `${this.database}.${this.collection}`;
this._timestampCounter = 0;
this.cursorId = new this.config.require.Long('9064341847921713401');
this.commandIterators = commandIterators;
this.promise = this.init();
}
init() {
return mock.createServer().then(server => {
this.server = server;
this.server.setMessageHandler(request => {
const doc = request.document;
const opname = Object.keys(doc)[0];
let response = { ok: 0 };
if (this.cmdList.has(opname) && this[opname]) {
response = this[opname](doc);
}
request.reply(this.applyOpTime(response));
});
this.client = this.config.newClient(this.mongodbURI, { monitorCommands: true });
return this.client.connect().then(() => {
this.apm = { started: [], succeeded: [], failed: [] };
[
['commandStarted', this.apm.started],
['commandSucceeded', this.apm.succeeded],
['commandFailed', this.apm.failed]
].forEach(opts => {
const eventName = opts[0];
const target = opts[1];
this.client.on(eventName, e => {
if (e.commandName === 'aggregate' || e.commandName === 'getMore') {
target.push(e);
}
});
});
});
});
}
makeChangeStream(options) {
this.changeStream = this.client
.db(this.database)
.collection(this.collection)
.watch(options);
this.resumeTokenChangedEvents = [];
this.changeStream.on('resumeTokenChanged', resumeToken => {
this.resumeTokenChangedEvents.push({ resumeToken });
});
return this.changeStream;
}
teardown(e) {
let promise = Promise.resolve();
if (this.changeStream) {
promise = promise.then(() => this.changeStream.close()).catch();
}
if (this.client) {
promise = promise.then(() => this.client.close()).catch();
}
return promise.then(function() {
if (e) {
throw e;
}
});
}
ready() {
return this.promise;
}
get mongodbURI() {
return `mongodb://${this.server.uri()}`;
}
// Handlers for specific commands
ismaster() {
const uri = this.server.uri();
return Object.assign({}, mock.DEFAULT_ISMASTER_36, {
ismaster: true,
secondary: false,
me: uri,
primary: uri,
setName: 'rs',
localTime: new Date(),
ok: 1,
hosts: [uri]
});
}
endSessions() {
return { ok: 1 };
}
aggregate() {
let cursor;
try {
cursor = this._buildCursor('aggregate', 'firstBatch');
} catch (e) {
return { ok: 0, errmsg: e.message };
}
return {
ok: 1,
cursor
};
}
getMore() {
let cursor;
try {
cursor = this._buildCursor('getMore', 'nextBatch');
} catch (e) {
return { ok: 0, errmsg: e.message };
}
return {
ok: 1,
cursor,
cursorId: this.cursorId
};
}
// Helpers
timestamp() {
return new this.config.require.Timestamp(this._timestampCounter++, Date.now());
}
applyOpTime(obj) {
const operationTime = this.timestamp();
return Object.assign({}, obj, {
$clusterTime: { clusterTime: operationTime },
operationTime
});
}
_buildCursor(type, batchKey) {
const config = this.commandIterators[type].next().value;
if (!config) {
throw new Error('no more config for ' + type);
}
const batch = Array.from({ length: config.numDocuments || 0 }).map(() =>
this.changeEvent()
);
const cursor = {
[batchKey]: batch,
id: this.cursorId,
ns: this.ns
};
if (config.postBatchResumeToken) {
cursor.postBatchResumeToken = this.resumeToken();
}
return cursor;
}
changeEvent(operationType, fullDocument) {
fullDocument = fullDocument || {};
return {
_id: this.resumeToken(),
operationType,
ns: {
db: this.database,
coll: this.collection
},
fullDocument
};
}
resumeToken() {
return {
ts: this.timestamp(),
          ns: this.ns,
_id: new this.config.require.ObjectId()
};
}
}
// 11. For a ChangeStream under these conditions:
// Running against a server >=4.0.7.
// The batch is empty or has been iterated to the last document.
// Expected result:
// getResumeToken must return the postBatchResumeToken from the current command response.
describe('for emptied batch on server >= 4.0.7', function() {
it('must return the postBatchResumeToken from the current command response', function() {
const manager = new MockServerManager(this.configuration, {
aggregate: (function*() {
yield { numDocuments: 0, postBatchResumeToken: true };
})(),
getMore: (function*() {
yield { numDocuments: 1, postBatchResumeToken: true };
})()
});
return manager
.ready()
.then(() => {
return manager.makeChangeStream().next();
})
.then(
() => manager.teardown(),
err => manager.teardown(err)
)
.then(() => {
const tokens = manager.resumeTokenChangedEvents.map(e => e.resumeToken);
const successes = manager.apm.succeeded.map(e => {
try {
return e.reply.cursor;
} catch (e) {
return {};
}
});
expect(successes).to.have.a.lengthOf(2);
expect(successes[0]).to.have.a.property('postBatchResumeToken');
expect(successes[1]).to.have.a.property('postBatchResumeToken');
expect(successes[1]).to.have.a.nested.property('nextBatch[0]._id');
expect(tokens).to.have.a.lengthOf(2);
expect(tokens[0]).to.deep.equal(successes[0].postBatchResumeToken);
expect(tokens[1])
.to.deep.equal(successes[1].postBatchResumeToken)
.and.to.not.deep.equal(successes[1].nextBatch[0]._id);
});
});
});
// 12. For a ChangeStream under these conditions:
// Running against a server <4.0.7.
// The batch is empty or has been iterated to the last document.
// Expected result:
// getResumeToken must return the _id of the last document returned if one exists.
// getResumeToken must return resumeAfter from the initial aggregate if the option was specified.
// If ``resumeAfter`` was not specified, the ``getResumeToken`` result must be empty.
    describe('for emptied batch on server < 4.0.7', function() {
it('must return the _id of the last document returned if one exists', function() {
const manager = new MockServerManager(this.configuration, {
aggregate: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})(),
getMore: (function*() {
yield { numDocuments: 1, postBatchResumeToken: false };
})()
});
return manager
.ready()
.then(() => manager.makeChangeStream().next())
.then(
() => manager.teardown(),
err => manager.teardown(err)
)
.then(() => {
const tokens = manager.resumeTokenChangedEvents.map(e => e.resumeToken);
const successes = manager.apm.succeeded.map(e => {
try {
return e.reply.cursor;
} catch (e) {
return {};
}
});
expect(successes).to.have.a.lengthOf(2);
expect(successes[1]).to.have.a.nested.property('nextBatch[0]._id');
expect(tokens).to.have.a.lengthOf(1);
expect(tokens[0]).to.deep.equal(successes[1].nextBatch[0]._id);
});
});
it('must return resumeAfter from the initial aggregate if the option was specified', function() {
const manager = new MockServerManager(this.configuration, {
aggregate: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})(),
getMore: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})()
});
let token;
const resumeAfter = manager.resumeToken();
return manager
.ready()
.then(() => {
return new Promise(resolve => {
const changeStream = manager.makeChangeStream({ resumeAfter });
let counter = 0;
changeStream.cursor.on('response', () => {
if (counter === 1) {
token = changeStream.resumeToken;
resolve();
}
counter += 1;
});
// Note: this is expected to fail
changeStream.next().catch(() => {});
});
})
.then(
() => manager.teardown(),
err => manager.teardown(err)
)
.then(() => {
expect(token).to.deep.equal(resumeAfter);
});
});
it('must be empty if resumeAfter options was not specified', function() {
const manager = new MockServerManager(this.configuration, {
aggregate: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})(),
getMore: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})()
});
let token;
return manager
.ready()
.then(() => {
return new Promise(resolve => {
const changeStream = manager.makeChangeStream();
let counter = 0;
changeStream.cursor.on('response', () => {
if (counter === 1) {
token = changeStream.resumeToken;
resolve();
}
counter += 1;
});
// Note: this is expected to fail
changeStream.next().catch(() => {});
});
})
.then(
() => manager.teardown(),
err => manager.teardown(err)
)
.then(() => {
expect(token).to.not.exist;
});
});
});
// 13. For a ChangeStream under these conditions:
// The batch is not empty.
// The batch has been iterated up to but not including the last element.
// Expected result:
// getResumeToken must return the _id of the previous document returned.
describe('for non-empty batch iterated up to but not including the last element', function() {
it('must return the _id of the previous document returned', function() {
const manager = new MockServerManager(this.configuration, {
aggregate: (function*() {
yield { numDocuments: 2, postBatchResumeToken: true };
})(),
getMore: (function*() {})()
});
return manager
.ready()
.then(() => {
return manager.makeChangeStream().next();
})
.then(
() => manager.teardown(),
err => manager.teardown(err)
)
.then(() => {
const tokens = manager.resumeTokenChangedEvents.map(e => e.resumeToken);
const successes = manager.apm.succeeded.map(e => {
try {
return e.reply.cursor;
} catch (e) {
return {};
}
});
expect(successes).to.have.a.lengthOf(1);
expect(successes[0]).to.have.a.nested.property('firstBatch[0]._id');
expect(successes[0]).to.have.a.property('postBatchResumeToken');
expect(tokens).to.have.a.lengthOf(1);
expect(tokens[0])
.to.deep.equal(successes[0].firstBatch[0]._id)
.and.to.not.deep.equal(successes[0].postBatchResumeToken);
});
});
});
// 14. For a ChangeStream under these conditions:
// The batch is not empty.
// The batch hasn’t been iterated at all.
// Only the initial aggregate command has been executed.
// Expected result:
// getResumeToken must return startAfter from the initial aggregate if the option was specified.
// getResumeToken must return resumeAfter from the initial aggregate if the option was specified.
// If neither the startAfter nor resumeAfter options were specified, the getResumeToken result must be empty.
describe('for non-empty non-iterated batch where only the initial aggregate command has been executed', function() {
it('must return startAfter from the initial aggregate if the option was specified', function() {
const manager = new MockServerManager(this.configuration, {
aggregate: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})(),
getMore: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})()
});
let token;
const startAfter = manager.resumeToken();
const resumeAfter = manager.resumeToken();
return manager
.ready()
.then(() => {
return new Promise(resolve => {
const changeStream = manager.makeChangeStream({ startAfter, resumeAfter });
changeStream.cursor.once('response', () => {
token = changeStream.resumeToken;
resolve();
});
// Note: this is expected to fail
changeStream.next().catch(() => {});
});
})
.then(
() => manager.teardown(),
err => manager.teardown(err)
)
.then(() => {
expect(token)
.to.deep.equal(startAfter)
.and.to.not.deep.equal(resumeAfter);
});
});
it('must return resumeAfter from the initial aggregate if the option was specified', function() {
const manager = new MockServerManager(this.configuration, {
aggregate: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})(),
getMore: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})()
});
let token;
const resumeAfter = manager.resumeToken();
return manager
.ready()
.then(() => {
return new Promise(resolve => {
const changeStream = manager.makeChangeStream({ resumeAfter });
changeStream.cursor.once('response', () => {
token = changeStream.resumeToken;
resolve();
});
// Note: this is expected to fail
changeStream.next().catch(() => {});
});
})
.then(
() => manager.teardown(),
err => manager.teardown(err)
)
.then(() => {
expect(token).to.deep.equal(resumeAfter);
});
});
it('must be empty if neither the startAfter nor resumeAfter options were specified', function() {
const manager = new MockServerManager(this.configuration, {
aggregate: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})(),
getMore: (function*() {
yield { numDocuments: 0, postBatchResumeToken: false };
})()
});
let token;
return manager
.ready()
.then(() => {
return new Promise(resolve => {
const changeStream = manager.makeChangeStream();
changeStream.cursor.once('response', () => {
token = changeStream.resumeToken;
resolve();
});
// Note: this is expected to fail
changeStream.next().catch(() => {});
});
})
.then(
() => manager.teardown(),
err => manager.teardown(err)
)
.then(() => {
expect(token).to.not.exist;
});
});
});
});
describe('tryNext', function() {
it('should return null on single iteration of empty cursor', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: withChangeStream((collection, changeStream, done) => {
tryNext(changeStream, (err, doc) => {
expect(err).to.not.exist;
expect(doc).to.not.exist;
done();
});
})
});
it('should iterate a change stream until first empty batch', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: withChangeStream((collection, changeStream, done) => {
waitForStarted(changeStream, () => {
collection.insertOne({ a: 42 }, err => {
expect(err).to.not.exist;
collection.insertOne({ b: 24 }, err => {
expect(err).to.not.exist;
});
});
});
tryNext(changeStream, (err, doc) => {
expect(err).to.not.exist;
expect(doc).to.exist;
tryNext(changeStream, (err, doc) => {
expect(err).to.not.exist;
expect(doc).to.exist;
tryNext(changeStream, (err, doc) => {
expect(err).to.not.exist;
expect(doc).to.not.exist;
done();
});
});
});
})
});
});
describe('startAfter', function() {
let client;
let coll;
let startAfter;
function recordEvent(events, e) {
if (e.commandName !== 'aggregate') return;
events.push({ $changeStream: e.command.pipeline[0].$changeStream });
}
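    // Only the $changeStream stage of aggregate commands is recorded here, which is
    // what the startAfter/resumeAfter assertions below inspect.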
beforeEach(function(done) {
const configuration = this.configuration;
client = configuration.newClient({ monitorCommands: true });
client.connect(err => {
expect(err).to.not.exist;
coll = client.db('integration_tests').collection('setupAfterTest');
const changeStream = coll.watch();
waitForStarted(changeStream, () => {
coll.insertOne({ x: 1 }, { w: 'majority', j: true }, err => {
expect(err).to.not.exist;
coll.drop(err => {
expect(err).to.not.exist;
});
});
});
changeStream.on('change', change => {
if (change.operationType === 'invalidate') {
startAfter = change._id;
changeStream.close(done);
}
});
});
});
afterEach(function(done) {
client.close(done);
});
it('should work with events', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=4.1.1' } },
test: function(done) {
const changeStream = coll.watch([], { startAfter });
this.defer(() => changeStream.close());
coll.insertOne({ x: 2 }, { w: 'majority', j: true }, err => {
expect(err).to.not.exist;
changeStream.once('change', change => {
expect(change).to.containSubset({
operationType: 'insert',
fullDocument: { x: 2 }
});
done();
});
});
}
});
it('should work with callbacks', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=4.1.1' } },
test: function(done) {
const changeStream = coll.watch([], { startAfter });
this.defer(() => changeStream.close());
coll.insertOne({ x: 2 }, { w: 'majority', j: true }, err => {
expect(err).to.not.exist;
exhaust(changeStream, (err, bag) => {
expect(err).to.not.exist;
const finalOperation = bag.pop();
expect(finalOperation).to.containSubset({
operationType: 'insert',
fullDocument: { x: 2 }
});
done();
});
});
}
});
// 17. $changeStream stage for ChangeStream started with startAfter against a server >=4.1.1
// that has not received any results yet
// - MUST include a startAfter option
// - MUST NOT include a resumeAfter option
// when resuming a change stream.
it('$changeStream without results must include startAfter and not resumeAfter', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=4.1.1' } },
test: function(done) {
const events = [];
client.on('commandStarted', e => recordEvent(events, e));
const changeStream = coll.watch([], { startAfter });
this.defer(() => changeStream.close());
changeStream.once('change', change => {
expect(change).to.containSubset({
operationType: 'insert',
fullDocument: { x: 2 }
});
expect(events)
.to.be.an('array')
.with.lengthOf(3);
expect(events[0]).nested.property('$changeStream.startAfter').to.exist;
expect(events[1]).to.equal('error');
expect(events[2]).nested.property('$changeStream.startAfter').to.exist;
done();
});
waitForStarted(changeStream, () => {
triggerResumableError(changeStream, () => events.push('error'));
this.defer(coll.insertOne({ x: 2 }, { w: 'majority', j: true }));
});
}
});
// 18. $changeStream stage for ChangeStream started with startAfter against a server >=4.1.1
// that has received at least one result
// - MUST include a resumeAfter option
// - MUST NOT include a startAfter option
// when resuming a change stream.
it('$changeStream with results must include resumeAfter and not startAfter', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=4.1.1' } },
test: function(done) {
let events = [];
client.on('commandStarted', e => recordEvent(events, e));
const changeStream = coll.watch([], { startAfter });
this.defer(() => changeStream.close());
changeStream.on('change', change => {
events.push({ change: { insert: { x: change.fullDocument.x } } });
switch (change.fullDocument.x) {
case 2:
// only events after this point are relevant to this test
events = [];
triggerResumableError(changeStream, () => events.push('error'));
break;
case 3:
expect(events)
.to.be.an('array')
.with.lengthOf(3);
expect(events[0]).to.equal('error');
expect(events[1]).nested.property('$changeStream.resumeAfter').to.exist;
expect(events[2]).to.eql({ change: { insert: { x: 3 } } });
done();
break;
}
});
waitForStarted(changeStream, () =>
this.defer(
coll
.insertOne({ x: 2 }, { w: 'majority', j: true })
.then(() => coll.insertOne({ x: 3 }, { w: 'majority', j: true }))
)
);
}
});
});
});
describe('Change Stream Resume Error Tests', function() {
it('should continue emitting change events after a resumable error', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: withChangeStream((collection, changeStream, done) => {
const docs = [];
changeStream.on('change', change => {
expect(change).to.exist;
docs.push(change);
if (docs.length === 2) {
expect(docs[0]).to.containSubset({
operationType: 'insert',
fullDocument: { a: 42 }
});
expect(docs[1]).to.containSubset({
operationType: 'insert',
fullDocument: { b: 24 }
});
done();
}
});
waitForStarted(changeStream, () => {
collection.insertOne({ a: 42 }, err => {
expect(err).to.not.exist;
triggerResumableError(changeStream, 1000, () => {
collection.insertOne({ b: 24 }, err => {
expect(err).to.not.exist;
});
});
});
});
})
});
it('should continue iterating changes after a resumable error', {
metadata: { requires: { topology: 'replicaset', mongodb: '>=3.6' } },
test: withChangeStream((collection, changeStream, done) => {
waitForStarted(changeStream, () => {
collection.insertOne({ a: 42 }, err => {
expect(err).to.not.exist;
triggerResumableError(changeStream, 250, () => {
changeStream.hasNext((err1, hasNext) => {
expect(err1).to.not.exist;
expect(hasNext).to.be.true;
changeStream.next((err, change) => {
expect(err).to.not.exist;
expect(change).to.containSubset({
operationType: 'insert',
fullDocument: { b: 24 }
});
done();
});
});
collection.insertOne({ b: 24 });
});
});
});
changeStream.hasNext((err, hasNext) => {
expect(err).to.not.exist;
expect(hasNext).to.be.true;
changeStream.next((err, change) => {
expect(err).to.not.exist;
expect(change).to.containSubset({
operationType: 'insert',
fullDocument: { a: 42 }
});
});
});
})
});
});
context('NODE-2626', function() {
let mockServer;
afterEach(() => mock.cleanup());
beforeEach(() => mock.createServer().then(server => (mockServer = server)));
it('changeStream should close if cursor id for initial aggregate is Long.ZERO', function(done) {
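      // The mock replies to the initial aggregate with cursor id Long.ZERO (an already
      // exhausted cursor), so the change stream is expected to surface a
      // 'ChangeStream is closed' error rather than issuing further getMores.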
mockServer.setMessageHandler(req => {
const doc = req.document;
if (doc.ismaster) {
return req.reply(mock.DEFAULT_ISMASTER_36);
}
if (doc.aggregate) {
return req.reply({
ok: 1,
cursor: {
id: Long.ZERO,
firstBatch: []
}
});
}
if (doc.getMore) {
return req.reply({
ok: 1,
cursor: {
id: new Long(1407, 1407),
nextBatch: []
}
});
}
req.reply({ ok: 1 });
});
const client = this.configuration.newClient(`mongodb://${mockServer.uri()}/`, {
useUnifiedTopology: true
});
client.connect(err => {
expect(err).to.not.exist;
const collection = client.db('cs').collection('test');
const changeStream = collection.watch();
changeStream.next((err, doc) => {
expect(err).to.exist;
expect(doc).to.not.exist;
expect(err.message).to.equal('ChangeStream is closed');
changeStream.close(() => client.close(done));
});
});
});
});
| 1 | 19,152 | Is this a sometimes leaky test? | mongodb-node-mongodb-native | js |
@@ -96,7 +96,11 @@ func (v ConstraintGenerator) typeof(n Node) (PolyType, error) {
ftv := n.ExternType.freeVars(nil)
subst := make(Substitution, len(ftv))
for _, tv := range ftv {
- subst[tv] = v.cs.f.Fresh()
+ f := v.cs.f.Fresh()
+ for ftv.contains(f) {
+ f = v.cs.f.Fresh()
+ }
+ subst[tv] = f
}
t := subst.ApplyType(n.ExternType)
// Check if this type knows about its kind constraints | 1 | package semantic
import (
"fmt"
"strings"
"github.com/influxdata/flux/ast"
"github.com/pkg/errors"
)
// GenerateConstraints walks the graph and generates constraints between type variables provided in the annotations.
func GenerateConstraints(node Node, annotator Annotator) (*Constraints, error) {
cg := ConstraintGenerator{
cs: &Constraints{
f: annotator.f,
annotations: annotator.annotations,
kindConst: make(map[Tvar][]Kind),
},
env: NewEnv(),
err: new(error),
}
Walk(NewScopedVisitor(cg), node)
//log.Println("GenerateConstraints", cg.cs)
return cg.cs, *cg.err
}
// ConstraintGenerator implements NestingVisitor and generates constraints as it walks the graph.
type ConstraintGenerator struct {
cs *Constraints
env *Env
err *error
}
// Nest nests the internal type environment to obey scoping rules.
func (v ConstraintGenerator) Nest() NestingVisitor {
return ConstraintGenerator{
cs: v.cs,
env: v.env.Nest(),
err: v.err,
}
}
// Visit visits each node, the algorithm is depth first so nothing is performed in Visit except for an error check.
func (v ConstraintGenerator) Visit(node Node) Visitor {
if *v.err != nil {
return nil
}
return v
}
// Done visits nodes after all children of the node have been visited.
func (v ConstraintGenerator) Done(node Node) {
a := v.cs.annotations[node]
a.Type, a.Err = v.typeof(node)
if a.Type != nil {
v.cs.annotations[node] = a
if !a.Var.Equal(a.Type) {
v.cs.AddTypeConst(a.Var, a.Type, node.Location())
}
}
a.Err = errors.Wrapf(a.Err, "type error %v", node.Location())
//log.Printf("typeof %T@%v %v %v %v", node, node.Location(), a.Var, a.Type, a.Err)
if *v.err == nil && a.Err != nil {
*v.err = a.Err
}
}
// lookup returns the poly type of the visited node.
func (v ConstraintGenerator) lookup(n Node) (PolyType, error) {
a, ok := v.cs.annotations[n]
if !ok {
return nil, fmt.Errorf("no annotation found for %T@%v", n, n.Location())
}
if a.Type == nil {
return nil, fmt.Errorf("no type annotation found for %T@%v", n, n.Location())
}
return a.Type, a.Err
}
// scheme produces a type scheme from a poly type, this includes the generalize step.
func (v ConstraintGenerator) scheme(t PolyType) Scheme {
ftv := t.freeVars(v.cs).diff(v.env.freeVars(v.cs))
return Scheme{
T: t,
Free: ftv,
}
}
// typeof determines the poly type of a node.
func (v ConstraintGenerator) typeof(n Node) (PolyType, error) {
nodeVar := v.cs.annotations[n].Var
switch n := n.(type) {
case *ExternalVariableAssignment:
// Do not trust external type variables,
// substitute them with fresh vars.
ftv := n.ExternType.freeVars(nil)
subst := make(Substitution, len(ftv))
for _, tv := range ftv {
subst[tv] = v.cs.f.Fresh()
}
t := subst.ApplyType(n.ExternType)
// Check if this type knows about its kind constraints
if kt, ok := t.(KindConstrainter); ok {
tv := v.cs.f.Fresh()
v.cs.AddKindConst(tv, kt.KindConstraint())
t = tv
}
existing, ok := v.env.LocalLookup(n.Identifier.Name)
if ok {
v.cs.AddTypeConst(t, existing.T, n.Location())
}
scheme := v.scheme(t)
v.env.Set(n.Identifier.Name, scheme)
return nil, nil
case *NativeVariableAssignment:
t, err := v.lookup(n.Init)
if err != nil {
return nil, err
}
existing, ok := v.env.LocalLookup(n.Identifier.Name)
if ok {
v.cs.AddTypeConst(t, existing.T, n.Location())
}
scheme := v.scheme(t)
v.env.Set(n.Identifier.Name, scheme)
return nil, nil
case *IdentifierExpression:
scheme, ok := v.env.Lookup(n.Name)
if !ok {
return nil, fmt.Errorf("undefined identifier %q", n.Name)
}
t := v.cs.Instantiate(scheme, n.Location())
return t, nil
case *ReturnStatement:
return v.lookup(n.Argument)
case *Block:
return v.lookup(n.ReturnStatement())
case *BinaryExpression:
l, err := v.lookup(n.Left)
if err != nil {
return nil, err
}
r, err := v.lookup(n.Right)
if err != nil {
return nil, err
}
switch n.Operator {
case
ast.AdditionOperator,
ast.SubtractionOperator,
ast.MultiplicationOperator,
ast.DivisionOperator:
v.cs.AddTypeConst(l, r, n.Location())
return l, nil
case
ast.GreaterThanEqualOperator,
ast.LessThanEqualOperator,
ast.GreaterThanOperator,
ast.LessThanOperator,
ast.NotEqualOperator,
ast.EqualOperator:
return Bool, nil
case
ast.RegexpMatchOperator,
ast.NotRegexpMatchOperator:
v.cs.AddTypeConst(l, String, n.Location())
v.cs.AddTypeConst(r, Regexp, n.Location())
return Bool, nil
default:
return nil, fmt.Errorf("unsupported binary operator %v", n.Operator)
}
case *LogicalExpression:
l, err := v.lookup(n.Left)
if err != nil {
return nil, err
}
r, err := v.lookup(n.Right)
if err != nil {
return nil, err
}
v.cs.AddTypeConst(l, Bool, n.Location())
v.cs.AddTypeConst(r, Bool, n.Location())
return Bool, nil
case *UnaryExpression:
t, err := v.lookup(n.Argument)
if err != nil {
return nil, err
}
switch n.Operator {
case ast.NotOperator:
v.cs.AddTypeConst(t, Bool, n.Location())
return Bool, nil
}
return t, nil
case *FunctionExpression:
var parameters map[string]PolyType
var required LabelSet
var pipeArgument string
if n.Block.Parameters != nil {
if n.Block.Parameters.Pipe != nil {
pipeArgument = n.Block.Parameters.Pipe.Name
}
parameters = make(map[string]PolyType, len(n.Block.Parameters.List))
required = make([]string, 0, len(parameters))
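			// A parameter is required unless it has a default value or is the pipe
			// parameter; pipe parameters are additionally exposed under the internal
			// pipeLabel so call expressions can unify against them.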
for _, param := range n.Block.Parameters.List {
t, err := v.lookup(param)
if err != nil {
return nil, err
}
isPipe := param.Key.Name == pipeArgument
parameters[param.Key.Name] = t
if isPipe {
parameters[pipeLabel] = t
}
hasDefault := false
if n.Defaults != nil {
for _, p := range n.Defaults.Properties {
if p.Key.Name == param.Key.Name {
hasDefault = true
dt, err := v.lookup(p)
if err != nil {
return nil, err
}
v.cs.AddTypeConst(t, dt, p.Location())
break
}
}
}
if !hasDefault && !isPipe {
required = append(required, param.Key.Name)
}
}
}
ret, err := v.lookup(n.Block)
if err != nil {
return nil, err
}
return function{
parameters: parameters,
required: required,
ret: ret,
pipeArgument: pipeArgument,
}, nil
case *FunctionParameter:
v.env.Set(n.Key.Name, Scheme{T: nodeVar})
return nodeVar, nil
case *FunctionBlock:
return v.lookup(n.Body)
case *CallExpression:
typ, err := v.lookup(n.Callee)
if err != nil {
return nil, err
}
parameters := make(map[string]PolyType, len(n.Arguments.Properties))
required := make([]string, 0, len(parameters))
for _, arg := range n.Arguments.Properties {
t, err := v.lookup(arg.Value)
if err != nil {
return nil, err
}
parameters[arg.Key.Name] = t
required = append(required, arg.Key.Name)
}
if n.Pipe != nil {
t, err := v.lookup(n.Pipe)
if err != nil {
return nil, err
}
parameters[pipeLabel] = t
}
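		// Unify the callee's type with a function type built from the supplied
		// arguments; the fresh return variable then stands for the type of the call.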
ft := function{
parameters: parameters,
required: required,
ret: v.cs.f.Fresh(),
}
v.cs.AddTypeConst(typ, ft, n.Location())
return ft.ret, nil
case *ObjectExpression:
properties := make(map[string]PolyType, len(n.Properties))
upper := make([]string, 0, len(properties))
for _, field := range n.Properties {
t, err := v.lookup(field.Value)
if err != nil {
return nil, err
}
properties[field.Key.Name] = t
upper = append(upper, field.Key.Name)
}
v.cs.AddKindConst(nodeVar, ObjectKind{
properties: properties,
lower: nil,
upper: upper,
})
return nodeVar, nil
case *Property:
return v.lookup(n.Value)
case *MemberExpression:
ptv := v.cs.f.Fresh()
t, err := v.lookup(n.Object)
if err != nil {
return nil, err
}
tv, ok := t.(Tvar)
if !ok {
return nil, errors.New("member object must be a type variable")
}
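		// Constrain the object to expose the accessed property; the fresh variable ptv
		// stands for that property's type and becomes the type of the member expression.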
v.cs.AddKindConst(tv, ObjectKind{
properties: map[string]PolyType{n.Property: ptv},
lower: LabelSet{n.Property},
upper: AllLabels(),
})
return ptv, nil
case *IndexExpression:
ptv := v.cs.f.Fresh()
t, err := v.lookup(n.Array)
if err != nil {
return nil, err
}
tv, ok := t.(Tvar)
if !ok {
return nil, errors.New("array must be a type variable")
}
idx, err := v.lookup(n.Index)
if err != nil {
return nil, err
}
v.cs.AddKindConst(tv, ArrayKind{ptv})
v.cs.AddTypeConst(idx, Int, n.Index.Location())
return ptv, nil
case *ArrayExpression:
elt := v.cs.f.Fresh()
at := array{elt}
for _, el := range n.Elements {
t, err := v.lookup(el)
if err != nil {
return nil, err
}
v.cs.AddTypeConst(t, elt, el.Location())
}
v.cs.AddKindConst(nodeVar, ArrayKind{at.typ})
v.cs.AddTypeConst(nodeVar, at, n.Location())
return nodeVar, nil
case *StringLiteral:
return String, nil
case *IntegerLiteral:
return Int, nil
case *UnsignedIntegerLiteral:
return UInt, nil
case *FloatLiteral:
return Float, nil
case *BooleanLiteral:
return Bool, nil
case *DateTimeLiteral:
return Time, nil
case *DurationLiteral:
return Duration, nil
case *RegexpLiteral:
return Regexp, nil
	// Explicitly list nodes that do not produce constraints
case *Program,
*Extern,
*ExternBlock,
*OptionStatement,
*Identifier,
*FunctionParameters,
*ExpressionStatement:
return nil, nil
default:
return nil, fmt.Errorf("unsupported %T", n)
}
}
// Constraints is a set of constraints.
type Constraints struct {
f *fresher
annotations map[Node]annotation
typeConst []TypeConstraint
kindConst map[Tvar][]Kind
}
func (c *Constraints) Copy() *Constraints {
n := &Constraints{
f: new(fresher),
annotations: make(map[Node]annotation, len(c.annotations)),
typeConst: make([]TypeConstraint, len(c.typeConst)),
kindConst: make(map[Tvar][]Kind, len(c.kindConst)),
}
*n.f = *c.f
for k, v := range c.annotations {
n.annotations[k] = v
}
copy(n.typeConst, c.typeConst)
for k, v := range c.kindConst {
kinds := make([]Kind, len(v))
copy(kinds, v)
n.kindConst[k] = kinds
}
return n
}
// TypeConstraint states that the left and right types must be equal.
type TypeConstraint struct {
l, r PolyType
loc ast.SourceLocation
}
func (tc TypeConstraint) String() string {
return fmt.Sprintf("%v = %v @ %v", tc.l, tc.r, tc.loc)
}
func (c *Constraints) AddTypeConst(l, r PolyType, loc ast.SourceLocation) {
c.typeConst = append(c.typeConst, TypeConstraint{
l: l,
r: r,
loc: loc,
})
}
func (c *Constraints) AddKindConst(tv Tvar, k Kind) {
c.kindConst[tv] = append(c.kindConst[tv], k)
}
// Instantiate produces a new poly type where the free variables from the scheme have been made fresh.
// This way each new instantiation of a scheme is independent of the other but all have the same constraint structure.
func (c *Constraints) Instantiate(s Scheme, loc ast.SourceLocation) (t PolyType) {
if len(s.Free) == 0 {
return s.T
}
	// Create a substitution for the new type variables
subst := make(Substitution, len(s.Free))
for _, tv := range s.Free {
fresh := c.f.Fresh()
subst[tv] = fresh
}
// Add any new kind constraints
for _, tv := range s.Free {
ks, ok := c.kindConst[tv]
if ok {
ntv := subst.ApplyTvar(tv)
for _, k := range ks {
nk := subst.ApplyKind(k)
c.AddKindConst(ntv, nk)
}
}
}
// Add any new type constraints
for _, tc := range c.typeConst {
fvs := tc.l.freeVars(c)
// Only add new constraints that constrain the left hand free vars
if fvs.hasIntersect(s.Free) {
l := subst.ApplyType(tc.l)
r := subst.ApplyType(tc.r)
c.AddTypeConst(l, r, loc)
}
}
return subst.ApplyType(s.T)
}
func (c *Constraints) String() string {
var builder strings.Builder
builder.WriteString("{\nannotations:\n")
for n, ann := range c.annotations {
fmt.Fprintf(&builder, "%T@%v = %v,\n", n, n.Location(), ann.Var)
}
builder.WriteString("types:\n")
for _, tc := range c.typeConst {
fmt.Fprintf(&builder, "%v,\n", tc)
}
builder.WriteString("kinds:\n")
for tv, ks := range c.kindConst {
fmt.Fprintf(&builder, "%v = %v,\n", tv, ks)
}
builder.WriteString("}")
return builder.String()
}
| 1 | 9,155 | It should be possible to create a test case that enter the loop. That would be a good enough test case for me. Have a look at the extern type inference test cases that already exist. Since you can just pick the type variables the extern type use, you should be able to create a conflict that requires this loop to fix. | influxdata-flux | go |
@@ -368,6 +368,11 @@ This initializes all modules such as audio, IAccessible, keyboard, mouse, and GU
wxLang=locale.FindLanguageInfo(lang.split('_')[0])
if hasattr(sys,'frozen'):
locale.AddCatalogLookupPathPrefix(os.path.join(os.getcwdu(),"locale"))
+ # #8064: Wx might know the language, but may not actually contain a translation database for that language.
+ # If we try to initialize this language, wx will show a warning dialog.
+ # Therefore treat this situation like wx not knowing the language at all.
+ if not locale.IsAvailable(wxLang.Language):
+ wxLang=None
if wxLang:
try:
locale.Init(wxLang.Language) | 1 | # -*- coding: UTF-8 -*-
#core.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2018 NV Access Limited, Aleksey Sadovoy, Christopher Toth, Joseph Lee, Peter Vágner, Derek Riemer, Babbage B.V., Zahari Yurukov
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""NVDA core"""
# Do this first to initialise comtypes.client.gen_dir and the comtypes.gen search path.
import comtypes.client
# Append our comInterfaces directory to the comtypes.gen search path.
import comtypes.gen
import comInterfaces
comtypes.gen.__path__.append(comInterfaces.__path__[0])
#Apply several monkey patches to comtypes
import comtypesMonkeyPatches
import sys
import winVersion
import thread
import nvwave
import os
import time
import ctypes
import logHandler
import globalVars
from logHandler import log
import addonHandler
import extensionPoints
# inform those who want to know that NVDA has finished starting up.
postNvdaStartup = extensionPoints.Action()
PUMP_MAX_DELAY = 10
#: The thread identifier of the main thread.
mainThreadId = thread.get_ident()
#: Notifies when a window message has been received by NVDA.
#: This allows components to perform an action when several system events occur,
#: such as power, screen orientation and hardware changes.
#: Handlers are called with three arguments.
#: @param msg: The window message.
#: @type msg: int
#: @param wParam: Additional message information.
#: @type wParam: int
#: @param lParam: Additional message information.
#: @type lParam: int
post_windowMessageReceipt = extensionPoints.Action()
_pump = None
_isPumpPending = False
def doStartupDialogs():
import config
import gui
	# Translators: The title of the dialog to tell users that there are errors in the configuration file.
if config.conf.baseConfigError:
import wx
gui.messageBox(
# Translators: A message informing the user that there are errors in the configuration file.
_("Your configuration file contains errors. "
"Your configuration has been reset to factory defaults.\n"
"More details about the errors can be found in the log file."),
# Translators: The title of the dialog to tell users that there are errors in the configuration file.
_("Configuration File Error"),
wx.OK | wx.ICON_EXCLAMATION)
if config.conf["general"]["showWelcomeDialogAtStartup"]:
gui.WelcomeDialog.run()
if config.conf["speechViewer"]["showSpeechViewerAtStartup"]:
gui.mainFrame.onToggleSpeechViewerCommand(evt=None)
import inputCore
if inputCore.manager.userGestureMap.lastUpdateContainedError:
import wx
gui.messageBox(_("Your gesture map file contains errors.\n"
"More details about the errors can be found in the log file."),
_("gesture map File Error"), wx.OK|wx.ICON_EXCLAMATION)
if not globalVars.appArgs.secure and not config.isAppX and not config.conf['update']['askedAllowUsageStats']:
gui.runScriptModalDialog(gui.AskAllowUsageStatsDialog(None))
def restart(disableAddons=False, debugLogging=False):
"""Restarts NVDA by starting a new copy with -r."""
if globalVars.appArgs.launcher:
import wx
globalVars.exitCode=3
wx.GetApp().ExitMainLoop()
return
import subprocess
import winUser
import shellapi
options=[]
if "-r" not in sys.argv:
options.append("-r")
try:
sys.argv.remove('--disable-addons')
except ValueError:
pass
try:
sys.argv.remove('--debug-logging')
except ValueError:
pass
if disableAddons:
options.append('--disable-addons')
if debugLogging:
options.append('--debug-logging')
try:
sys.argv.remove("--ease-of-access")
except ValueError:
pass
shellapi.ShellExecute(None, None,
sys.executable.decode("mbcs"),
subprocess.list2cmdline(sys.argv + options).decode("mbcs"),
None,
# #4475: ensure that the first window of the new process is not hidden by providing SW_SHOWNORMAL
winUser.SW_SHOWNORMAL)
def resetConfiguration(factoryDefaults=False):
"""Loads the configuration, installs the correct language support and initialises audio so that it will use the configured synth and speech settings.
"""
import config
import braille
import brailleInput
import speech
import languageHandler
import inputCore
log.debug("Terminating braille")
braille.terminate()
log.debug("Terminating brailleInput")
brailleInput.terminate()
log.debug("terminating speech")
speech.terminate()
log.debug("terminating addonHandler")
addonHandler.terminate()
log.debug("Reloading config")
config.conf.reset(factoryDefaults=factoryDefaults)
logHandler.setLogLevelFromConfig()
#Language
lang = config.conf["general"]["language"]
log.debug("setting language to %s"%lang)
languageHandler.setLanguage(lang)
# Addons
addonHandler.initialize()
#Speech
log.debug("initializing speech")
speech.initialize()
#braille
log.debug("Initializing brailleInput")
brailleInput.initialize()
log.debug("Initializing braille")
braille.initialize()
log.debug("Reloading user and locale input gesture maps")
inputCore.manager.loadUserGestureMap()
inputCore.manager.loadLocaleGestureMap()
import audioDucking
if audioDucking.isAudioDuckingSupported():
audioDucking.handlePostConfigProfileSwitch()
log.info("Reverted to saved configuration")
def _setInitialFocus():
"""Sets the initial focus if no focus event was received at startup.
"""
import eventHandler
import api
if eventHandler.lastQueuedFocusObject:
# The focus has already been set or a focus event is pending.
return
try:
focus = api.getDesktopObject().objectWithFocus()
if focus:
eventHandler.queueEvent('gainFocus', focus)
except:
log.exception("Error retrieving initial focus")
def main():
"""NVDA's core main loop.
This initializes all modules such as audio, IAccessible, keyboard, mouse, and GUI. Then it initialises the wx application object and sets up the core pump, which checks the queues and executes functions when requested. Finally, it starts the wx main loop.
"""
log.debug("Core starting")
ctypes.windll.user32.SetProcessDPIAware()
import config
if not globalVars.appArgs.configPath:
globalVars.appArgs.configPath=config.getUserDefaultConfigPath(useInstalledPathIfExists=globalVars.appArgs.launcher)
#Initialize the config path (make sure it exists)
config.initConfigPath()
log.info("Config dir: %s"%os.path.abspath(globalVars.appArgs.configPath))
log.debug("loading config")
import config
config.initialize()
if not globalVars.appArgs.minimal and config.conf["general"]["playStartAndExitSounds"]:
try:
nvwave.playWaveFile("waves\\start.wav")
except:
pass
logHandler.setLogLevelFromConfig()
try:
lang = config.conf["general"]["language"]
import languageHandler
log.debug("setting language to %s"%lang)
languageHandler.setLanguage(lang)
except:
log.warning("Could not set language to %s"%lang)
import versionInfo
log.info("NVDA version %s" % versionInfo.version)
log.info("Using Windows version %s" % winVersion.winVersionText)
log.info("Using Python version %s"%sys.version)
log.info("Using comtypes version %s"%comtypes.__version__)
# Set a reasonable timeout for any socket connections NVDA makes.
import socket
socket.setdefaulttimeout(10)
log.debug("Initializing add-ons system")
addonHandler.initialize()
if globalVars.appArgs.disableAddons:
log.info("Add-ons are disabled. Restart NVDA to enable them.")
import appModuleHandler
log.debug("Initializing appModule Handler")
appModuleHandler.initialize()
import NVDAHelper
log.debug("Initializing NVDAHelper")
NVDAHelper.initialize()
import speechDictHandler
log.debug("Speech Dictionary processing")
speechDictHandler.initialize()
import speech
log.debug("Initializing speech")
speech.initialize()
if not globalVars.appArgs.minimal and (time.time()-globalVars.startTime)>5:
log.debugWarning("Slow starting core (%.2f sec)" % (time.time()-globalVars.startTime))
# Translators: This is spoken when NVDA is starting.
speech.speakMessage(_("Loading NVDA. Please wait..."))
import wx
# wxPython 4 no longer has either of these constants (despite the documentation saying so), some add-ons may rely on
# them so we add it back into wx. https://wxpython.org/Phoenix/docs/html/wx.Window.html#wx.Window.Centre
wx.CENTER_ON_SCREEN = wx.CENTRE_ON_SCREEN = 0x2
log.info("Using wx version %s"%wx.version())
class App(wx.App):
def OnAssert(self,file,line,cond,msg):
message="{file}, line {line}:\nassert {cond}: {msg}".format(file=file,line=line,cond=cond,msg=msg)
log.debugWarning(message,codepath="WX Widgets",stack_info=True)
app = App(redirect=False)
# We support queryEndSession events, but in general don't do anything for them.
# However, when running as a Windows Store application, we do want to request to be restarted for updates
def onQueryEndSession(evt):
if config.isAppX:
# Automatically restart NVDA on Windows Store update
ctypes.windll.kernel32.RegisterApplicationRestart(None,0)
app.Bind(wx.EVT_QUERY_END_SESSION, onQueryEndSession)
def onEndSession(evt):
# NVDA will be terminated as soon as this function returns, so save configuration if appropriate.
config.saveOnExit()
speech.cancelSpeech()
if not globalVars.appArgs.minimal and config.conf["general"]["playStartAndExitSounds"]:
try:
nvwave.playWaveFile("waves\\exit.wav",async=False)
except:
pass
log.info("Windows session ending")
app.Bind(wx.EVT_END_SESSION, onEndSession)
log.debug("Initializing braille input")
import brailleInput
brailleInput.initialize()
import braille
log.debug("Initializing braille")
braille.initialize()
import displayModel
log.debug("Initializing displayModel")
displayModel.initialize()
log.debug("Initializing GUI")
import gui
gui.initialize()
import audioDucking
if audioDucking.isAudioDuckingSupported():
# the GUI mainloop must be running for this to work so delay it
wx.CallAfter(audioDucking.initialize)
# #3763: In wxPython 3, the class name of frame windows changed from wxWindowClassNR to wxWindowNR.
# NVDA uses the main frame to check for and quit another instance of NVDA.
# To remain compatible with older versions of NVDA, create our own wxWindowClassNR.
# We don't need to do anything else because wx handles WM_QUIT for all windows.
import windowUtils
class MessageWindow(windowUtils.CustomWindow):
className = u"wxWindowClassNR"
#Just define these constants here, so we don't have to import win32con
WM_POWERBROADCAST = 0x218
WM_DISPLAYCHANGE = 0x7e
PBT_APMPOWERSTATUSCHANGE = 0xA
UNKNOWN_BATTERY_STATUS = 0xFF
AC_ONLINE = 0X1
NO_SYSTEM_BATTERY = 0X80
#States for screen orientation
ORIENTATION_NOT_INITIALIZED = 0
ORIENTATION_PORTRAIT = 1
ORIENTATION_LANDSCAPE = 2
def __init__(self, windowName=None):
super(MessageWindow, self).__init__(windowName)
self.oldBatteryStatus = None
self.orientationStateCache = self.ORIENTATION_NOT_INITIALIZED
self.orientationCoordsCache = (0,0)
self.handlePowerStatusChange()
def windowProc(self, hwnd, msg, wParam, lParam):
post_windowMessageReceipt.notify(msg=msg, wParam=wParam, lParam=lParam)
if msg == self.WM_POWERBROADCAST and wParam == self.PBT_APMPOWERSTATUSCHANGE:
self.handlePowerStatusChange()
elif msg == self.WM_DISPLAYCHANGE:
self.handleScreenOrientationChange(lParam)
def handleScreenOrientationChange(self, lParam):
import ui
import winUser
# Resolution detection comes from an article found at https://msdn.microsoft.com/en-us/library/ms812142.aspx.
#The low word is the width and hiword is height.
width = winUser.LOWORD(lParam)
height = winUser.HIWORD(lParam)
self.orientationCoordsCache = (width,height)
if width > height:
# If the height and width are the same, it's actually a screen flip, and we do want to alert of those!
if self.orientationStateCache == self.ORIENTATION_LANDSCAPE and self.orientationCoordsCache != (width,height):
return
#Translators: The screen is oriented so that it is wider than it is tall.
ui.message(_("Landscape" ))
self.orientationStateCache = self.ORIENTATION_LANDSCAPE
else:
if self.orientationStateCache == self.ORIENTATION_PORTRAIT and self.orientationCoordsCache != (width,height):
return
#Translators: The screen is oriented in such a way that the height is taller than it is wide.
ui.message(_("Portrait"))
self.orientationStateCache = self.ORIENTATION_PORTRAIT
def handlePowerStatusChange(self):
#Mostly taken from script_say_battery_status, but modified.
import ui
import winKernel
sps = winKernel.SYSTEM_POWER_STATUS()
if not winKernel.GetSystemPowerStatus(sps) or sps.BatteryFlag is self.UNKNOWN_BATTERY_STATUS:
return
if sps.BatteryFlag & self.NO_SYSTEM_BATTERY:
return
if self.oldBatteryStatus is None:
#Just initializing the cache, do not report anything.
self.oldBatteryStatus = sps.ACLineStatus
return
if sps.ACLineStatus == self.oldBatteryStatus:
#Sometimes, this double fires. This also fires when the battery level decreases by 3%.
return
self.oldBatteryStatus = sps.ACLineStatus
if sps.ACLineStatus & self.AC_ONLINE:
#Translators: Reported when the battery is plugged in, and now is charging.
ui.message(_("Charging battery. %d percent") % sps.BatteryLifePercent)
else:
#Translators: Reported when the battery is no longer plugged in, and now is not charging.
ui.message(_("Not charging battery. %d percent") %sps.BatteryLifePercent)
messageWindow = MessageWindow(unicode(versionInfo.name))
# initialize wxpython localization support
locale = wx.Locale()
lang=languageHandler.getLanguage()
wxLang=locale.FindLanguageInfo(lang)
if not wxLang and '_' in lang:
wxLang=locale.FindLanguageInfo(lang.split('_')[0])
if hasattr(sys,'frozen'):
locale.AddCatalogLookupPathPrefix(os.path.join(os.getcwdu(),"locale"))
if wxLang:
try:
locale.Init(wxLang.Language)
except:
log.error("Failed to initialize wx locale",exc_info=True)
else:
log.debugWarning("wx does not support language %s" % lang)
import api
import winUser
import NVDAObjects.window
desktopObject=NVDAObjects.window.Window(windowHandle=winUser.getDesktopWindow())
api.setDesktopObject(desktopObject)
api.setFocusObject(desktopObject)
api.setNavigatorObject(desktopObject)
api.setMouseObject(desktopObject)
import JABHandler
log.debug("initializing Java Access Bridge support")
try:
JABHandler.initialize()
except NotImplementedError:
log.warning("Java Access Bridge not available")
except:
log.error("Error initializing Java Access Bridge support", exc_info=True)
import winConsoleHandler
log.debug("Initializing winConsole support")
winConsoleHandler.initialize()
import UIAHandler
log.debug("Initializing UIA support")
try:
UIAHandler.initialize()
except NotImplementedError:
log.warning("UIA not available")
except:
log.error("Error initializing UIA support", exc_info=True)
import IAccessibleHandler
log.debug("Initializing IAccessible support")
IAccessibleHandler.initialize()
log.debug("Initializing input core")
import inputCore
inputCore.initialize()
import keyboardHandler
log.debug("Initializing keyboard handler")
keyboardHandler.initialize()
import mouseHandler
log.debug("initializing mouse handler")
mouseHandler.initialize()
import touchHandler
log.debug("Initializing touchHandler")
try:
touchHandler.initialize()
except NotImplementedError:
pass
import globalPluginHandler
log.debug("Initializing global plugin handler")
globalPluginHandler.initialize()
if globalVars.appArgs.install or globalVars.appArgs.installSilent:
import gui.installerGui
wx.CallAfter(gui.installerGui.doSilentInstall,startAfterInstall=not globalVars.appArgs.installSilent)
elif globalVars.appArgs.portablePath and (globalVars.appArgs.createPortable or globalVars.appArgs.createPortableSilent):
import gui.installerGui
wx.CallAfter(gui.installerGui.doCreatePortable,portableDirectory=globalVars.appArgs.portablePath,
silent=globalVars.appArgs.createPortableSilent,startAfterCreate=not globalVars.appArgs.createPortableSilent)
elif not globalVars.appArgs.minimal:
try:
# Translators: This is shown on a braille display (if one is connected) when NVDA starts.
braille.handler.message(_("NVDA started"))
except:
log.error("", exc_info=True)
if globalVars.appArgs.launcher:
gui.LauncherDialog.run()
# LauncherDialog will call doStartupDialogs() afterwards if required.
else:
wx.CallAfter(doStartupDialogs)
import queueHandler
# Queue the handling of initial focus,
# as API handlers might need to be pumped to get the first focus event.
queueHandler.queueFunction(queueHandler.eventQueue, _setInitialFocus)
import watchdog
import baseObject
# Doing this here is a bit ugly, but we don't want these modules imported
# at module level, including wx.
log.debug("Initializing core pump")
class CorePump(gui.NonReEntrantTimer):
"Checks the queues and executes functions."
def run(self):
global _isPumpPending
_isPumpPending = False
watchdog.alive()
try:
if touchHandler.handler:
touchHandler.handler.pump()
JABHandler.pumpAll()
IAccessibleHandler.pumpAll()
queueHandler.pumpAll()
mouseHandler.pumpAll()
braille.pumpAll()
except:
log.exception("errors in this core pump cycle")
baseObject.AutoPropertyObject.invalidateCaches()
watchdog.asleep()
if _isPumpPending and not _pump.IsRunning():
# #3803: Another pump was requested during this pump execution.
# As our pump is not re-entrant, schedule another pump.
_pump.Start(PUMP_MAX_DELAY, True)
global _pump
_pump = CorePump()
requestPump()
log.debug("Initializing watchdog")
watchdog.initialize()
try:
import updateCheck
except RuntimeError:
updateCheck=None
log.debug("Update checking not supported")
else:
log.debug("initializing updateCheck")
updateCheck.initialize()
log.info("NVDA initialized")
postNvdaStartup.notify()
log.debug("entering wx application main loop")
app.MainLoop()
log.info("Exiting")
if updateCheck:
_terminate(updateCheck)
_terminate(watchdog)
_terminate(globalPluginHandler, name="global plugin handler")
_terminate(gui)
config.saveOnExit()
try:
if globalVars.focusObject and hasattr(globalVars.focusObject,"event_loseFocus"):
log.debug("calling lose focus on object with focus")
globalVars.focusObject.event_loseFocus()
except:
log.exception("Lose focus error")
try:
speech.cancelSpeech()
except:
pass
import treeInterceptorHandler
_terminate(treeInterceptorHandler)
_terminate(IAccessibleHandler, name="IAccessible support")
_terminate(UIAHandler, name="UIA support")
_terminate(winConsoleHandler, name="winConsole support")
_terminate(JABHandler, name="Java Access Bridge support")
_terminate(appModuleHandler, name="app module handler")
_terminate(NVDAHelper)
_terminate(touchHandler)
_terminate(keyboardHandler, name="keyboard handler")
_terminate(mouseHandler)
_terminate(inputCore)
_terminate(brailleInput)
_terminate(braille)
_terminate(speech)
_terminate(addonHandler)
if not globalVars.appArgs.minimal and config.conf["general"]["playStartAndExitSounds"]:
try:
nvwave.playWaveFile("waves\\exit.wav",async=False)
except:
pass
# #5189: Destroy the message window as late as possible
# so new instances of NVDA can find this one even if it freezes during exit.
messageWindow.destroy()
log.debug("core done")
def _terminate(module, name=None):
if name is None:
name = module.__name__
log.debug("Terminating %s" % name)
try:
module.terminate()
except:
log.exception("Error terminating %s" % name)
def requestPump():
"""Request a core pump.
This will perform any queued activity.
It is delayed slightly so that queues can implement rate limiting,
filter extraneous events, etc.
"""
global _isPumpPending
if not _pump or _isPumpPending:
return
_isPumpPending = True
if thread.get_ident() == mainThreadId:
_pump.Start(PUMP_MAX_DELAY, True)
return
# This isn't the main thread. wx timers cannot be run outside the main thread.
# Therefore, Have wx start it in the main thread with a CallAfter.
import wx
wx.CallAfter(_pump.Start,PUMP_MAX_DELAY, True)
def callLater(delay, callable, *args, **kwargs):
"""Call a callable once after the specified number of milliseconds.
As the call is executed within NVDA's core queue, it is possible that execution will take place slightly after the requested time.
This function should never be used to execute code that brings up a modal UI as it will cause NVDA's core to block.
This function can be safely called from any thread.
"""
import wx
if thread.get_ident() == mainThreadId:
return wx.CallLater(delay, _callLaterExec, callable, args, kwargs)
else:
return wx.CallAfter(wx.CallLater,delay, _callLaterExec, callable, args, kwargs)
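# Illustrative usage (the callable shown is just an example): schedule a spoken
# message roughly half a second from now, from any thread:
# 	callLater(500, ui.message, "Done")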
def _callLaterExec(callable, args, kwargs):
import queueHandler
queueHandler.queueFunction(queueHandler.eventQueue,callable,*args, **kwargs)
| 1 | 23,014 | It might make sense to log this. | nvaccess-nvda | py |
@@ -340,12 +340,10 @@ module Blacklight
# too. These model names should not be `#dup`'ed or we might break ActiveModel::Naming.
def deep_copy
deep_dup.tap do |copy|
- copy.repository_class = self.repository_class
- copy.response_model = self.response_model
- copy.document_model = self.document_model
- copy.document_presenter_class = self.document_presenter_class
- copy.search_builder_class = self.search_builder_class
- copy.facet_paginator_class = self.facet_paginator_class
+ %w(repository_class response_model document_model document_presenter_class search_builder_class facet_paginator_class).each do |klass|
+ # Don't copy if nil, so as not to prematurely autoload default classes
+ copy.send("#{klass}=", send(klass)) unless fetch(klass.to_sym, nil).nil?
+ end
end
end
alias_method :inheritable_copy, :deep_copy | 1 | module Blacklight
##
# Blacklight::Configuration holds the configuration for a Blacklight::Controller, including
# fields to display, facets to show, sort options, and search fields.
class Configuration < OpenStructWithHashAccess
require 'blacklight/configuration/view_config'
require 'blacklight/configuration/tool_config'
# XXX this isn't very pretty, but it works.
require 'blacklight/configuration/fields'
require 'blacklight/configuration/field'
require 'blacklight/configuration/solr_field'
require 'blacklight/configuration/search_field'
require 'blacklight/configuration/facet_field'
require 'blacklight/configuration/sort_field'
include Fields
extend Deprecation
self.deprecation_horizon = 'blacklight 6.0'
# Set up Blacklight::Configuration.default_values to contain
# the basic, required Blacklight fields
class << self
def default_values
@default_values ||= begin
{
##
# === Search request configuration
##
# HTTP method to use when making requests to solr; valid
# values are :get and :post.
http_method: :get,
# The solr request handler ('qt') to use for search requests
qt: 'search',
# The path to send requests to solr.
solr_path: 'select',
# Default values of parameters to send with every search request
default_solr_params: {},
## deprecated; use add_facet_field :include_in_request instead;
# if this is configured true, all facets will be included in the solr request
# unless explicitly disabled.
add_facet_fields_to_solr_request: false,
## deprecated; use add_index_field :include_in_request instead;
        # if this is configured true, all show and index fields will be included in the solr request
# unless explicitly disabled.
add_field_configuration_to_solr_request: false,
##
# === Single document request configuration
##
        # The solr request handler to use when requesting only a single document
document_solr_request_handler: 'document',
        # The path to send single document requests to solr
document_solr_path: nil,
document_unique_id_param: :id,
# Default values of parameters to send when requesting a single document
default_document_solr_params: {
## Blacklight provides these settings in the /document request handler
## by default, we just ask for all fields.
#fl: '*',
## this is a fancy way to say "find the document by id using
## the value in the id query parameter"
#q: "{!raw f=#{unique_key} v=$id}",
## disable features we don't need
#facet: false,
#rows: 1
},
##
# == Response models
## Class for sending and receiving requests from a search index
repository_class: nil,
## Class for converting Blacklight parameters to request parameters for the repository_class
search_builder_class: nil,
# model that maps index responses to the blacklight response model
response_model: nil,
# the model to use for each response document
document_model: nil,
# document presenter class used by helpers and views
document_presenter_class: nil,
# Class for paginating long lists of facet fields
facet_paginator_class: nil,
# repository connection configuration
connection_config: nil,
##
# == Blacklight view configuration
##
navbar: OpenStructWithHashAccess.new(partials: { }),
# General configuration for all views
index: ViewConfig::Index.new(
# solr field to use to render a document title
title_field: nil,
# solr field to use to render format-specific partials
display_type_field: 'format',
# partials to render for each document(see #render_document_partials)
partials: [:index_header, :thumbnail, :index],
document_actions: NestedOpenStructWithHashAccess.new(ToolConfig),
collection_actions: NestedOpenStructWithHashAccess.new(ToolConfig),
# what field, if any, to use to render grouped results
group: false,
# additional response formats for search results
respond_to: OpenStructWithHashAccess.new
),
# Additional configuration when displaying a single document
show: ViewConfig::Show.new(
# default route parameters for 'show' requests
# set this to a hash with additional arguments to merge into
# the route, or set `controller: :current` to route to the
# current controller.
route: nil,
# partials to render for each document (see #render_document_partials)
partials: [:show_header, :show],
document_actions: NestedOpenStructWithHashAccess.new(ToolConfig)
),
# Configurations for specific types of index views
view: NestedOpenStructWithHashAccess.new(ViewConfig,
'list',
atom: {
if: false, # by default, atom should not show up as an alternative view
partials: [:document]
},
rss: {
if: false, # by default, rss should not show up as an alternative view
partials: [:document]
}),
#
# These fields are created and managed below by `defined_field_access`
# facet_fields
# index_fields
# show_fields
# sort_fields
# search_fields
##
# === Blacklight behavior configuration
##
# Maximum number of spelling suggestions to offer
spell_max: 5,
# Maximum number of results to show per page
max_per_page: 100,
# Options for the user for number of results to show per page
per_page: [10,20,50,100],
default_per_page: nil,
# how many searches to save in session history
# (TODO: move the value into the configuration?)
search_history_window: Blacklight::Catalog::SearchHistoryWindow,
default_facet_limit: 10
}
end
end
end
##
# Create collections of solr field configurations.
# This will create array-like accessor methods for
# the given field, and an #add_x_field convenience
# method for adding new fields to the configuration
# facet fields
define_field_access :facet_field
# solr fields to display on search results
define_field_access :index_field
# solr fields to display when showing single documents
define_field_access :show_field
# solr "fields" to use for scoping user search queries
# to particular fields
define_field_access :search_field
# solr fields to use for sorting results
define_field_access :sort_field
def initialize(*args)
super(*args)
initialize_default_values!
yield(self) if block_given?
self
end
##
# Initialize default values from the class attribute
def initialize_default_values!
Marshal.load(Marshal.dump(self.class.default_values)).each do |k, v|
self[k] ||= v
end
end
def document_model
super || ::SolrDocument
end
alias_method :solr_document_model, :document_model
# only here to support alias_method
def document_model= *args
super
end
alias_method :solr_document_model=, :document_model=
deprecation_deprecate solr_document_model: :document_model, :solr_document_model= => :document_model=
def document_presenter_class
super || Blacklight::DocumentPresenter
end
def response_model
super || Blacklight::SolrResponse
end
alias_method :solr_response_model, :response_model
# only here to support alias_method
def response_model= *args
super
end
alias_method :solr_response_model=, :response_model=
deprecation_deprecate solr_response_model: :response_model, :solr_response_model= => :response_model=
def repository_class
super || Blacklight::SolrRepository
end
def connection_config
super || Blacklight.connection_config
end
def search_builder_class
super || locate_search_builder_class
end
def locate_search_builder_class
::SearchBuilder
rescue NameError
Deprecation.warn(Configuration, "Your application is missing the SearchBuilder. Have you run `rails generate blacklight:search_builder`? Falling back to Blacklight::Solr::SearchBuilder")
Blacklight::Solr::SearchBuilder
end
def facet_paginator_class
super || Blacklight::Solr::FacetPaginator
end
def default_per_page
super || per_page.first
end
##
# DSL helper
def configure
yield self if block_given?
self
end
##
# Returns default search field, used for simpler display in history, etc.
# if not set, defaults to first defined search field
def default_search_field
field = super
field ||= search_fields.values.find { |f| f.default == true }
field ||= search_fields.values.first
field
end
##
# Returns default sort field, used for simpler display in history, etc.
# if not set, defaults to first defined sort field
def default_sort_field
field = super
field ||= sort_fields.values.find { |f| f.default == true }
field ||= sort_fields.values.first
field
end
def default_title_field
document_model.unique_key || 'id'
end
##
# Add any configured facet fields to the default solr parameters hash
# @overload add_facet_fields_to_solr_request!
# add all facet fields to the solr request
# @overload add_facet_fields_to_solr_request! field, field, field
# @param [Symbol] Field names to add to the solr request
# @param [Symbol]
# @param [Symbol]
def add_facet_fields_to_solr_request! *fields
if fields.empty?
self.add_facet_fields_to_solr_request = true
else
facet_fields.slice(*fields).each do |k,v|
v.include_in_request = true
end
end
end
##
# Add any configured facet fields to the default solr parameters hash
# @overload add_field_configuration_to_solr_request!
# add all index, show, and facet fields to the solr request
# @overload add_field_configuration_to_solr_request! field, field, field
# @param [Symbol] Field names to add to the solr request
# @param [Symbol]
# @param [Symbol]
def add_field_configuration_to_solr_request! *fields
if fields.empty?
self.add_field_configuration_to_solr_request = true
else
index_fields.slice(*fields).each do |k,v|
v.include_in_request = true
end
show_fields.slice(*fields).each do |k,v|
v.include_in_request = true
end
facet_fields.slice(*fields).each do |k,v|
v.include_in_request = true
end
end
end
##
# Deprecated. Get the list of facet fields to explicitly
# add to the solr request
def facet_fields_to_add_to_solr
facet_fields.select { |k,v| v.include_in_request }
.reject { |k,v| v[:query] || v[:pivot] }
.map { |k,v| v.field }
end
deprecation_deprecate :facet_fields_to_add_to_solr
##
# Provide a 'deep copy' of Blacklight::Configuration that can be modified without affecting
# the original Blacklight::Configuration instance.
#
# The Rails 3.x version only copies hashes, and ignores arrays and similar structures
if ::Rails.version < "4.0"
def deep_copy
Marshal.load(Marshal.dump(self))
end
alias_method :inheritable_copy, :deep_copy
else
##
# Rails 4.x provides `#deep_dup`, but it aggressively `#dup`'s class names
# too. These model names should not be `#dup`'ed or we might break ActiveModel::Naming.
def deep_copy
deep_dup.tap do |copy|
copy.repository_class = self.repository_class
copy.response_model = self.response_model
copy.document_model = self.document_model
copy.document_presenter_class = self.document_presenter_class
copy.search_builder_class = self.search_builder_class
copy.facet_paginator_class = self.facet_paginator_class
end
end
alias_method :inheritable_copy, :deep_copy
end
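# The patch at the top of this entry replaces the explicit per-class copies
# above with a loop and a nil guard, so classes that were never configured are
# not autoloaded prematurely. Assuming the loop iterates over the same six
# accessors, the patched body might read roughly:
#
#   %w[repository_class response_model document_model document_presenter_class
#      search_builder_class facet_paginator_class].each do |klass|
#     # Don't copy if nil, so as not to prematurely autoload default classes
#     copy.send("#{klass}=", send(klass)) unless fetch(klass.to_sym, nil).nil?
#   end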
##
# Get a view configuration for the given view type
# including default values from the index configuration
def view_config view_type
if view_type == :show
self.index.merge self.show
else
self.index.merge view.fetch(view_type, {})
end
end
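# With the defaults above, for example, view_config(:atom) merges the general
# index settings with the atom-specific ones (if: false, partials: [:document]),
# while view_config(:show) layers the show configuration on top of index.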
##
# Add a partial to the tools when rendering a document.
# @param partial [String] the name of the document partial
# @param opts [Hash]
# @option opts [Symbol,Proc] :if render this action if the method identified by the symbol or the proc evaluates to true.
# The proc will receive the action configuration and the document or documents for the action.
# @option opts [Symbol,Proc] :unless render this action unless the method identified by the symbol or the proc evaluates to true
# The proc will receive the action configuration and the document or documents for the action.
def add_show_tools_partial(name, opts = {})
opts[:partial] ||= 'document_action'
add_action(show.document_actions, name, opts)
end
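# A hypothetical usage sketch (the :citation name and the render_citation?
# helper are illustrative only, not defined in this file):
#
#   add_show_tools_partial(:citation, if: :render_citation?)
#
# This registers a show-page action named :citation rendered via the default
# 'document_action' partial, shown only when render_citation? evaluates to true.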
##
# Add a tool for the search result list itself
# @param partial [String] the name of the document partial
# @param opts [Hash]
# @option opts [Symbol,Proc] :if render this action if the method identified by the symbol or the proc evaluates to true.
# The proc will receive the action configuration and the document or documents for the action.
# @option opts [Symbol,Proc] :unless render this action unless the method identified by the symbol or the proc evaluates to true
# The proc will receive the action configuration and the document or documents for the action.
def add_results_collection_tool(name, opts = {})
add_action(index.collection_actions, name, opts)
end
##
# Add a partial to the tools for each document in the search results.
# @param partial [String] the name of the document partial
# @param opts [Hash]
# @option opts [Symbol,Proc] :if render this action if the method identified by the symbol or the proc evaluates to true.
# The proc will receive the action configuration and the document or documents for the action.
# @option opts [Symbol,Proc] :unless render this action unless the method identified by the symbol or the proc evaluates to true
# The proc will receive the action configuration and the document or documents for the action.
def add_results_document_tool(name, opts = {})
add_action(index.document_actions, name, opts)
end
##
# Add a partial to the header navbar
# @param partial [String] the name of the document partial
# @param opts [Hash]
# @option opts [Symbol,Proc] :if render this action if the method identified by the symbol or the proc evaluates to true.
# The proc will receive the action configuration and the document or documents for the action.
# @option opts [Symbol,Proc] :unless render this action unless the method identified by the symbol or the proc evaluates to true
# The proc will receive the action configuration and the document or documents for the action.
def add_nav_action name, opts = {}
add_action(navbar.partials, name, opts)
end
private
def add_action config_hash, name, opts
config = Blacklight::Configuration::ToolConfig.new opts
config.name = name
if block_given?
yield config
end
config_hash[name] = config
end
end
end
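# The review comment on this entry flags the guarded copy line from the patch
# as 84 characters; one way to stay under the 80-column limit, assuming the
# line sits inside the each loop shown in the patch, is to split the guard out:
#
#   next if fetch(klass.to_sym, nil).nil?
#   copy.send("#{klass}=", send(klass))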
| 1 | 6,032 | Line is too long. [84/80] | projectblacklight-blacklight | rb |
@@ -22,6 +22,7 @@ from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class Options(object):
+ KEY = "goog:chromeOptions"
def __init__(self):
self._binary_location = '' | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import os
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class Options(object):
def __init__(self):
self._binary_location = ''
self._arguments = []
self._extension_files = []
self._extensions = []
self._experimental_options = {}
self._debugger_address = None
@property
def binary_location(self):
"""
Returns the location of the binary otherwise an empty string
"""
return self._binary_location
@binary_location.setter
def binary_location(self, value):
"""
Allows you to set where the chromium binary lives
:Args:
- value: path to the Chromium binary
"""
self._binary_location = value
@property
def debugger_address(self):
"""
Returns the address of the remote devtools instance
"""
return self._debugger_address
@debugger_address.setter
def debugger_address(self, value):
"""
Allows you to set the address of the remote devtools instance
that the ChromeDriver instance will try to connect to during an
active wait.
:Args:
- value: address of remote devtools instance if any (hostname[:port])
"""
self._debugger_address = value
@property
def arguments(self):
"""
Returns a list of arguments needed for the browser
"""
return self._arguments
def add_argument(self, argument):
"""
Adds an argument to the list
:Args:
- argument: the argument to add to the list
"""
if argument:
self._arguments.append(argument)
else:
raise ValueError("argument can not be null")
@property
def extensions(self):
"""
Returns a list of encoded extensions that will be loaded into chrome
"""
encoded_extensions = []
for ext in self._extension_files:
file_ = open(ext, 'rb')
# Should not use base64.encodestring() which inserts newlines every
# 76 characters (per RFC 1521). Chromedriver has to remove those
# unnecessary newlines before decoding, causing performance hit.
encoded_extensions.append(base64.b64encode(file_.read()).decode('UTF-8'))
file_.close()
return encoded_extensions + self._extensions
def add_extension(self, extension):
"""
Adds the path to the extension to a list that will be used to extract it
to the ChromeDriver
:Args:
- extension: path to the \*.crx file
"""
if extension:
extension_to_add = os.path.abspath(os.path.expanduser(extension))
if os.path.exists(extension_to_add):
self._extension_files.append(extension_to_add)
else:
raise IOError("Path to the extension doesn't exist")
else:
raise ValueError("argument can not be null")
def add_encoded_extension(self, extension):
"""
Adds Base64 encoded string with extension data to a list that will be used to extract it
to the ChromeDriver
:Args:
- extension: Base64 encoded string with extension data
"""
if extension:
self._extensions.append(extension)
else:
raise ValueError("argument can not be null")
@property
def experimental_options(self):
"""
Returns a dictionary of experimental options for chrome.
"""
return self._experimental_options
def add_experimental_option(self, name, value):
"""
Adds an experimental option which is passed to chrome.
Args:
name: The experimental option name.
value: The option value.
"""
self._experimental_options[name] = value
def to_capabilities(self):
"""
Creates a capabilities dictionary with all the options that have
been set and returns it
"""
chrome = DesiredCapabilities.CHROME.copy()
chrome_options = self.experimental_options.copy()
chrome_options["extensions"] = self.extensions
if self.binary_location:
chrome_options["binary"] = self.binary_location
chrome_options["args"] = self.arguments
if self.debugger_address:
chrome_options["debuggerAddress"] = self.debugger_address
chrome["goog:chromeOptions"] = chrome_options
return chrome
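# With the KEY constant added by the patch above, the hard-coded capability
# name assigned just above could plausibly become (a sketch, not the original
# code):
#
#     chrome[self.KEY] = chrome_options
#
# so that "goog:chromeOptions" is spelled in exactly one place.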
| 1 | 14,945 | nice touch since Google likes to change things every now and then. | SeleniumHQ-selenium | java |
@@ -0,0 +1,19 @@
+# Provides a helper utility for loading branding configs.
+module Branding
+
+ module_function
+
+ # Loads branding config from YAML file.
+ #
+ # @param keys [Array<Object>] A list of the keys to return configs for.
+ #
+ # @example Return a value
+ # Branding.fetch(:settings, :should_work) # => true
+ # Branding.fetch(:settings, :email) # => '[email protected]'
+ # Branding.fetch(:settings, :missing) # => nil
+ # @return [Object] The value of the config
+ def fetch(*keys)
+ keys = keys.map(&:to_sym)
+ Rails.configuration.branding.dig(*keys)
+ end
+end | 1 | 1 | 17,781 | Thank you this is so much nicer than the old `Rails.configuration.branding['blah']['blah']['blah']` :) | DMPRoadmap-roadmap | rb |
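# A usage sketch with hypothetical :organisation and :name keys: the nested
# lookup Rails.configuration.branding[:organisation][:name] becomes
#
#   Branding.fetch(:organisation, :name)
#
# and, because #dig is used, a missing intermediate key yields nil instead of
# raising NoMethodError on the nil intermediate value.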