Dataset columns:
  patch  string, lengths 17 to 31.2k
  y      int64, values 1 to 1
  oldf   string, lengths 0 to 2.21M
  idx    int64, values 1 to 1
  id     int64, values 4.29k to 68.4k
  msg    string, lengths 8 to 843
  proj   string, 212 distinct values
  lang   string, 9 distinct values
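Each row below pairs a reviewer comment (msg) with the diff it was left on (patch), the pre-change file contents (oldf), and the project and language it came from (proj, lang). As a rough illustration of consuming rows with this schema, here is a minimal Python sketch, assuming the rows are published as a Hugging Face dataset; the dataset path "org/code-review-comments" is a placeholder, not the actual dataset name.

# Minimal sketch: load rows with the schema above and inspect a few of them.
# The dataset path below is a placeholder, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("org/code-review-comments", split="train")

# Keep only the Python rows and print each reviewer message next to its hunk header.
py_rows = ds.filter(lambda row: row["lang"] == "py")
for row in py_rows.select(range(min(3, len(py_rows)))):
    hunk_header = row["patch"].splitlines()[0] if row["patch"] else ""
    print(row["proj"], row["id"], hunk_header)
    print("  msg:", row["msg"])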
patch:
@@ -0,0 +1,3 @@
+ActiveAdmin.register LicensePermission do
+  permit_params :name, :description, :icon
+end
y: 1
oldf: (empty)
idx: 1
id: 9,107
msg: Was this all that was needed to add CRUD operations for the LicensePermission model? :-)
proj: blackducksoftware-ohloh-ui
lang: rb
patch:
@@ -465,7 +465,7 @@ def installAddon(parentWindow, addonPath):
 	prevAddon = None
 	for addon in addonHandler.getAvailableAddons():
-		if not addon.isPendingRemove and bundle.name==addon.manifest['name']:
+		if not addon.isPendingRemove and bundle.name.lower()==addon.manifest['name'].lower():
 			prevAddon=addon
 			break
 	if prevAddon:
y: 1
oldf:
#gui/addonGui.py #A part of NonVisual Desktop Access (NVDA) #This file is covered by the GNU General Public License. #See the file COPYING for more details. #Copyright (C) 2012-2018 NV Access Limited, Beqa Gozalishvili, Joseph Lee, Babbage B.V., Ethan Holliger import os import weakref import addonAPIVersion import wx import core import config import gui from addonHandler import addonVersionCheck from logHandler import log import addonHandler import globalVars import buildVersion from . import guiHelper from . import nvdaControls from .dpiScalingHelper import DpiScalingHelperMixin def promptUserForRestart(): # Translators: A message asking the user if they wish to restart NVDA as addons have been added, enabled/disabled or removed. restartMessage = _( "Changes were made to add-ons. " "You must restart NVDA for these changes to take effect. " "Would you like to restart now?" ) # Translators: Title for message asking if the user wishes to restart NVDA as addons have been added or removed. restartTitle = _("Restart NVDA") result = gui.messageBox( message=restartMessage, caption=restartTitle, style=wx.YES | wx.NO | wx.ICON_WARNING ) if wx.YES == result: core.restart() class ConfirmAddonInstallDialog(nvdaControls.MessageDialog): def __init__(self, parent, title, message, showAddonInfoFunction): super(ConfirmAddonInstallDialog, self).__init__( parent, title, message, dialogType=nvdaControls.MessageDialog.DIALOG_TYPE_WARNING ) self._showAddonInfoFunction = showAddonInfoFunction def _addButtons(self, buttonHelper): addonInfoButton = buttonHelper.addButton( self, # Translators: A button in the addon installation warning / blocked dialog which shows # more information about the addon label=_("&About add-on...") ) addonInfoButton.Bind(wx.EVT_BUTTON, lambda evt: self._showAddonInfoFunction()) yesButton = buttonHelper.addButton( self, id=wx.ID_YES, # Translators: A button in the addon installation warning dialog which allows the user to agree to installing # the add-on label=_("&Yes") ) yesButton.SetDefault() yesButton.Bind(wx.EVT_BUTTON, lambda evt: self.EndModal(wx.YES)) noButton = buttonHelper.addButton( self, id=wx.ID_NO, # Translators: A button in the addon installation warning dialog which allows the user to decide not to # install the add-on label=_("&No") ) noButton.Bind(wx.EVT_BUTTON, lambda evt: self.EndModal(wx.NO)) class ErrorAddonInstallDialog(nvdaControls.MessageDialog): def __init__(self, parent, title, message, showAddonInfoFunction): super(ErrorAddonInstallDialog, self).__init__( parent, title, message, dialogType=nvdaControls.MessageDialog.DIALOG_TYPE_ERROR ) self._showAddonInfoFunction = showAddonInfoFunction def _addButtons(self, buttonHelper): addonInfoButton = buttonHelper.addButton( self, # Translators: A button in the addon installation warning / blocked dialog which shows # more information about the addon label=_("&About add-on...") ) addonInfoButton.Bind(wx.EVT_BUTTON, lambda evt: self._showAddonInfoFunction()) okButton = buttonHelper.addButton( self, id=wx.ID_OK, # Translators: A button in the addon installation blocked dialog which will dismiss the dialog. label=_("OK") ) okButton.SetDefault() okButton.Bind(wx.EVT_BUTTON, lambda evt: self.EndModal(wx.OK)) def _showAddonInfo(addon): manifest = addon.manifest # Translators: message shown in the Addon Information dialog. 
message=[_( "{summary} ({name})\n" "Version: {version}\n" "Author: {author}\n" "Description: {description}\n" ).format(**manifest)] url=manifest.get('url') if url: # Translators: the url part of the About Add-on information message.append(_("URL: {url}").format(url=url)) minimumNVDAVersion = addonAPIVersion.formatForGUI(addon.minimumNVDAVersion) message.append( # Translators: the minimum NVDA version part of the About Add-on information _("Minimum required NVDA version: {}").format(minimumNVDAVersion) ) lastTestedNVDAVersion = addonAPIVersion.formatForGUI(addon.lastTestedNVDAVersion) message.append( # Translators: the last NVDA version tested part of the About Add-on information _("Last NVDA version tested: {}").format(lastTestedNVDAVersion) ) # Translators: title for the Addon Information dialog title=_("Add-on Information") gui.messageBox("\n".join(message), title, wx.OK) class AddonsDialog(wx.Dialog, DpiScalingHelperMixin): @classmethod def _instance(cls): """ type: () -> AddonsDialog return None until this is replaced with a weakref.ref object. Then the instance is retrieved with by treating that object as a callable. """ return None def __new__(cls, *args, **kwargs): instance = AddonsDialog._instance() if instance is None: return super(AddonsDialog, cls).__new__(cls, *args, **kwargs) return instance def __init__(self, parent): if AddonsDialog._instance() is not None: return # #7077: _instance must not be kept alive once the dialog is closed or there can be issues # when add-ons manager reopens or another add-on is installed remotely. AddonsDialog._instance = weakref.ref(self) # Translators: The title of the Addons Dialog title = _("Add-ons Manager") wx.Dialog.__init__(self, parent, title=title) DpiScalingHelperMixin.__init__(self, self.GetHandle()) mainSizer = wx.BoxSizer(wx.VERTICAL) firstTextSizer = wx.BoxSizer(wx.VERTICAL) listAndButtonsSizerHelper = guiHelper.BoxSizerHelper(self, sizer=wx.BoxSizer(wx.HORIZONTAL)) if globalVars.appArgs.disableAddons: # Translators: A message in the add-ons manager shown when all add-ons are disabled. label = _("All add-ons are currently disabled. To enable add-ons you must restart NVDA.") firstTextSizer.Add(wx.StaticText(self, label=label)) # Translators: the label for the installed addons list in the addons manager. entriesLabel = _("Installed Add-ons") firstTextSizer.Add(wx.StaticText(self, label=entriesLabel)) mainSizer.Add( firstTextSizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.TOP|wx.LEFT|wx.RIGHT ) self.addonsList = listAndButtonsSizerHelper.addItem( nvdaControls.AutoWidthColumnListCtrl( parent=self, style=wx.LC_REPORT | wx.LC_SINGLE_SEL, size=self.scaleSize((550, 350)) ) ) # Translators: The label for a column in add-ons list used to identify add-on package name (example: package is OCR). self.addonsList.InsertColumn(0, _("Package"), width=self.scaleSize(150)) # Translators: The label for a column in add-ons list used to identify add-on's running status (example: status is running). self.addonsList.InsertColumn(1, _("Status"), width=self.scaleSize(50)) # Translators: The label for a column in add-ons list used to identify add-on's version (example: version is 0.3). self.addonsList.InsertColumn(2, _("Version"), width=self.scaleSize(50)) # Translators: The label for a column in add-ons list used to identify add-on's author (example: author is NV Access). 
self.addonsList.InsertColumn(3, _("Author"), width=self.scaleSize(300)) self.addonsList.Bind(wx.EVT_LIST_ITEM_FOCUSED, self.onListItemSelected) # this is the group of buttons that affects the currently selected addon entryButtonsHelper=guiHelper.ButtonHelper(wx.VERTICAL) # Translators: The label for a button in Add-ons Manager dialog to show information about the selected add-on. self.aboutButton = entryButtonsHelper.addButton(self, label=_("&About add-on...")) self.aboutButton.Disable() self.aboutButton.Bind(wx.EVT_BUTTON, self.onAbout) # Translators: The label for a button in Add-ons Manager dialog to show the help for the selected add-on. self.helpButton = entryButtonsHelper.addButton(self, label=_("Add-on &help")) self.helpButton.Disable() self.helpButton.Bind(wx.EVT_BUTTON, self.onHelp) # Translators: The label for a button in Add-ons Manager dialog to enable or disable the selected add-on. self.enableDisableButton = entryButtonsHelper.addButton(self, label=_("&Disable add-on")) self.enableDisableButton.Disable() self.enableDisableButton.Bind(wx.EVT_BUTTON, self.onEnableDisable) # Translators: The label for a button to remove either: # Remove the selected add-on in Add-ons Manager dialog. # Remove a speech dictionary entry. self.removeButton = entryButtonsHelper.addButton(self, label=_("&Remove")) self.removeButton.Disable() self.removeButton.Bind(wx.EVT_BUTTON, self.onRemoveClick) listAndButtonsSizerHelper.addItem(entryButtonsHelper.sizer) mainSizer.Add( listAndButtonsSizerHelper.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL ) # the following buttons are more general and apply regardless of the current selection. generalActions=guiHelper.ButtonHelper(wx.HORIZONTAL) # Translators: The label of a button in Add-ons Manager to open the Add-ons website and get more add-ons. self.getAddonsButton = generalActions.addButton(self, label=_("&Get add-ons...")) self.getAddonsButton.Bind(wx.EVT_BUTTON, self.onGetAddonsClick) # Translators: The label for a button in Add-ons Manager dialog to install an add-on. self.addButton = generalActions.addButton(self, label=_("&Install...")) self.addButton.Bind(wx.EVT_BUTTON, self.onAddClick) # Translators: The label of a button in the Add-ons Manager to open the list of incompatible add-ons. self.incompatAddonsButton = generalActions.addButton(self, label=_("&View incompatible add-ons...")) self.incompatAddonsButton.Bind(wx.EVT_BUTTON, self.onIncompatAddonsShowClick) mainSizer.Add( generalActions.sizer, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.LEFT | wx.RIGHT ) mainSizer.Add( wx.StaticLine(self), border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.TOP | wx.BOTTOM | wx.EXPAND ) # Translators: The label of a button to close the Addons dialog. closeButton = wx.Button(self, label=_("&Close"), id=wx.ID_CLOSE) closeButton.Bind(wx.EVT_BUTTON, lambda evt: self.Close()) mainSizer.Add( closeButton, border=guiHelper.BORDER_FOR_DIALOGS, flag=wx.LEFT | wx.RIGHT | wx.BOTTOM | wx.CENTER | wx.ALIGN_RIGHT ) self.Bind(wx.EVT_CLOSE, self.onClose) self.EscapeId = wx.ID_CLOSE mainSizer.Fit(self) self.SetSizer(mainSizer) self.refreshAddonsList() self.addonsList.SetFocus() self.CentreOnScreen() def onAddClick(self, evt): # Translators: The message displayed in the dialog that allows you to choose an add-on package for installation. fd = wx.FileDialog(self, message=_("Choose Add-on Package File"), # Translators: the label for the NVDA add-on package file type in the Choose add-on dialog. 
wildcard=(_("NVDA Add-on Package (*.{ext})")+"|*.{ext}").format(ext=addonHandler.BUNDLE_EXTENSION), defaultDir="c:", style=wx.FD_OPEN) if fd.ShowModal() != wx.ID_OK: return addonPath = fd.GetPath() if installAddon(self, addonPath): self.refreshAddonsList(activeIndex=-1) else: self.refreshAddonsList() def onRemoveClick(self,evt): index=self.addonsList.GetFirstSelected() if index<0: return # Translators: Presented when attempting to remove the selected add-on. if gui.messageBox(_("Are you sure you wish to remove the selected add-on from NVDA?"), # Translators: Title for message asking if the user really wishes to remove the selected Addon. _("Remove Add-on"), wx.YES_NO|wx.ICON_WARNING) != wx.YES: return addon=self.curAddons[index] addon.requestRemove() self.refreshAddonsList(activeIndex=index) self.addonsList.SetFocus() def getAddonStatus(self, addon): if addon.isBlocked: # Translators: The status shown for an addon when it's not considered compatible with this version of NVDA. incompatibleStatus =_("Incompatible") # When the addon is incompatible, it can not be enabled/disabled. Its state no longer matters. # So, return early. return incompatibleStatus statusList = [] if addon.isRunning: # Translators: The status shown for an addon when its currently running in NVDA. statusList.append(_("Enabled")) elif addon.isPendingInstall: # Translators: The status shown for a newly installed addon before NVDA is restarted. statusList.append(_("Install")) # in some cases an addon can be expected to be disabled after install, so we want "install" to take precedence here elif globalVars.appArgs.disableAddons or addon.isDisabled: # Translators: The status shown for an addon when its currently suspended do to addons being disabled. statusList.append(_("Disabled")) if addon.isPendingRemove: # Translators: The status shown for an addon that has been marked as removed, before NVDA has been restarted. 
statusList.append(_("Removed after restart")) elif addon.isPendingDisable or (not addon.isPendingEnable and addon.isPendingInstall and addon.isDisabled): # Translators: The status shown for an addon when it requires a restart to become disabled statusList.append(_("Disabled after restart")) elif addon.isPendingEnable or (addon.isPendingInstall and not addon.isDisabled): # Translators: The status shown for an addon when it requires a restart to become enabled statusList.append(_("Enabled after restart")) return ", ".join(statusList) def refreshAddonsList(self,activeIndex=0): self.addonsList.DeleteAllItems() self.curAddons=[] anyAddonIncompatible = False for addon in addonHandler.getAvailableAddons(): self.addonsList.Append(( addon.manifest['summary'], self.getAddonStatus(addon), addon.manifest['version'], addon.manifest['author'] )) self.curAddons.append(addon) anyAddonIncompatible = ( anyAddonIncompatible # once we find one incompatible addon we don't need to continue or not addonVersionCheck.isAddonCompatible( addon, currentAPIVersion=addonAPIVersion.CURRENT, backwardsCompatToVersion=addonAPIVersion.BACK_COMPAT_TO ) ) self.incompatAddonsButton.Enable(anyAddonIncompatible) # select the given active addon or the first addon if not given curAddonsLen=len(self.curAddons) if curAddonsLen>0: if activeIndex==-1: activeIndex=curAddonsLen-1 elif activeIndex<0 or activeIndex>=curAddonsLen: activeIndex=0 self.addonsList.Select(activeIndex,on=1) self.addonsList.SetItemState(activeIndex,wx.LIST_STATE_FOCUSED,wx.LIST_STATE_FOCUSED) else: self.aboutButton.Disable() self.helpButton.Disable() self.removeButton.Disable() def _shouldDisable(self, addon): return not (addon.isPendingDisable or (addon.isDisabled and not addon.isPendingEnable)) def onListItemSelected(self, evt): index=evt.GetIndex() addon=self.curAddons[index] if index>=0 else None # #3090: Change toggle button label to indicate action to be taken if clicked. if addon is not None: # Translators: The label for a button in Add-ons Manager dialog to enable or disable the selected add-on. self.enableDisableButton.SetLabel(_("&Enable add-on") if not self._shouldDisable(addon) else _("&Disable add-on")) self.aboutButton.Enable(addon is not None and not addon.isPendingRemove) self.helpButton.Enable(bool(addon is not None and not addon.isPendingRemove and addon.getDocFilePath())) self.enableDisableButton.Enable( addon is not None and not addon.isPendingRemove and addonVersionCheck.isAddonCompatible(addon) ) self.removeButton.Enable(addon is not None and not addon.isPendingRemove) def onClose(self,evt): self.DestroyChildren() self.Destroy() needsRestart = False for addon in self.curAddons: if (addon.isPendingInstall or addon.isPendingRemove or addon.isDisabled and addon.isPendingEnable or addon.isRunning and addon.isPendingDisable): needsRestart = True break if needsRestart: promptUserForRestart() def onAbout(self,evt): index=self.addonsList.GetFirstSelected() if index<0: return addon=self.curAddons[index] _showAddonInfo(addon) def onHelp(self, evt): index = self.addonsList.GetFirstSelected() if index < 0: return path = self.curAddons[index].getDocFilePath() os.startfile(path) def onEnableDisable(self, evt): index=self.addonsList.GetFirstSelected() if index<0: return addon=self.curAddons[index] shouldDisable = self._shouldDisable(addon) try: # Counterintuitive, but makes sense when context is taken into account. 
addon.enable(not shouldDisable) except addonHandler.AddonError: log.error("Couldn't change state for %s add-on"%addon.name, exc_info=True) if shouldDisable: # Translators: The message displayed when the add-on cannot be disabled. message = _("Could not disable the {description} add-on.").format( description=addon.manifest['summary']) else: # Translators: The message displayed when the add-on cannot be enabled. message = _("Could not enable the {description} add-on.").format( description=addon.manifest['summary']) gui.messageBox( message, # Translators: The title of a dialog presented when an error occurs. _("Error"), wx.OK | wx.ICON_ERROR ) return self.enableDisableButton.SetLabel(_("&Enable add-on") if shouldDisable else _("&Disable add-on")) self.refreshAddonsList(activeIndex=index) def onGetAddonsClick(self, evt): ADDONS_URL = "http://addons.nvda-project.org" os.startfile(ADDONS_URL) def onIncompatAddonsShowClick(self, evt): IncompatibleAddonsDialog( parent=self, # the defaults from the addon GUI are fine. We are testing against the running version. ).ShowModal() def installAddon(parentWindow, addonPath): """ Installs the addon at path. Any error messages / warnings are presented to the user via a GUI message box. If attempting to install an addon that is pending removal, it will no longer be pending removal. :return True on success or False on failure. """ try: bundle = addonHandler.AddonBundle(addonPath) except: log.error("Error opening addon bundle from %s" % addonPath, exc_info=True) gui.messageBox( # Translators: The message displayed when an error occurs when opening an add-on package for adding. _("Failed to open add-on package file at %s - missing file or invalid file format") % addonPath, # Translators: The title of a dialog presented when an error occurs. _("Error"), wx.OK | wx.ICON_ERROR ) return False # Exit early, can't install an invalid bundle if not addonVersionCheck.hasAddonGotRequiredSupport(bundle): _showAddonRequiresNVDAUpdateDialog(parentWindow, bundle) return False # Exit early, addon does not have required support elif not addonVersionCheck.isAddonTested(bundle): _showAddonTooOldDialog(parentWindow, bundle) return False # Exit early, addon is not up to date with the latest API version. elif wx.YES != _showConfirmAddonInstallDialog(parentWindow, bundle): return False # Exit early, User changed their mind about installation. prevAddon = None for addon in addonHandler.getAvailableAddons(): if not addon.isPendingRemove and bundle.name==addon.manifest['name']: prevAddon=addon break if prevAddon: summary=bundle.manifest["summary"] curVersion=prevAddon.manifest["version"] newVersion=bundle.manifest["version"] # Translators: A title for the dialog asking if the user wishes to update a previously installed # add-on with this one. messageBoxTitle = _("Add-on Installation") # Translators: A message asking if the user wishes to update an add-on with the same version # currently installed according to the version number. overwriteExistingAddonInstallationMessage = _( "You are about to install version {newVersion} of {summary}, which appears to be already installed. " "Would you still like to update?" ).format(summary=summary, newVersion=newVersion) # Translators: A message asking if the user wishes to update a previously installed add-on with this one. updateAddonInstallationMessage = _( "A version of this add-on is already installed. " "Would you like to update {summary} version {curVersion} to version {newVersion}?" 
).format(summary=summary, curVersion=curVersion, newVersion=newVersion) if gui.messageBox( overwriteExistingAddonInstallationMessage if curVersion == newVersion else updateAddonInstallationMessage, messageBoxTitle, wx.YES|wx.NO|wx.ICON_WARNING ) != wx.YES: return False prevAddon.requestRemove() from contextlib import contextmanager @contextmanager def doneAndDestroy(window): try: yield window except: # pass on any exceptions raise finally: # but ensure that done and Destroy are called. window.done() window.Destroy() # use a progress dialog so users know that something is happening. progressDialog = gui.IndeterminateProgressDialog( parentWindow, # Translators: The title of the dialog presented while an Addon is being installed. _("Installing Add-on"), # Translators: The message displayed while an addon is being installed. _("Please wait while the add-on is being installed.") ) try: # Use context manager to ensure that `done` and `Destroy` are called on the progress dialog afterwards with doneAndDestroy(progressDialog): gui.ExecAndPump(addonHandler.installAddonBundle, bundle) return True except: log.error("Error installing addon bundle from %s" % addonPath, exc_info=True) gui.messageBox( # Translators: The message displayed when an error occurs when installing an add-on package. _("Failed to install add-on from %s") % addonPath, # Translators: The title of a dialog presented when an error occurs. _("Error"), wx.OK | wx.ICON_ERROR ) return False def handleRemoteAddonInstall(addonPath): # Add-ons cannot be installed into a Windows store version of NVDA if config.isAppX: gui.messageBox( # Translators: The message displayed when an add-on cannot be installed due to NVDA running as a Windows Store app _("Add-ons cannot be installed in the Windows Store version of NVDA"), # Translators: The title of a dialog presented when an error occurs. _("Error"), wx.OK | wx.ICON_ERROR) return gui.mainFrame.prePopup() if installAddon(gui.mainFrame, addonPath): promptUserForRestart() gui.mainFrame.postPopup() def _showAddonRequiresNVDAUpdateDialog(parent, bundle): # Translators: The message displayed when installing an add-on package is prohibited, because it requires # a later version of NVDA than is currently installed. incompatibleMessage = _( "Installation of {summary} {version} has been blocked. The minimum NVDA version required for " "this add-on is {minimumNVDAVersion}, your current NVDA version is {NVDAVersion}" ).format( summary=bundle.manifest['summary'], version=bundle.manifest['version'], minimumNVDAVersion=addonAPIVersion.formatForGUI(bundle.minimumNVDAVersion), NVDAVersion=addonAPIVersion.formatForGUI(addonAPIVersion.CURRENT) ) ErrorAddonInstallDialog( parent=parent, # Translators: The title of a dialog presented when an error occurs. title=_("Add-on not compatible"), message=incompatibleMessage, showAddonInfoFunction=lambda: _showAddonInfo(bundle) ).ShowModal() def _showAddonTooOldDialog(parent, bundle): # Translators: A message informing the user that this addon can not be installed because it is not compatible. confirmInstallMessage = _( "Installation of {summary} {version} has been blocked." " An updated version of this add-on is required," " the minimum add-on API supported by this version of NVDA is {backCompatToAPIVersion}" ).format( backCompatToAPIVersion=addonAPIVersion.formatForGUI(addonAPIVersion.BACK_COMPAT_TO), **bundle.manifest ) return ErrorAddonInstallDialog( parent=parent, # Translators: The title of a dialog presented when an error occurs. 
title=_("Add-on not compatible"), message=confirmInstallMessage, showAddonInfoFunction=lambda: _showAddonInfo(bundle) ).ShowModal() def _showConfirmAddonInstallDialog(parent, bundle): # Translators: A message asking the user if they really wish to install an addon. confirmInstallMessage = _( "Are you sure you want to install this add-on?\n" "Only install add-ons from trusted sources.\n" "Addon: {summary} {version}" ).format(**bundle.manifest) return ConfirmAddonInstallDialog( parent=parent, # Translators: Title for message asking if the user really wishes to install an Addon. title=_("Add-on Installation"), message=confirmInstallMessage, showAddonInfoFunction=lambda: _showAddonInfo(bundle) ).ShowModal() class IncompatibleAddonsDialog(wx.Dialog, DpiScalingHelperMixin): """A dialog that lists incompatible addons, and why they are not compatible""" @classmethod def _instance(cls): """ type: () -> IncompatibleAddonsDialog return None until this is replaced with a weakref.ref object. Then the instance is retrieved with by treating that object as a callable. """ return None def __new__(cls, *args, **kwargs): instance = IncompatibleAddonsDialog._instance() if instance is None: return super(IncompatibleAddonsDialog, cls).__new__(cls, *args, **kwargs) return instance def __init__( self, parent, APIVersion = addonAPIVersion.CURRENT, APIBackwardsCompatToVersion = addonAPIVersion.BACK_COMPAT_TO ): if IncompatibleAddonsDialog._instance() is not None: raise RuntimeError("Attempting to open multiple IncompatibleAddonsDialog instances") IncompatibleAddonsDialog._instance = weakref.ref(self) self._APIVersion = APIVersion self._APIBackwardsCompatToVersion = APIBackwardsCompatToVersion self.unknownCompatibilityAddonsList = list(addonHandler.getIncompatibleAddons( currentAPIVersion=APIVersion, backCompatToAPIVersion=APIBackwardsCompatToVersion )) if not len(self.unknownCompatibilityAddonsList) > 0: # this dialog is not designed to show an empty list. raise RuntimeError("No incompatible addons.") # Translators: The title of the Incompatible Addons Dialog wx.Dialog.__init__(self, parent, title=_("Incompatible Add-ons")) DpiScalingHelperMixin.__init__(self, self.GetHandle()) mainSizer=wx.BoxSizer(wx.VERTICAL) settingsSizer=wx.BoxSizer(wx.VERTICAL) sHelper = guiHelper.BoxSizerHelper(self, sizer=settingsSizer) maxControlWidth = 550 # Translators: The title of the Incompatible Addons Dialog introText = _( "The following add-ons are incompatible with NVDA version {}." " These add-ons can not be enabled." " Please contact the add-on author for further assistance." ).format(addonAPIVersion.formatForGUI(self._APIVersion)) AddonSelectionIntroLabel=wx.StaticText(self, label=introText) AddonSelectionIntroLabel.Wrap(self.scaleSize(maxControlWidth)) sHelper.addItem(AddonSelectionIntroLabel) # Translators: the label for the addons list in the incompatible addons dialog. entriesLabel=_("Incompatible add-ons") self.addonsList = sHelper.addLabeledControl( entriesLabel, nvdaControls.AutoWidthColumnListCtrl, style=wx.LC_REPORT|wx.LC_SINGLE_SEL, size=self.scaleSize((maxControlWidth, 350)) ) # Translators: The label for a column in add-ons list used to identify add-on package name (example: package is OCR). self.addonsList.InsertColumn(1, _("Package"), width=self.scaleSize(150)) # Translators: The label for a column in add-ons list used to identify add-on's running status (example: status is running). 
self.addonsList.InsertColumn(2, _("Version"), width=self.scaleSize(150)) # Translators: The label for a column in add-ons list used to provide some explanation about incompatibility self.addonsList.InsertColumn(3, _("Incompatible reason"), width=self.scaleSize(180)) buttonSizer = guiHelper.ButtonHelper(wx.HORIZONTAL) # Translators: The label for a button in Add-ons Manager dialog to show information about the selected add-on. self.aboutButton = buttonSizer.addButton(self, label=_("&About add-on...")) self.aboutButton.Disable() self.aboutButton.Bind(wx.EVT_BUTTON, self.onAbout) # Translators: The close button on an NVDA dialog. This button will dismiss the dialog. button = buttonSizer.addButton(self, label=_("&Close"), id=wx.ID_CLOSE) self.Bind(wx.EVT_CLOSE, self.onClose) sHelper.addDialogDismissButtons(buttonSizer) mainSizer.Add(settingsSizer, border=20, flag=wx.ALL) mainSizer.Fit(self) self.SetSizer(mainSizer) self.SetAffirmativeId(wx.ID_CLOSE) self.SetEscapeId(wx.ID_CLOSE) button.Bind(wx.EVT_BUTTON, self.onClose) self.refreshAddonsList() self.addonsList.SetFocus() self.CentreOnScreen() def _getIncompatReason(self, addon): if not addonVersionCheck.hasAddonGotRequiredSupport( addon, currentAPIVersion=self._APIVersion ): # Translators: The reason an add-on is not compatible. A more recent version of NVDA is # required for the add-on to work. The placeholder will be replaced with Year.Major.Minor (EG 2019.1). return _("An updated version of NVDA is required. NVDA version {} or later." ).format(addonAPIVersion.formatForGUI(addon.minimumNVDAVersion)) elif not addonVersionCheck.isAddonTested( addon, backwardsCompatToVersion=self._APIBackwardsCompatToVersion ): # Translators: The reason an add-on is not compatible. The addon relies on older, removed features of NVDA, # an updated add-on is required. The placeholder will be replaced with Year.Major.Minor (EG 2019.1). return _("An updated version of this add-on is required. The minimum supported API version is now {}" ).format(addonAPIVersion.formatForGUI(self._APIBackwardsCompatToVersion)) def refreshAddonsList(self): self.addonsList.DeleteAllItems() self.curAddons=[] for idx, addon in enumerate(self.unknownCompatibilityAddonsList): self.addonsList.Append(( addon.manifest['summary'], addon.version, self._getIncompatReason(addon) )) self.curAddons.append(addon) # onAbout depends on being able to recall the current addon based on selected index activeIndex=0 self.addonsList.Select(activeIndex, on=1) self.addonsList.SetItemState(activeIndex, wx.LIST_STATE_FOCUSED, wx.LIST_STATE_FOCUSED) self.aboutButton.Enable(True) def onAbout(self,evt): index=self.addonsList.GetFirstSelected() if index<0: return addon=self.curAddons[index] _showAddonInfo(addon) def onClose(self, evt): evt.Skip() self.EndModal(wx.OK) self.DestroyLater() # ensure that the _instance weakref is destroyed.
idx: 1
id: 24,758
msg: Did you look for other locations that compare name? I expected there to be more.
proj: nvaccess-nvda
lang: py
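The review comment above asks whether other call sites compare add-on names the same way. One way to address that, sketched below in Python (the helper name is hypothetical and not part of NVDA), is to centralise the comparison in a single case-insensitive helper so every location gets identical behaviour:

# Hypothetical helper, not part of NVDA: one place for the case-insensitive
# comparison that the patch applies to a single call site in installAddon().
def is_same_addon_name(first, second):
    """Compare add-on manifest names case-insensitively."""
    return first.lower() == second.lower()

# Example call site, mirroring the patched loop (NVDA objects shown in comments only):
# if not addon.isPendingRemove and is_same_addon_name(bundle.name, addon.manifest['name']):
#     prevAddon = addon
#     break

assert is_same_addon_name("NvDA-Addon", "nvda-addon")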
patch:
@@ -47,6 +47,9 @@ public final class BaselineTesting implements Plugin<Project> {
                         .findAny()
                         .ifPresent(ignored -> enableJUnit5ForAllTestTasks(project));
             });
+
+            // Never cache test tasks, until we work out the correct inputs for ETE / integration tests
+            project.getTasks().withType(Test.class).configureEach(test -> test.getOutputs().cacheIf(task -> false));
         });
     }
y: 1
oldf:
/* * (c) Copyright 2019 Palantir Technologies Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.palantir.baseline.plugins; import java.util.Objects; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.tasks.testing.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; public final class BaselineTesting implements Plugin<Project> { private static final Logger log = LoggerFactory.getLogger(BaselineTesting.class); @Override public void apply(Project project) { project.getTasks().withType(Test.class).all(task -> { task.jvmArgs("-XX:+HeapDumpOnOutOfMemoryError", "-XX:+CrashOnOutOfMemoryError"); }); project.getPlugins().withType(JavaPlugin.class, p -> { // afterEvaluate necessary because the junit-jupiter dep might be added further down the build.gradle project.afterEvaluate(unused -> { project.getConfigurations() .getByName(JavaPlugin.TEST_RUNTIME_CLASSPATH_CONFIGURATION_NAME) .getAllDependencies() .matching(dep -> Objects.equals(dep.getGroup(), "org.junit.jupiter") && dep.getName().equals("junit-jupiter")) .stream() .findAny() .ifPresent(ignored -> enableJUnit5ForAllTestTasks(project)); }); }); } private void enableJUnit5ForAllTestTasks(Project project) { log.info("Detected 'org:junit.jupiter:junit-jupiter', enabling useJUnitPlatform()"); project.getTasks().withType(Test.class).configureEach(task -> { task.useJUnitPlatform(); task.systemProperty("junit.platform.output.capture.stdout", "true"); task.systemProperty("junit.platform.output.capture.stderr", "true"); // https://junit.org/junit5/docs/snapshot/user-guide/#writing-tests-parallel-execution task.systemProperty("junit.jupiter.execution.parallel.enabled", "true"); // Computes the desired parallelism based on the number of available processors/cores task.systemProperty("junit.jupiter.execution.parallel.config.strategy", "dynamic"); // provide some stdout feedback when tests fail task.testLogging(testLogging -> testLogging.events("failed")); }); } }
idx: 1
id: 7,107
msg: There were some requests internally to allow users to opt out of this behaviour. We could expose a property that allows people to toggle it.
proj: palantir-gradle-baseline
lang: java
patch:
@@ -300,10 +300,11 @@ public class TestIndexWriterDelete extends LuceneTestCase {
     modifier.close();
     dir.close();
   }
-
+
   public void testDeleteAllNoDeadLock() throws IOException, InterruptedException {
     Directory dir = newDirectory();
-    final RandomIndexWriter modifier = new RandomIndexWriter(random(), dir);
+    final RandomIndexWriter modifier = new RandomIndexWriter(random(), dir,
+        newIndexWriterConfig().setMergePolicy(new MockRandomMergePolicy(random())));
     int numThreads = atLeast(2);
     Thread[] threads = new Thread[numThreads];
     final CountDownLatch latch = new CountDownLatch(1);
y: 1
oldf:
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.index; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Random; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.StringField; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; import org.junit.Ignore; @SuppressCodecs("SimpleText") // too slow here public class TestIndexWriterDelete extends LuceneTestCase { // test the simple case public void testSimpleCase() throws IOException { String[] keywords = { "1", "2" }; String[] unindexed = { "Netherlands", "Italy" }; String[] unstored = { "Amsterdam has lots of bridges", "Venice has lots of canals" }; String[] text = { "Amsterdam", "Venice" }; Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); FieldType custom1 = new FieldType(); custom1.setStored(true); for (int i = 0; i < keywords.length; i++) { Document doc = new Document(); doc.add(newStringField("id", keywords[i], Field.Store.YES)); doc.add(newField("country", unindexed[i], custom1)); doc.add(newTextField("contents", unstored[i], Field.Store.NO)); doc.add(newTextField("city", text[i], Field.Store.YES)); modifier.addDocument(doc); } modifier.forceMerge(1); modifier.commit(); Term term = new Term("city", "Amsterdam"); long hitCount = getHitCount(dir, term); assertEquals(1, hitCount); if (VERBOSE) { System.out.println("\nTEST: now delete by term=" + term); } modifier.deleteDocuments(term); modifier.commit(); if (VERBOSE) { System.out.println("\nTEST: now getHitCount"); } hitCount = getHitCount(dir, term); assertEquals(0, hitCount); modifier.close(); dir.close(); } // test when delete terms only apply to disk segments public void testNonRAMDelete() throws IOException { Directory dir = newDirectory(); IndexWriter 
modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) .setMaxBufferedDocs(2)); int id = 0; int value = 100; for (int i = 0; i < 7; i++) { addDoc(modifier, ++id, value); } modifier.commit(); assertEquals(0, modifier.getNumBufferedDocuments()); assertTrue(0 < modifier.getSegmentCount()); modifier.commit(); IndexReader reader = DirectoryReader.open(dir); assertEquals(7, reader.numDocs()); reader.close(); modifier.deleteDocuments(new Term("value", String.valueOf(value))); modifier.commit(); reader = DirectoryReader.open(dir); assertEquals(0, reader.numDocs()); reader.close(); modifier.close(); dir.close(); } // test when delete terms only apply to ram segments public void testRAMDeletes() throws IOException { for(int t=0;t<2;t++) { if (VERBOSE) { System.out.println("TEST: t=" + t); } Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) .setMaxBufferedDocs(4)); int id = 0; int value = 100; addDoc(modifier, ++id, value); if (0 == t) modifier.deleteDocuments(new Term("value", String.valueOf(value))); else modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value)))); addDoc(modifier, ++id, value); if (0 == t) { modifier.deleteDocuments(new Term("value", String.valueOf(value))); assertEquals(2, modifier.getNumBufferedDeleteTerms()); assertEquals(1, modifier.getBufferedDeleteTermsSize()); } else modifier.deleteDocuments(new TermQuery(new Term("value", String.valueOf(value)))); addDoc(modifier, ++id, value); assertEquals(0, modifier.getSegmentCount()); modifier.commit(); IndexReader reader = DirectoryReader.open(dir); assertEquals(1, reader.numDocs()); long hitCount = getHitCount(dir, new Term("id", String.valueOf(id))); assertEquals(1, hitCount); reader.close(); modifier.close(); dir.close(); } } // test when delete terms apply to both disk and ram segments public void testBothDeletes() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) .setMaxBufferedDocs(100)); int id = 0; int value = 100; for (int i = 0; i < 5; i++) { addDoc(modifier, ++id, value); } value = 200; for (int i = 0; i < 5; i++) { addDoc(modifier, ++id, value); } modifier.commit(); for (int i = 0; i < 5; i++) { addDoc(modifier, ++id, value); } modifier.deleteDocuments(new Term("value", String.valueOf(value))); modifier.commit(); IndexReader reader = DirectoryReader.open(dir); assertEquals(5, reader.numDocs()); modifier.close(); reader.close(); dir.close(); } // test that batched delete terms are flushed together public void testBatchDeletes() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) .setMaxBufferedDocs(2)); int id = 0; int value = 100; for (int i = 0; i < 7; i++) { addDoc(modifier, ++id, value); } modifier.commit(); IndexReader reader = DirectoryReader.open(dir); assertEquals(7, reader.numDocs()); reader.close(); id = 0; modifier.deleteDocuments(new Term("id", String.valueOf(++id))); modifier.deleteDocuments(new Term("id", String.valueOf(++id))); modifier.commit(); reader = DirectoryReader.open(dir); assertEquals(5, reader.numDocs()); reader.close(); Term[] terms = new Term[3]; for (int i = 0; i < terms.length; i++) { terms[i] = new Term("id", String.valueOf(++id)); } 
modifier.deleteDocuments(terms); modifier.commit(); reader = DirectoryReader.open(dir); assertEquals(2, reader.numDocs()); reader.close(); modifier.close(); dir.close(); } // test deleteAll() public void testDeleteAllSimple() throws IOException { if (VERBOSE) { System.out.println("TEST: now start"); } Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) .setMaxBufferedDocs(2)); int id = 0; int value = 100; for (int i = 0; i < 7; i++) { addDoc(modifier, ++id, value); } if (VERBOSE) { System.out.println("TEST: now commit"); } modifier.commit(); IndexReader reader = DirectoryReader.open(dir); assertEquals(7, reader.numDocs()); reader.close(); // Add 1 doc (so we will have something buffered) addDoc(modifier, 99, value); // Delete all if (VERBOSE) { System.out.println("TEST: now delete all"); } modifier.deleteAll(); // Delete all shouldn't be on disk yet reader = DirectoryReader.open(dir); assertEquals(7, reader.numDocs()); reader.close(); // Add a doc and update a doc (after the deleteAll, before the commit) addDoc(modifier, 101, value); updateDoc(modifier, 102, value); if (VERBOSE) { System.out.println("TEST: now 2nd commit"); } // commit the delete all modifier.commit(); // Validate there are no docs left reader = DirectoryReader.open(dir); assertEquals(2, reader.numDocs()); reader.close(); modifier.close(); dir.close(); } public void testDeleteAllNoDeadLock() throws IOException, InterruptedException { Directory dir = newDirectory(); final RandomIndexWriter modifier = new RandomIndexWriter(random(), dir); int numThreads = atLeast(2); Thread[] threads = new Thread[numThreads]; final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch doneLatch = new CountDownLatch(numThreads); for (int i = 0; i < numThreads; i++) { final int offset = i; threads[i] = new Thread() { @Override public void run() { int id = offset * 1000; int value = 100; try { latch.await(); for (int j = 0; j < 1000; j++) { Document doc = new Document(); doc.add(newTextField("content", "aaa", Field.Store.NO)); doc.add(newStringField("id", String.valueOf(id++), Field.Store.YES)); doc.add(newStringField("value", String.valueOf(value), Field.Store.NO)); doc.add(new NumericDocValuesField("dv", value)); modifier.addDocument(doc); if (VERBOSE) { System.out.println("\tThread["+offset+"]: add doc: " + id); } } } catch (Exception e) { throw new RuntimeException(e); } finally { doneLatch.countDown(); if (VERBOSE) { System.out.println("\tThread["+offset+"]: done indexing" ); } } } }; threads[i].start(); } latch.countDown(); while(!doneLatch.await(1, TimeUnit.MILLISECONDS)) { if (VERBOSE) { System.out.println("\nTEST: now deleteAll"); } modifier.deleteAll(); if (VERBOSE) { System.out.println("del all"); } } if (VERBOSE) { System.out.println("\nTEST: now final deleteAll"); } modifier.deleteAll(); for (Thread thread : threads) { thread.join(); } if (VERBOSE) { System.out.println("\nTEST: now close"); } modifier.close(); DirectoryReader reader = DirectoryReader.open(dir); if (VERBOSE) { System.out.println("\nTEST: got reader=" + reader); } assertEquals(0, reader.maxDoc()); assertEquals(0, reader.numDocs()); assertEquals(0, reader.numDeletedDocs(), 0); reader.close(); dir.close(); } // test rollback of deleteAll() public void testDeleteAllRollback() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) 
.setMaxBufferedDocs(2)); int id = 0; int value = 100; for (int i = 0; i < 7; i++) { addDoc(modifier, ++id, value); } modifier.commit(); addDoc(modifier, ++id, value); IndexReader reader = DirectoryReader.open(dir); assertEquals(7, reader.numDocs()); reader.close(); // Delete all modifier.deleteAll(); // Roll it back modifier.rollback(); // Validate that the docs are still there reader = DirectoryReader.open(dir); assertEquals(7, reader.numDocs()); reader.close(); dir.close(); } // test deleteAll() w/ near real-time reader public void testDeleteAllNRT() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) .setMaxBufferedDocs(2)); int id = 0; int value = 100; for (int i = 0; i < 7; i++) { addDoc(modifier, ++id, value); } modifier.commit(); IndexReader reader = modifier.getReader(); assertEquals(7, reader.numDocs()); reader.close(); addDoc(modifier, ++id, value); addDoc(modifier, ++id, value); // Delete all modifier.deleteAll(); reader = modifier.getReader(); assertEquals(0, reader.numDocs()); reader.close(); // Roll it back modifier.rollback(); // Validate that the docs are still there reader = DirectoryReader.open(dir); assertEquals(7, reader.numDocs()); reader.close(); dir.close(); } private void updateDoc(IndexWriter modifier, int id, int value) throws IOException { Document doc = new Document(); doc.add(newTextField("content", "aaa", Field.Store.NO)); doc.add(newStringField("id", String.valueOf(id), Field.Store.YES)); doc.add(newStringField("value", String.valueOf(value), Field.Store.NO)); doc.add(new NumericDocValuesField("dv", value)); modifier.updateDocument(new Term("id", String.valueOf(id)), doc); } private void addDoc(IndexWriter modifier, int id, int value) throws IOException { Document doc = new Document(); doc.add(newTextField("content", "aaa", Field.Store.NO)); doc.add(newStringField("id", String.valueOf(id), Field.Store.YES)); doc.add(newStringField("value", String.valueOf(value), Field.Store.NO)); doc.add(new NumericDocValuesField("dv", value)); modifier.addDocument(doc); } private long getHitCount(Directory dir, Term term) throws IOException { IndexReader reader = DirectoryReader.open(dir); IndexSearcher searcher = newSearcher(reader); long hitCount = searcher.search(new TermQuery(term), 1000).totalHits.value; reader.close(); return hitCount; } // TODO: can we fix MockDirectoryWrapper disk full checking to be more efficient (not recompute on every write)? @Nightly public void testDeletesOnDiskFull() throws IOException { doTestOperationsOnDiskFull(false); } // TODO: can we fix MockDirectoryWrapper disk full checking to be more efficient (not recompute on every write)? @Nightly public void testUpdatesOnDiskFull() throws IOException { doTestOperationsOnDiskFull(true); } /** * Make sure if modifier tries to commit but hits disk full that modifier * remains consistent and usable. Similar to TestIndexReader.testDiskFull(). 
*/ private void doTestOperationsOnDiskFull(boolean updates) throws IOException { Term searchTerm = new Term("content", "aaa"); int START_COUNT = 157; int END_COUNT = 144; // First build up a starting index: MockDirectoryWrapper startDir = newMockDirectory(); IndexWriter writer = new IndexWriter(startDir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); for (int i = 0; i < 157; i++) { Document d = new Document(); d.add(newStringField("id", Integer.toString(i), Field.Store.YES)); d.add(newTextField("content", "aaa " + i, Field.Store.NO)); d.add(new NumericDocValuesField("dv", i)); writer.addDocument(d); } writer.close(); long diskUsage = startDir.sizeInBytes(); long diskFree = diskUsage + 10; IOException err = null; boolean done = false; // Iterate w/ ever increasing free disk space: while (!done) { if (VERBOSE) { System.out.println("TEST: cycle"); } MockDirectoryWrapper dir = new MockDirectoryWrapper(random(), TestUtil.ramCopyOf(startDir)); dir.setAllowRandomFileNotFoundException(false); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) .setMaxBufferedDocs(1000) .setMergeScheduler(new ConcurrentMergeScheduler())); ((ConcurrentMergeScheduler) modifier.getConfig().getMergeScheduler()).setSuppressExceptions(); // For each disk size, first try to commit against // dir that will hit random IOExceptions & disk // full; after, give it infinite disk space & turn // off random IOExceptions & retry w/ same reader: boolean success = false; for (int x = 0; x < 2; x++) { if (VERBOSE) { System.out.println("TEST: x=" + x); } double rate = 0.1; double diskRatio = ((double)diskFree) / diskUsage; long thisDiskFree; String testName; if (0 == x) { thisDiskFree = diskFree; if (diskRatio >= 2.0) { rate /= 2; } if (diskRatio >= 4.0) { rate /= 2; } if (diskRatio >= 6.0) { rate = 0.0; } if (VERBOSE) { System.out.println("\ncycle: " + diskFree + " bytes"); } testName = "disk full during reader.close() @ " + thisDiskFree + " bytes"; dir.setRandomIOExceptionRateOnOpen(random().nextDouble()*0.01); } else { thisDiskFree = 0; rate = 0.0; if (VERBOSE) { System.out.println("\ncycle: same writer: unlimited disk space"); } testName = "reader re-use after disk full"; dir.setRandomIOExceptionRateOnOpen(0.0); } dir.setMaxSizeInBytes(thisDiskFree); dir.setRandomIOExceptionRate(rate); try { if (0 == x) { int docId = 12; for (int i = 0; i < 13; i++) { if (updates) { Document d = new Document(); d.add(newStringField("id", Integer.toString(i), Field.Store.YES)); d.add(newTextField("content", "bbb " + i, Field.Store.NO)); d.add(new NumericDocValuesField("dv", i)); modifier.updateDocument(new Term("id", Integer.toString(docId)), d); } else { // deletes modifier.deleteDocuments(new Term("id", Integer.toString(docId))); // modifier.setNorm(docId, "contents", (float)2.0); } docId += 12; } try { modifier.close(); } catch (IllegalStateException ise) { // ok throw (IOException) ise.getCause(); } } success = true; if (0 == x) { done = true; } } catch (IOException e) { if (VERBOSE) { System.out.println(" hit IOException: " + e); e.printStackTrace(System.out); } err = e; if (1 == x) { e.printStackTrace(); fail(testName + " hit IOException after disk space was freed up"); } } // prevent throwing a random exception here!! 
final double randomIOExceptionRate = dir.getRandomIOExceptionRate(); final long maxSizeInBytes = dir.getMaxSizeInBytes(); dir.setRandomIOExceptionRate(0.0); dir.setRandomIOExceptionRateOnOpen(0.0); dir.setMaxSizeInBytes(0); if (!success) { // Must force the close else the writer can have // open files which cause exc in MockRAMDir.close if (VERBOSE) { System.out.println("TEST: now rollback"); } modifier.rollback(); } // If the close() succeeded, make sure index is OK: if (success) { TestUtil.checkIndex(dir); } dir.setRandomIOExceptionRate(randomIOExceptionRate); dir.setMaxSizeInBytes(maxSizeInBytes); // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs changed, and if // we failed, we see either all docs or no docs // changed (transactional semantics): IndexReader newReader = null; try { newReader = DirectoryReader.open(dir); } catch (IOException e) { e.printStackTrace(); fail(testName + ":exception when creating IndexReader after disk full during close: " + e); } IndexSearcher searcher = newSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.search(new TermQuery(searchTerm), 1000).scoreDocs; } catch (IOException e) { e.printStackTrace(); fail(testName + ": exception when searching: " + e); } int result2 = hits.length; if (success) { if (x == 0 && result2 != END_COUNT) { fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT); } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) { // It's possible that the first exception was // "recoverable" wrt pending deletes, in which // case the pending deletes are retained and // then re-flushing (with plenty of disk // space) will succeed in flushing the // deletes: fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result2 != START_COUNT && result2 != END_COUNT) { err.printStackTrace(); fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT); } } newReader.close(); if (result2 == END_COUNT) { break; } } dir.close(); // Try again with more bytes of free space: diskFree += Math.max(10, diskFree >>> 3); } startDir.close(); } @Ignore // This test tests that buffered deletes are cleared when // an Exception is hit during flush. 
public void testErrorAfterApplyDeletes() throws IOException { MockDirectoryWrapper.Failure failure = new MockDirectoryWrapper.Failure() { boolean sawMaybe = false; boolean failed = false; Thread thread; @Override public MockDirectoryWrapper.Failure reset() { thread = Thread.currentThread(); sawMaybe = false; failed = false; return this; } @Override public void eval(MockDirectoryWrapper dir) throws IOException { if (Thread.currentThread() != thread) { // don't fail during merging return; } if (VERBOSE) { System.out.println("FAIL EVAL:"); } new Throwable().printStackTrace(System.out); if (sawMaybe && !failed) { boolean seen = callStackContainsAnyOf("applyDeletesAndUpdates", "slowFileExists"); if (!seen) { // Only fail once we are no longer in applyDeletes failed = true; if (VERBOSE) { System.out.println("TEST: mock failure: now fail"); new Throwable().printStackTrace(System.out); } throw new RuntimeException("fail after applyDeletes"); } } if (!failed) { if (callStackContainsAnyOf("applyDeletesAndUpdates")) { if (VERBOSE) { System.out.println("TEST: mock failure: saw applyDeletes"); new Throwable().printStackTrace(System.out); } sawMaybe = true; } } } }; // create a couple of files String[] keywords = { "1", "2" }; String[] unindexed = { "Netherlands", "Italy" }; String[] unstored = { "Amsterdam has lots of bridges", "Venice has lots of canals" }; String[] text = { "Amsterdam", "Venice" }; MockDirectoryWrapper dir = newMockDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false)) .setReaderPooling(false) .setMergePolicy(newLogMergePolicy())); MergePolicy lmp = modifier.getConfig().getMergePolicy(); lmp.setNoCFSRatio(1.0); dir.failOn(failure.reset()); FieldType custom1 = new FieldType(); custom1.setStored(true); for (int i = 0; i < keywords.length; i++) { Document doc = new Document(); doc.add(newStringField("id", keywords[i], Field.Store.YES)); doc.add(newField("country", unindexed[i], custom1)); doc.add(newTextField("contents", unstored[i], Field.Store.NO)); doc.add(newTextField("city", text[i], Field.Store.YES)); modifier.addDocument(doc); } // flush if (VERBOSE) { System.out.println("TEST: now full merge"); } modifier.forceMerge(1); if (VERBOSE) { System.out.println("TEST: now commit"); } modifier.commit(); // one of the two files hits Term term = new Term("city", "Amsterdam"); long hitCount = getHitCount(dir, term); assertEquals(1, hitCount); // open the writer again (closed above) // delete the doc // max buf del terms is two, so this is buffered if (VERBOSE) { System.out.println("TEST: delete term=" + term); } modifier.deleteDocuments(term); // add a doc // doc remains buffered if (VERBOSE) { System.out.println("TEST: add empty doc"); } Document doc = new Document(); modifier.addDocument(doc); // commit the changes, the buffered deletes, and the new doc // The failure object will fail on the first write after the del // file gets created when processing the buffered delete // in the ac case, this will be when writing the new segments // files so we really don't need the new doc, but it's harmless // a new segments file won't be created but in this // case, creation of the cfs file happens next so we // need the doc (to test that it's okay that we don't // lose deletes if failing while creating the cfs file) if (VERBOSE) { System.out.println("TEST: now commit for failure"); } RuntimeException expected = expectThrows(RuntimeException.class, () -> { modifier.commit(); }); if (VERBOSE) { 
System.out.println("TEST: hit exc:"); expected.printStackTrace(System.out); } // The commit above failed, so we need to retry it (which will // succeed, because the failure is a one-shot) boolean writerClosed; try { modifier.commit(); writerClosed = false; } catch (IllegalStateException ise) { // The above exc struck during merge, and closed the writer writerClosed = true; } if (writerClosed == false) { hitCount = getHitCount(dir, term); // Make sure the delete was successfully flushed: assertEquals(0, hitCount); modifier.close(); } dir.close(); } // This test tests that the files created by the docs writer before // a segment is written are cleaned up if there's an i/o error public void testErrorInDocsWriterAdd() throws IOException { MockDirectoryWrapper.Failure failure = new MockDirectoryWrapper.Failure() { boolean failed = false; @Override public MockDirectoryWrapper.Failure reset() { failed = false; return this; } @Override public void eval(MockDirectoryWrapper dir) throws IOException { if (!failed) { failed = true; throw new IOException("fail in add doc"); } } }; // create a couple of files String[] keywords = { "1", "2" }; String[] unindexed = { "Netherlands", "Italy" }; String[] unstored = { "Amsterdam has lots of bridges", "Venice has lots of canals" }; String[] text = { "Amsterdam", "Venice" }; MockDirectoryWrapper dir = newMockDirectory(); IndexWriter modifier = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); modifier.commit(); dir.failOn(failure.reset()); FieldType custom1 = new FieldType(); custom1.setStored(true); for (int i = 0; i < keywords.length; i++) { Document doc = new Document(); doc.add(newStringField("id", keywords[i], Field.Store.YES)); doc.add(newField("country", unindexed[i], custom1)); doc.add(newTextField("contents", unstored[i], Field.Store.NO)); doc.add(newTextField("city", text[i], Field.Store.YES)); try { modifier.addDocument(doc); } catch (IOException io) { if (VERBOSE) { System.out.println("TEST: got expected exc:"); io.printStackTrace(System.out); } break; } } assertTrue(modifier.isDeleterClosed()); TestIndexWriter.assertNoUnreferencedFiles(dir, "docsWriter.abort() failed to delete unreferenced files"); dir.close(); } public void testDeleteNullQuery() throws IOException { Directory dir = newDirectory(); IndexWriter modifier = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false))); for (int i = 0; i < 5; i++) { addDoc(modifier, i, 2*i); } modifier.deleteDocuments(new TermQuery(new Term("nada", "nada"))); modifier.commit(); assertEquals(5, modifier.getDocStats().numDocs); modifier.close(); dir.close(); } public void testDeleteAllSlowly() throws Exception { final Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir); final int NUM_DOCS = atLeast(1000); final List<Integer> ids = new ArrayList<>(NUM_DOCS); for(int id=0;id<NUM_DOCS;id++) { ids.add(id); } Collections.shuffle(ids, random()); for(int id : ids) { Document doc = new Document(); doc.add(newStringField("id", ""+id, Field.Store.NO)); w.addDocument(doc); } Collections.shuffle(ids, random()); int upto = 0; while(upto < ids.size()) { final int left = ids.size() - upto; final int inc = Math.min(left, TestUtil.nextInt(random(), 1, 20)); final int limit = upto + inc; while(upto < limit) { if (VERBOSE) { System.out.println("TEST: delete id=" + ids.get(upto)); } w.deleteDocuments(new Term("id", ""+ids.get(upto++))); } if (VERBOSE) { System.out.println("\nTEST: now open 
reader"); } final IndexReader r = w.getReader(); assertEquals(NUM_DOCS - upto, r.numDocs()); r.close(); } w.close(); dir.close(); } // TODO: this test can hit pathological cases (IW settings?) where it runs for far too long @Nightly public void testIndexingThenDeleting() throws Exception { // TODO: move this test to its own class and just @SuppressCodecs? // TODO: is it enough to just use newFSDirectory? final String fieldFormat = TestUtil.getPostingsFormat("field"); assumeFalse("This test cannot run with SimpleText codec", fieldFormat.equals("SimpleText")); assumeFalse("This test cannot run with Direct codec", fieldFormat.equals("Direct")); final Random r = random(); Directory dir = newDirectory(); // note this test explicitly disables payloads final Analyzer analyzer = new Analyzer() { @Override public TokenStreamComponents createComponents(String fieldName) { return new TokenStreamComponents(new MockTokenizer(MockTokenizer.WHITESPACE, true)); } }; IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(analyzer) .setRAMBufferSizeMB(4.0) .setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH)); Document doc = new Document(); doc.add(newTextField("field", "go 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20", Field.Store.NO)); int num = atLeast(1); for (int iter = 0; iter < num; iter++) { int count = 0; final boolean doIndexing = r.nextBoolean(); if (VERBOSE) { System.out.println("TEST: iter doIndexing=" + doIndexing); } if (doIndexing) { // Add docs until a flush is triggered final int startFlushCount = w.getFlushCount(); while(w.getFlushCount() == startFlushCount) { w.addDocument(doc); count++; } } else { // Delete docs until a flush is triggered final int startFlushCount = w.getFlushCount(); while(w.getFlushCount() == startFlushCount) { w.deleteDocuments(new Term("foo", ""+count)); count++; } } assertTrue("flush happened too quickly during " + (doIndexing ? 
"indexing" : "deleting") + " count=" + count, count > 2500); } w.close(); dir.close(); } // LUCENE-3340: make sure deletes that we don't apply // during flush (ie are just pushed into the stream) are // in fact later flushed due to their RAM usage: public void testFlushPushedDeletesByRAM() throws Exception { Directory dir = newDirectory(); // Cannot use RandomIndexWriter because we don't want to // ever call commit() for this test: // note: tiny rambuffer used, as with a 1MB buffer the test is too slow (flush @ 128,999) IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())) .setRAMBufferSizeMB(0.1f) .setMaxBufferedDocs(1000) .setMergePolicy(NoMergePolicy.INSTANCE) .setReaderPooling(false)); int count = 0; while(true) { Document doc = new Document(); doc.add(new StringField("id", count+"", Field.Store.NO)); final Term delTerm; if (count == 1010) { // This is the only delete that applies delTerm = new Term("id", ""+0); } else { // These get buffered, taking up RAM, but delete // nothing when applied: delTerm = new Term("id", "x" + count); } w.updateDocument(delTerm, doc); // Eventually segment 0 should get a del docs: // TODO: fix this test if (slowFileExists(dir, "_0_1.del") || slowFileExists(dir, "_0_1.liv") ) { if (VERBOSE) { System.out.println("TEST: deletes created @ count=" + count); } break; } count++; // Today we applyDeletes @ count=21553; even if we make // sizable improvements to RAM efficiency of buffered // del term we're unlikely to go over 100K: if (count > 100000) { fail("delete's were not applied"); } } w.close(); dir.close(); } // Make sure buffered (pushed) deletes don't use up so // much RAM that it forces long tail of tiny segments: @Nightly public void testApplyDeletesOnFlush() throws Exception { Directory dir = newDirectory(); // Cannot use RandomIndexWriter because we don't want to // ever call commit() for this test: final AtomicInteger docsInSegment = new AtomicInteger(); final AtomicBoolean closing = new AtomicBoolean(); final AtomicBoolean sawAfterFlush = new AtomicBoolean(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())) .setRAMBufferSizeMB(0.5) .setMaxBufferedDocs(-1) .setMergePolicy(NoMergePolicy.INSTANCE) .setReaderPooling(false) // always use CFS so we don't use tons of file handles in the test .setUseCompoundFile(true)) { @Override public void doAfterFlush() { assertTrue("only " + docsInSegment.get() + " in segment", closing.get() || docsInSegment.get() >= 7); docsInSegment.set(0); sawAfterFlush.set(true); } }; int id = 0; while(true) { StringBuilder sb = new StringBuilder(); for(int termIDX=0;termIDX<100;termIDX++) { sb.append(' ').append(TestUtil.randomRealisticUnicodeString(random())); } if (id == 500) { w.deleteDocuments(new Term("id", "0")); } Document doc = new Document(); doc.add(newStringField("id", ""+id, Field.Store.NO)); doc.add(newTextField("body", sb.toString(), Field.Store.NO)); w.updateDocument(new Term("id", ""+id), doc); docsInSegment.incrementAndGet(); // TODO: fix this test if (slowFileExists(dir, "_0_1.del") || slowFileExists(dir, "_0_1.liv")) { if (VERBOSE) { System.out.println("TEST: deletes created @ id=" + id); } break; } id++; } closing.set(true); assertTrue(sawAfterFlush.get()); w.close(); dir.close(); } // LUCENE-4455 public void testDeletesCheckIndexOutput() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); iwc.setMergePolicy(NoMergePolicy.INSTANCE); iwc.setMaxBufferedDocs(2); 
IndexWriter w = new IndexWriter(dir, iwc); Document doc = new Document(); doc.add(newField("field", "0", StringField.TYPE_NOT_STORED)); w.addDocument(doc); doc = new Document(); doc.add(newField("field", "1", StringField.TYPE_NOT_STORED)); w.addDocument(doc); w.commit(); assertEquals(1, w.getSegmentCount()); w.deleteDocuments(new Term("field", "0")); w.commit(); assertEquals(1, w.getSegmentCount()); w.close(); ByteArrayOutputStream bos = new ByteArrayOutputStream(1024); CheckIndex checker = new CheckIndex(dir); checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false); CheckIndex.Status indexStatus = checker.checkIndex(null); assertTrue(indexStatus.clean); checker.close(); String s = bos.toString(IOUtils.UTF_8); // Segment should have deletions: assertTrue(s.contains("has deletions")); iwc = new IndexWriterConfig(new MockAnalyzer(random())); w = new IndexWriter(dir, iwc); w.forceMerge(1); w.close(); bos = new ByteArrayOutputStream(1024); checker = new CheckIndex(dir); checker.setInfoStream(new PrintStream(bos, false, IOUtils.UTF_8), false); indexStatus = checker.checkIndex(null); assertTrue(indexStatus.clean); checker.close(); s = bos.toString(IOUtils.UTF_8); assertFalse(s.contains("has deletions")); dir.close(); } public void testTryDeleteDocument() throws Exception { Directory d = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); IndexWriter w = new IndexWriter(d, iwc); Document doc = new Document(); w.addDocument(doc); w.addDocument(doc); w.addDocument(doc); w.close(); iwc = new IndexWriterConfig(new MockAnalyzer(random())); iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); w = new IndexWriter(d, iwc); IndexReader r = DirectoryReader.open(w, false, false); assertTrue(w.tryDeleteDocument(r, 1) != -1); assertFalse(((StandardDirectoryReader)r).isCurrent()); assertTrue(w.tryDeleteDocument(r.leaves().get(0).reader(), 0) != -1); assertFalse(((StandardDirectoryReader)r).isCurrent()); r.close(); w.close(); r = DirectoryReader.open(d); assertEquals(2, r.numDeletedDocs()); assertNotNull(MultiBits.getLiveDocs(r)); r.close(); d.close(); } public void testNRTIsCurrentAfterDelete() throws Exception { Directory d = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); IndexWriter w = new IndexWriter(d, iwc); Document doc = new Document(); w.addDocument(doc); w.addDocument(doc); w.addDocument(doc); doc.add(new StringField("id", "1", Field.Store.YES)); w.addDocument(doc); w.close(); iwc = new IndexWriterConfig(new MockAnalyzer(random())); iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); w = new IndexWriter(d, iwc); IndexReader r = DirectoryReader.open(w, false, false); w.deleteDocuments(new Term("id", "1")); IndexReader r2 = DirectoryReader.open(w, true, true); assertFalse(((StandardDirectoryReader)r).isCurrent()); assertTrue(((StandardDirectoryReader)r2).isCurrent()); IOUtils.close(r, r2, w, d); } public void testOnlyDeletesTriggersMergeOnClose() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); iwc.setMaxBufferedDocs(2); LogDocMergePolicy mp = new LogDocMergePolicy(); mp.setMinMergeDocs(1); iwc.setMergePolicy(mp); iwc.setMergeScheduler(new SerialMergeScheduler()); IndexWriter w = new IndexWriter(dir, iwc); for(int i=0;i<38;i++) { Document doc = new Document(); doc.add(newStringField("id", ""+i, Field.Store.NO)); w.addDocument(doc); } w.commit(); for(int i=0;i<18;i++) { w.deleteDocuments(new Term("id", ""+i)); } w.close(); 
DirectoryReader r = DirectoryReader.open(dir); assertEquals(1, r.leaves().size()); r.close(); dir.close(); } public void testOnlyDeletesTriggersMergeOnGetReader() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); iwc.setMaxBufferedDocs(2); LogDocMergePolicy mp = new LogDocMergePolicy(); mp.setMinMergeDocs(1); iwc.setMergePolicy(mp); iwc.setMergeScheduler(new SerialMergeScheduler()); IndexWriter w = new IndexWriter(dir, iwc); for(int i=0;i<38;i++) { Document doc = new Document(); doc.add(newStringField("id", ""+i, Field.Store.NO)); w.addDocument(doc); } w.commit(); for(int i=0;i<18;i++) { w.deleteDocuments(new Term("id", ""+i)); } // First one triggers, but does not reflect, the merge: if (VERBOSE) { System.out.println("TEST: now get reader"); } DirectoryReader.open(w).close(); IndexReader r = DirectoryReader.open(w); assertEquals(1, r.leaves().size()); r.close(); w.close(); dir.close(); } public void testOnlyDeletesTriggersMergeOnFlush() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); iwc.setMaxBufferedDocs(2); LogDocMergePolicy mp = new LogDocMergePolicy(); mp.setMinMergeDocs(1); iwc.setMergePolicy(mp); iwc.setMergeScheduler(new SerialMergeScheduler()); IndexWriter w = new IndexWriter(dir, iwc); for(int i=0;i<38;i++) { if (VERBOSE) { System.out.println("TEST: add doc " + i); } Document doc = new Document(); doc.add(newStringField("id", ""+i, Field.Store.NO)); w.addDocument(doc); } if (VERBOSE) { System.out.println("TEST: commit1"); } w.commit(); // Deleting 18 out of the 20 docs in the first segment make it the same "level" as the other 9 which should cause a merge to kick off: for(int i=0;i<18;i++) { w.deleteDocuments(new Term("id", ""+i)); } if (VERBOSE) { System.out.println("TEST: commit2"); } w.close(); DirectoryReader r = DirectoryReader.open(dir); assertEquals(1, r.leaves().size()); r.close(); dir.close(); } public void testOnlyDeletesDeleteAllDocs() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); iwc.setMaxBufferedDocs(2); LogDocMergePolicy mp = new LogDocMergePolicy(); mp.setMinMergeDocs(1); iwc.setMergePolicy(mp); iwc.setMergeScheduler(new SerialMergeScheduler()); IndexWriter w = new IndexWriter(dir, iwc); for(int i=0;i<38;i++) { Document doc = new Document(); doc.add(newStringField("id", ""+i, Field.Store.NO)); w.addDocument(doc); } w.commit(); for(int i=0;i<38;i++) { w.deleteDocuments(new Term("id", ""+i)); } DirectoryReader r = DirectoryReader.open(w); assertEquals(0, r.leaves().size()); assertEquals(0, r.maxDoc()); r.close(); w.close(); dir.close(); } // Make sure merges still kick off after IW.deleteAll! 
public void testMergingAfterDeleteAll() throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random())); iwc.setMaxBufferedDocs(2); LogDocMergePolicy mp = new LogDocMergePolicy(); mp.setMinMergeDocs(1); iwc.setMergePolicy(mp); iwc.setMergeScheduler(new SerialMergeScheduler()); IndexWriter w = new IndexWriter(dir, iwc); for(int i=0;i<10;i++) { Document doc = new Document(); doc.add(newStringField("id", ""+i, Field.Store.NO)); w.addDocument(doc); } w.commit(); w.deleteAll(); for(int i=0;i<100;i++) { Document doc = new Document(); doc.add(newStringField("id", ""+i, Field.Store.NO)); w.addDocument(doc); } w.forceMerge(1); DirectoryReader r = DirectoryReader.open(w); assertEquals(1, r.leaves().size()); r.close(); w.close(); dir.close(); } }
1
36,558
Nice -- this forced `merge-on-getReader/commit` to be used more often in this test?
apache-lucene-solr
java
@@ -221,6 +221,9 @@ type ConsensusParams struct { // sum of estimated op cost must be less than this LogicSigMaxCost uint64 + + // a precision for assets + SupportAssetsPrecision bool } // Consensus tracks the protocol-level settings for different versions of the
1
// Copyright (C) 2019 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package config import ( "encoding/json" "errors" "io" "os" "os/user" "path/filepath" "strconv" "strings" "time" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/codecs" ) // Devnet identifies the 'development network' use for development and not generally accessible publicly const Devnet protocol.NetworkID = "devnet" // Devtestnet identifies the 'development network for tests' use for running tests against development and not generally accessible publicly const Devtestnet protocol.NetworkID = "devtestnet" // Testnet identifies the publicly-available test network const Testnet protocol.NetworkID = "testnet" // Mainnet identifies the publicly-available real-money network const Mainnet protocol.NetworkID = "mainnet" // GenesisJSONFile is the name of the genesis.json file const GenesisJSONFile = "genesis.json" // Global defines global Algorand protocol parameters which should not be overriden. type Global struct { SmallLambda time.Duration // min amount of time to wait for leader's credential (i.e., time to propagate one credential) BigLambda time.Duration // max amount of time to wait for leader's proposal (i.e., time to propagate one block) } // Protocol holds the global configuration settings for the agreement protocol, // initialized with our current defaults. This is used across all nodes we create. var Protocol = Global{ SmallLambda: 2000 * time.Millisecond, BigLambda: 15000 * time.Millisecond, } // ConsensusParams specifies settings that might vary based on the // particular version of the consensus protocol. type ConsensusParams struct { // Consensus protocol upgrades. Votes for upgrades are collected for // UpgradeVoteRounds. If the number of positive votes is over // UpgradeThreshold, the proposal is accepted. // // UpgradeVoteRounds needs to be long enough to collect an // accurate sample of participants, and UpgradeThreshold needs // to be high enough to ensure that there are sufficient participants // after the upgrade. // // There is a delay of UpgradeWaitRounds between approval of // an upgrade and its deployment, to give clients time to notify users. UpgradeVoteRounds uint64 UpgradeThreshold uint64 UpgradeWaitRounds uint64 MaxVersionStringLen int // MaxTxnBytesPerBlock determines the maximum number of bytes // that transactions can take up in a block. Specifically, // the sum of the lengths of encodings of each transaction // in a block must not exceed MaxTxnBytesPerBlock. MaxTxnBytesPerBlock int // MaxTxnBytesPerBlock is the maximum size of a transaction's Note field. MaxTxnNoteBytes int // MaxTxnLife is how long a transaction can be live for: // the maximum difference between LastValid and FirstValid. 
// // Note that in a protocol upgrade, the ledger must first be upgraded // to hold more past blocks for this value to be raised. MaxTxnLife uint64 // ApprovedUpgrades describes the upgrade proposals that this protocol // implementation will vote for. ApprovedUpgrades map[protocol.ConsensusVersion]bool // SupportGenesisHash indicates support for the GenesisHash // fields in transactions (and requires them in blocks). SupportGenesisHash bool // RequireGenesisHash indicates that GenesisHash must be present // in every transaction. RequireGenesisHash bool // DefaultKeyDilution specifies the granularity of top-level ephemeral // keys. KeyDilution is the number of second-level keys in each batch, // signed by a top-level "batch" key. The default value can be // overriden in the account state. DefaultKeyDilution uint64 // MinBalance specifies the minimum balance that can appear in // an account. To spend money below MinBalance requires issuing // an account-closing transaction, which transfers all of the // money from the account, and deletes the account state. MinBalance uint64 // MinTxnFee specifies the minimum fee allowed on a transaction. // A minimum fee is necessary to prevent DoS. In some sense this is // a way of making the spender subsidize the cost of storing this transaction. MinTxnFee uint64 // RewardUnit specifies the number of MicroAlgos corresponding to one reward // unit. // // Rewards are received by whole reward units. Fractions of // RewardUnits do not receive rewards. RewardUnit uint64 // RewardsRateRefreshInterval is the number of rounds after which the // rewards level is recomputed for the next RewardsRateRefreshInterval rounds. RewardsRateRefreshInterval uint64 // seed-related parameters SeedLookback uint64 // how many blocks back we use seeds from in sortition. delta_s in the spec SeedRefreshInterval uint64 // how often an old block hash is mixed into the seed. delta_r in the spec // ledger retention policy MaxBalLookback uint64 // (current round - MaxBalLookback) is the oldest round the ledger must answer balance queries for // sortition threshold factors NumProposers uint64 SoftCommitteeSize uint64 SoftCommitteeThreshold uint64 CertCommitteeSize uint64 CertCommitteeThreshold uint64 NextCommitteeSize uint64 // for any non-FPR votes >= deadline step, committee sizes and thresholds are constant NextCommitteeThreshold uint64 LateCommitteeSize uint64 LateCommitteeThreshold uint64 RedoCommitteeSize uint64 RedoCommitteeThreshold uint64 DownCommitteeSize uint64 DownCommitteeThreshold uint64 FastRecoveryLambda time.Duration // time between fast recovery attempts FastPartitionRecovery bool // set when fast partition recovery is enabled // commit to payset using a hash of entire payset, // instead of txid merkle tree PaysetCommitFlat bool MaxTimestampIncrement int64 // maximum time between timestamps on successive blocks // support for the efficient encoding in SignedTxnInBlock SupportSignedTxnInBlock bool // force the FeeSink address to be non-participating in the genesis balances. 
ForceNonParticipatingFeeSink bool // support for ApplyData in SignedTxnInBlock ApplyData bool // track reward distributions in ApplyData RewardsInApplyData bool // domain-separated credentials CredentialDomainSeparationEnabled bool // support for transactions that mark an account non-participating SupportBecomeNonParticipatingTransactions bool // fix the rewards calculation by avoiding subtracting too much from the rewards pool PendingResidueRewards bool // asset support Asset bool // max number of assets per account MaxAssetsPerAccount int // max length of asset name MaxAssetNameBytes int // max length of asset unit name MaxAssetUnitNameBytes int // max length of asset url MaxAssetURLBytes int // support sequential transaction counter TxnCounter TxnCounter bool // transaction groups SupportTxGroups bool // max group size MaxTxGroupSize int // support for transaction leases SupportTransactionLeases bool // 0 for no support, otherwise highest version supported LogicSigVersion uint64 // len(LogicSig.Logic) + len(LogicSig.Args[*]) must be less than this LogicSigMaxSize uint64 // sum of estimated op cost must be less than this LogicSigMaxCost uint64 } // Consensus tracks the protocol-level settings for different versions of the // consensus protocol. var Consensus map[protocol.ConsensusVersion]ConsensusParams func init() { Consensus = make(map[protocol.ConsensusVersion]ConsensusParams) initConsensusProtocols() initConsensusTestProtocols() // This must appear last, since it depends on all of the other // versions to already be registered (by the above calls). initConsensusTestFastUpgrade() // Allow tuning SmallLambda for faster consensus in single-machine e2e // tests. Useful for development. This might make sense to fold into // a protocol-version-specific setting, once we move SmallLambda into // ConsensusParams. algoSmallLambda, err := strconv.ParseInt(os.Getenv("ALGOSMALLLAMBDAMSEC"), 10, 64) if err == nil { Protocol.SmallLambda = time.Duration(algoSmallLambda) * time.Millisecond } } func initConsensusProtocols() { // WARNING: copying a ConsensusParams by value into a new variable // does not copy the ApprovedUpgrades map. Make sure that each new // ConsensusParams structure gets a fresh ApprovedUpgrades map. // Base consensus protocol version, v7. 
v7 := ConsensusParams{ UpgradeVoteRounds: 10000, UpgradeThreshold: 9000, UpgradeWaitRounds: 10000, MaxVersionStringLen: 64, MinBalance: 10000, MinTxnFee: 1000, MaxTxnLife: 1000, MaxTxnNoteBytes: 1024, MaxTxnBytesPerBlock: 1000000, DefaultKeyDilution: 10000, MaxTimestampIncrement: 25, RewardUnit: 1e6, RewardsRateRefreshInterval: 5e5, ApprovedUpgrades: map[protocol.ConsensusVersion]bool{}, NumProposers: 30, SoftCommitteeSize: 2500, SoftCommitteeThreshold: 1870, CertCommitteeSize: 1000, CertCommitteeThreshold: 720, NextCommitteeSize: 10000, NextCommitteeThreshold: 7750, LateCommitteeSize: 10000, LateCommitteeThreshold: 7750, RedoCommitteeSize: 10000, RedoCommitteeThreshold: 7750, DownCommitteeSize: 10000, DownCommitteeThreshold: 7750, FastRecoveryLambda: 5 * time.Minute, SeedLookback: 2, SeedRefreshInterval: 100, MaxBalLookback: 320, MaxTxGroupSize: 1, } v7.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV7] = v7 // v8 uses parameters and a seed derivation policy (the "twin seeds") from Georgios' new analysis v8 := v7 v8.SeedRefreshInterval = 80 v8.NumProposers = 9 v8.SoftCommitteeSize = 2990 v8.SoftCommitteeThreshold = 2267 v8.CertCommitteeSize = 1500 v8.CertCommitteeThreshold = 1112 v8.NextCommitteeSize = 5000 v8.NextCommitteeThreshold = 3838 v8.LateCommitteeSize = 5000 v8.LateCommitteeThreshold = 3838 v8.RedoCommitteeSize = 5000 v8.RedoCommitteeThreshold = 3838 v8.DownCommitteeSize = 5000 v8.DownCommitteeThreshold = 3838 v8.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV8] = v8 // v7 can be upgraded to v8. v7.ApprovedUpgrades[protocol.ConsensusV8] = true // v9 increases the minimum balance to 100,000 microAlgos. v9 := v8 v9.MinBalance = 100000 v9.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV9] = v9 // v8 can be upgraded to v9. v8.ApprovedUpgrades[protocol.ConsensusV9] = true // v10 introduces fast partition recovery (and also raises NumProposers). v10 := v9 v10.FastPartitionRecovery = true v10.NumProposers = 20 v10.LateCommitteeSize = 500 v10.LateCommitteeThreshold = 320 v10.RedoCommitteeSize = 2400 v10.RedoCommitteeThreshold = 1768 v10.DownCommitteeSize = 6000 v10.DownCommitteeThreshold = 4560 v10.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV10] = v10 // v9 can be upgraded to v10. v9.ApprovedUpgrades[protocol.ConsensusV10] = true // v11 introduces SignedTxnInBlock. v11 := v10 v11.SupportSignedTxnInBlock = true v11.PaysetCommitFlat = true v11.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV11] = v11 // v10 can be upgraded to v11. v10.ApprovedUpgrades[protocol.ConsensusV11] = true // v12 increases the maximum length of a version string. v12 := v11 v12.MaxVersionStringLen = 128 v12.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV12] = v12 // v11 can be upgraded to v12. v11.ApprovedUpgrades[protocol.ConsensusV12] = true // v13 makes the consensus version a meaningful string. v13 := v12 v13.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV13] = v13 // v12 can be upgraded to v13. v12.ApprovedUpgrades[protocol.ConsensusV13] = true // v14 introduces tracking of closing amounts in ApplyData, and enables // GenesisHash in transactions. v14 := v13 v14.ApplyData = true v14.SupportGenesisHash = true v14.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV14] = v14 // v13 can be upgraded to v14. 
v13.ApprovedUpgrades[protocol.ConsensusV14] = true // v15 introduces tracking of reward distributions in ApplyData. v15 := v14 v15.RewardsInApplyData = true v15.ForceNonParticipatingFeeSink = true v15.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV15] = v15 // v14 can be upgraded to v15. v14.ApprovedUpgrades[protocol.ConsensusV15] = true // v16 fixes domain separation in credentials. v16 := v15 v16.CredentialDomainSeparationEnabled = true v16.RequireGenesisHash = true v16.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV16] = v16 // v15 can be upgraded to v16. v15.ApprovedUpgrades[protocol.ConsensusV16] = true // ConsensusV17 points to 'final' spec commit v17 := v16 v17.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV17] = v17 // v16 can be upgraded to v17. v16.ApprovedUpgrades[protocol.ConsensusV17] = true // ConsensusV18 points to reward calculation spec commit v18 := v17 v18.PendingResidueRewards = true v18.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} v18.TxnCounter = true v18.Asset = true v18.LogicSigVersion = 1 v18.LogicSigMaxSize = 1000 v18.LogicSigMaxCost = 20000 v18.MaxAssetsPerAccount = 1000 v18.SupportTxGroups = true v18.MaxTxGroupSize = 16 v18.SupportTransactionLeases = true v18.SupportBecomeNonParticipatingTransactions = true v18.MaxAssetNameBytes = 32 v18.MaxAssetUnitNameBytes = 8 v18.MaxAssetURLBytes = 32 Consensus[protocol.ConsensusV18] = v18 // ConsensusV19 is the official spec commit ( teal, assets, group tx ) v19 := v18 v19.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusV19] = v19 // v18 can be upgraded to v19. v18.ApprovedUpgrades[protocol.ConsensusV19] = true // v17 can be upgraded to v19. v17.ApprovedUpgrades[protocol.ConsensusV19] = true // ConsensusFuture is used to test features that are implemented // but not yet released in a production protocol version. 
vFuture := v19 vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusFuture] = vFuture } func initConsensusTestProtocols() { // Various test protocol versions Consensus[protocol.ConsensusTest0] = ConsensusParams{ UpgradeVoteRounds: 2, UpgradeThreshold: 1, UpgradeWaitRounds: 2, MaxVersionStringLen: 64, MaxTxnBytesPerBlock: 1000000, DefaultKeyDilution: 10000, ApprovedUpgrades: map[protocol.ConsensusVersion]bool{ protocol.ConsensusTest1: true, }, } Consensus[protocol.ConsensusTest1] = ConsensusParams{ UpgradeVoteRounds: 10, UpgradeThreshold: 8, UpgradeWaitRounds: 10, MaxVersionStringLen: 64, MaxTxnBytesPerBlock: 1000000, DefaultKeyDilution: 10000, ApprovedUpgrades: map[protocol.ConsensusVersion]bool{}, } testBigBlocks := Consensus[protocol.ConsensusCurrentVersion] testBigBlocks.MaxTxnBytesPerBlock = 100000000 testBigBlocks.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusTestBigBlocks] = testBigBlocks rapidRecalcParams := Consensus[protocol.ConsensusCurrentVersion] rapidRecalcParams.RewardsRateRefreshInterval = 25 //because rapidRecalcParams is based on ConsensusCurrentVersion, //it *shouldn't* have any ApprovedUpgrades //but explicitly mark "no approved upgrades" just in case rapidRecalcParams.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} Consensus[protocol.ConsensusTestRapidRewardRecalculation] = rapidRecalcParams // Setting the testShorterLookback parameters derived from ConsensusCurrentVersion // Will result in MaxBalLookback = 32 // Used to run tests faster where past MaxBalLookback values are checked testShorterLookback := Consensus[protocol.ConsensusCurrentVersion] testShorterLookback.ApprovedUpgrades = map[protocol.ConsensusVersion]bool{} // MaxBalLookback = 2 x SeedRefreshInterval x SeedLookback // ref. https://github.com/algorandfoundation/specs/blob/master/dev/abft.md testShorterLookback.SeedLookback = 2 testShorterLookback.SeedRefreshInterval = 8 testShorterLookback.MaxBalLookback = 2*testShorterLookback.SeedLookback*testShorterLookback.SeedRefreshInterval // 32 Consensus[protocol.ConsensusTestShorterLookback] = testShorterLookback } func initConsensusTestFastUpgrade() { fastUpgradeProtocols := make(map[protocol.ConsensusVersion]ConsensusParams) for proto, params := range Consensus { fastParams := params fastParams.UpgradeVoteRounds = 5 fastParams.UpgradeThreshold = 3 fastParams.UpgradeWaitRounds = 5 fastParams.MaxVersionStringLen += len(protocol.ConsensusTestFastUpgrade("")) fastParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]bool) for ver, flag := range params.ApprovedUpgrades { fastParams.ApprovedUpgrades[protocol.ConsensusTestFastUpgrade(ver)] = flag } fastUpgradeProtocols[protocol.ConsensusTestFastUpgrade(proto)] = fastParams } // Put the test protocols into the Consensus struct; this // is done as a separate step so we don't recurse forever. for proto, params := range fastUpgradeProtocols { Consensus[proto] = params } } // Local holds the per-node-instance configuration settings for the protocol. type Local struct { // Version tracks the current version of the defaults so we can migrate old -> new // This is specifically important whenever we decide to change the default value // for an existing parameter. Version uint32 // environmental (may be overridden) // if true, does not garbage collect; also, replies to catchup requests Archival bool // gossipNode.go // how many peers to propagate to? 
GossipFanout int NetAddress string ReconnectTime time.Duration // what we should tell people to connect to PublicAddress string MaxConnectionsPerIP int // 0 == disable PeerPingPeriodSeconds int // for https serving TLSCertFile string TLSKeyFile string // Logging BaseLoggerDebugLevel uint32 // if this is 0, do not produce agreement.cadaver CadaverSizeTarget uint64 // IncomingConnectionsLimit specifies the max number of long-lived incoming // connections. 0 means no connections allowed. -1 is unbounded. IncomingConnectionsLimit int // BroadcastConnectionsLimit specifies the number of connections that // will receive broadcast (gossip) messages from this node. If the // node has more connections than this number, it will send broadcasts // to the top connections by priority (outgoing connections first, then // by money held by peers based on their participation key). 0 means // no outgoing messages (not even transaction broadcasting to outgoing // peers). -1 means unbounded (default). BroadcastConnectionsLimit int // AnnounceParticipationKey specifies that this node should announce its // participation key (with the largest stake) to its gossip peers. This // allows peers to prioritize our connection, if necessary, in case of a // DoS attack. Disabling this means that the peers will not have any // additional information to allow them to prioritize our connection. AnnounceParticipationKey bool // PriorityPeers specifies peer IP addresses that should always get // outgoing broadcast messages from this node. PriorityPeers map[string]bool // To make sure the algod process does not run out of FDs, algod ensures // that RLIMIT_NOFILE exceeds the max number of incoming connections (i.e., // IncomingConnectionsLimit) by at least ReservedFDs. ReservedFDs are meant // to leave room for short-lived FDs like DNS queries, SQLite files, etc. ReservedFDs uint64 // local server // API endpoint address EndpointAddress string // timeouts passed to the rest http.Server implementation RestReadTimeoutSeconds int RestWriteTimeoutSeconds int // SRV-based phonebook DNSBootstrapID string // Log file size limit in bytes LogSizeLimit uint64 // text/template for creating log archive filename. // Available template vars: // Time at start of log: {{.Year}} {{.Month}} {{.Day}} {{.Hour}} {{.Minute}} {{.Second}} // Time at end of log: {{.EndYear}} {{.EndMonth}} {{.EndDay}} {{.EndHour}} {{.EndMinute}} {{.EndSecond}} // // If the filename ends with .gz or .bz2 it will be compressed. // // default: "node.archive.log" (no rotation, clobbers previous archive) LogArchiveName string // LogArchiveMaxAge will be parsed by time.ParseDuration(). // Valid units are 's' seconds, 'm' minutes, 'h' hours LogArchiveMaxAge string // number of consecutive attempts to catchup after which we replace the peers we're connected to CatchupFailurePeerRefreshRate int // where should the node exporter listen for metrics NodeExporterListenAddress string // enable metric reporting flag EnableMetricReporting bool // enable top accounts reporting flag EnableTopAccountsReporting bool // enable agreement reporting flag. Currently only prints additional period events. EnableAgreementReporting bool // enable agreement timing metrics flag EnableAgreementTimeMetrics bool // The path to the node exporter. 
NodeExporterPath string // The fallback DNS resolver address that would be used if the system resolver would fail to retrieve SRV records FallbackDNSResolverAddress string // exponential increase factor of transaction pool's fee threshold, should always be 2 in production TxPoolExponentialIncreaseFactor uint64 SuggestedFeeBlockHistory int // TxPoolSize is the number of transactions that fit in the transaction pool TxPoolSize int // number of seconds allowed for syncing transactions TxSyncTimeoutSeconds int64 // number of seconds between transaction synchronizations TxSyncIntervalSeconds int64 // the number of incoming message hashes buckets. IncomingMessageFilterBucketCount int // the size of each incoming message hash bucket. IncomingMessageFilterBucketSize int // the number of outgoing message hashes buckets. OutgoingMessageFilterBucketCount int // the size of each outgoing message hash bucket. OutgoingMessageFilterBucketSize int // enable the filtering of outgoing messages EnableOutgoingNetworkMessageFiltering bool // enable the filtering of incoming messages EnableIncomingMessageFilter bool // control enabling / disabling deadlock detection. // negative (-1) to disable, positive (1) to enable, 0 for default. DeadlockDetection int // Prefer to run algod Hosted (under algoh) // Observed by `goal` for now. RunHosted bool // The maximal number of blocks that catchup will fetch in parallel. // If less than Protocol.SeedLookback, then Protocol.SeedLookback will be used as to limit the catchup. CatchupParallelBlocks uint64 // Generate AssembleBlockMetrics telemetry event EnableAssembleStats bool // Generate ProcessBlockMetrics telemetry event EnableProcessBlockStats bool // SuggestedFeeSlidingWindowSize is number of past blocks that will be considered in computing the suggested fee SuggestedFeeSlidingWindowSize uint32 // the max size the sync server would return TxSyncServeResponseSize int // IsIndexerActive indicates whether to activate the indexer for fast retrieval of transactions // Note -- Indexer cannot operate on non Archival nodes IsIndexerActive bool // UseXForwardedForAddress indicates whether or not the node should use the X-Forwarded-For HTTP Header when // determining the source of a connection. If used, it should be set to the string "X-Forwarded-For", unless the // proxy vendor provides another header field. In the case of CloudFlare proxy, the "CF-Connecting-IP" header // field can be used. UseXForwardedForAddressField string // ForceRelayMessages indicates whether the network library relay messages even in the case that no NetAddress was specified. ForceRelayMessages bool // ConnectionsRateLimitingWindowSeconds is being used in conjunction with ConnectionsRateLimitingCount; // see ConnectionsRateLimitingCount description for further information. Providing a zero value // in this variable disables the connection rate limiting. ConnectionsRateLimitingWindowSeconds uint // ConnectionsRateLimitingCount is being used along with ConnectionsRateLimitingWindowSeconds to determine if // a connection request should be accepted or not. The gossip network examine all the incoming requests in the past // ConnectionsRateLimitingWindowSeconds seconds that share the same origin. If the total count exceed the ConnectionsRateLimitingCount // value, the connection is refused. ConnectionsRateLimitingCount uint // EnableRequestLogger enabled the logging of the incoming requests to the telemetry server. EnableRequestLogger bool } // Filenames of config files within the configdir (e.g. 
~/.algorand) // ConfigFilename is the name of the config.json file where we store per-algod-instance settings const ConfigFilename = "config.json" // PhonebookFilename is the name of the phonebook configuration files - no longer used const PhonebookFilename = "phonebook.json" // No longer used in product - still in tests // LedgerFilenamePrefix is the prefix of the name of the ledger database files const LedgerFilenamePrefix = "ledger" // CrashFilename is the name of the agreement database file. // It is used to recover from node crashes. const CrashFilename = "crash.sqlite" // LoadConfigFromDisk returns a Local config structure based on merging the defaults // with settings loaded from the config file from the custom dir. If the custom file // cannot be loaded, the default config is returned (with the error from loading the // custom file). func LoadConfigFromDisk(custom string) (c Local, err error) { return loadConfigFromFile(filepath.Join(custom, ConfigFilename)) } func loadConfigFromFile(configFile string) (c Local, err error) { c = defaultLocal c.Version = 0 // Reset to 0 so we get the version from the loaded file. c, err = mergeConfigFromFile(configFile, c) if err != nil { return } // Migrate in case defaults were changed // If a config file does not have version, it is assumed to be zero. // All fields listed in migrate() might be changed if an actual value matches to default value from a previous version. c, err = migrate(c) return } // GetDefaultLocal returns a copy of the current defaultLocal config func GetDefaultLocal() Local { return defaultLocal } func mergeConfigFromDir(root string, source Local) (Local, error) { return mergeConfigFromFile(filepath.Join(root, ConfigFilename), source) } func mergeConfigFromFile(configpath string, source Local) (Local, error) { f, err := os.Open(configpath) if err != nil { return source, err } defer f.Close() err = loadConfig(f, &source) // For now, all relays (listening for incoming connections) are also Archival // We can change this logic in the future, but it's currently the sanest default. if source.NetAddress != "" { source.Archival = true } return source, err } func loadConfig(reader io.Reader, config *Local) error { dec := json.NewDecoder(reader) return dec.Decode(config) } // DNSBootstrapArray returns an array of one or more DNS Bootstrap identifiers func (cfg Local) DNSBootstrapArray(networkID protocol.NetworkID) (bootstrapArray []string) { dnsBootstrapString := cfg.DNSBootstrap(networkID) bootstrapArray = strings.Split(dnsBootstrapString, ";") return } // DNSBootstrap returns the network-specific DNSBootstrap identifier func (cfg Local) DNSBootstrap(network protocol.NetworkID) string { // if user hasn't modified the default DNSBootstrapID in the configuration // file and we're targeting a devnet ( via genesis file ), we the // explicit devnet network bootstrap. 
if defaultLocal.DNSBootstrapID == cfg.DNSBootstrapID && network == Devnet { return "devnet.algodev.network" } return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1) } // SaveToDisk writes the Local settings into a root/ConfigFilename file func (cfg Local) SaveToDisk(root string) error { configpath := filepath.Join(root, ConfigFilename) filename := os.ExpandEnv(configpath) return cfg.SaveToFile(filename) } // SaveToFile saves the config to a specific filename, allowing overriding the default name func (cfg Local) SaveToFile(filename string) error { var alwaysInclude []string alwaysInclude = append(alwaysInclude, "Version") return codecs.SaveNonDefaultValuesToFile(filename, cfg, defaultLocal, alwaysInclude, true) } type phonebookBlackWhiteList struct { Include []string } // LoadPhonebook returns a phonebook loaded from the provided directory, if it exists. // NOTE: We no longer use phonebook for anything but tests, but users should be able to use it func LoadPhonebook(datadir string) ([]string, error) { var entries []string path := filepath.Join(datadir, PhonebookFilename) f, rootErr := os.Open(path) if rootErr != nil { if !os.IsNotExist(rootErr) { return nil, rootErr } } else { defer f.Close() phonebook := phonebookBlackWhiteList{} dec := json.NewDecoder(f) err := dec.Decode(&phonebook) if err != nil { return nil, errors.New("error decoding phonebook! got error: " + err.Error()) } entries = phonebook.Include } // get an initial list of peers return entries, rootErr } // SavePhonebookToDisk writes the phonebook into a root/PhonebookFilename file func SavePhonebookToDisk(entries []string, root string) error { configpath := filepath.Join(root, PhonebookFilename) f, err := os.OpenFile(os.ExpandEnv(configpath), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err == nil { defer f.Close() err = savePhonebook(entries, f) } return err } func savePhonebook(entries []string, w io.Writer) error { pb := phonebookBlackWhiteList{ Include: entries, } enc := codecs.NewFormattedJSONEncoder(w) return enc.Encode(pb) } var globalConfigFileRoot string // GetConfigFilePath retrieves the full path to a configuration file // These are global configurations - not specific to data-directory / network. func GetConfigFilePath(file string) (string, error) { rootPath, err := GetGlobalConfigFileRoot() if err != nil { return "", err } return filepath.Join(rootPath, file), nil } // GetGlobalConfigFileRoot returns the current root folder for global configuration files. // This will likely only change for tests. func GetGlobalConfigFileRoot() (string, error) { var err error if globalConfigFileRoot == "" { globalConfigFileRoot, err = GetDefaultConfigFilePath() if err == nil { dirErr := os.Mkdir(globalConfigFileRoot, os.ModePerm) if !os.IsExist(dirErr) { err = dirErr } } } return globalConfigFileRoot, err } // SetGlobalConfigFileRoot allows overriding the root folder for global configuration files. // It returns the current one so it can be restored, if desired. // This will likely only change for tests. func SetGlobalConfigFileRoot(rootPath string) string { currentRoot := globalConfigFileRoot globalConfigFileRoot = rootPath return currentRoot } // GetDefaultConfigFilePath retrieves the default directory for global (not per-instance) config files // By default we store in ~/.algorand/. // This will likely only change for tests. 
func GetDefaultConfigFilePath() (string, error) { currentUser, err := user.Current() if err != nil { return "", err } if currentUser.HomeDir == "" { return "", errors.New("GetDefaultConfigFilePath fail - current user has no home directory") } return filepath.Join(currentUser.HomeDir, ".algorand"), nil }
1
36,919
Instead of a `bool`, can we just call this `MaxAssetDecimals` and have it be a `uint8`/`uint16`/`uint32` whose value is 0 before the upgrade and 19 after (2**64 - 1 is 20 decimal digits)? We'll need to set a maximum value anyway, and that way there's only one new proto variable instead of two.
algorand-go-algorand
go
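The review comment above sketches an alternative API: a single numeric consensus parameter instead of a boolean flag plus a separately defined maximum. Below is a minimal, hypothetical Go sketch of that proposal; the field name `MaxAssetDecimals` and the value 19 come from the reviewer's comment, not from any merged go-algorand code.

```go
package config

// Hypothetical sketch of the reviewer's proposal: one numeric parameter
// whose zero value preserves pre-upgrade behavior, instead of a bool plus
// a separately defined maximum.
type assetPrecisionSketch struct {
	// MaxAssetDecimals caps the number of digits after the decimal point
	// an asset can be created with. 0 = assets carry no decimal precision
	// (pre-upgrade); 19 after the upgrade, since 2^64-1 is 20 decimal
	// digits long, leaving at most 19 of them for the fractional part.
	MaxAssetDecimals uint32
}

// futureParamsExample shows how a later protocol version could enable the
// feature simply by raising the cap, with no second flag needed.
func futureParamsExample() assetPrecisionSketch {
	return assetPrecisionSketch{MaxAssetDecimals: 19}
}
```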
@@ -145,7 +145,10 @@ final class EntityRepository implements EntityRepositoryInterface if ($sortFieldIsDoctrineAssociation) { $sortFieldParts = explode('.', $sortProperty, 2); - $queryBuilder->leftJoin('entity.'.$sortFieldParts[0], $sortFieldParts[0]); + // check if join has been added once before. + if (!\in_array($sortFieldParts[0], $aliases)) { + $queryBuilder->leftJoin('entity.'.$sortFieldParts[0], $sortFieldParts[0]); + } if (1 === \count($sortFieldParts)) { $queryBuilder->addOrderBy('entity.'.$sortProperty, $sortOrder);
1
<?php namespace EasyCorp\Bundle\EasyAdminBundle\Orm; use Doctrine\ORM\QueryBuilder; use Doctrine\Persistence\ManagerRegistry; use EasyCorp\Bundle\EasyAdminBundle\Collection\FieldCollection; use EasyCorp\Bundle\EasyAdminBundle\Collection\FilterCollection; use EasyCorp\Bundle\EasyAdminBundle\Contracts\Orm\EntityRepositoryInterface; use EasyCorp\Bundle\EasyAdminBundle\Dto\EntityDto; use EasyCorp\Bundle\EasyAdminBundle\Dto\FilterDataDto; use EasyCorp\Bundle\EasyAdminBundle\Dto\SearchDto; use EasyCorp\Bundle\EasyAdminBundle\Factory\EntityFactory; use EasyCorp\Bundle\EasyAdminBundle\Factory\FormFactory; use EasyCorp\Bundle\EasyAdminBundle\Form\Type\ComparisonType; use EasyCorp\Bundle\EasyAdminBundle\Provider\AdminContextProvider; /** * @author Javier Eguiluz <[email protected]> */ final class EntityRepository implements EntityRepositoryInterface { private $adminContextProvider; private $doctrine; private $entityFactory; private $formFactory; public function __construct(AdminContextProvider $adminContextProvider, ManagerRegistry $doctrine, EntityFactory $entityFactory, FormFactory $formFactory) { $this->adminContextProvider = $adminContextProvider; $this->doctrine = $doctrine; $this->entityFactory = $entityFactory; $this->formFactory = $formFactory; } public function createQueryBuilder(SearchDto $searchDto, EntityDto $entityDto, FieldCollection $fields, FilterCollection $filters): QueryBuilder { $entityManager = $this->doctrine->getManagerForClass($entityDto->getFqcn()); /** @var QueryBuilder $queryBuilder */ $queryBuilder = $entityManager->createQueryBuilder() ->select('entity') ->from($entityDto->getFqcn(), 'entity') ; if (!empty($searchDto->getQuery())) { $this->addSearchClause($queryBuilder, $searchDto, $entityDto); } if (!empty($searchDto->getAppliedFilters())) { $this->addFilterClause($queryBuilder, $searchDto, $entityDto, $filters, $fields); } $this->addOrderClause($queryBuilder, $searchDto, $entityDto); return $queryBuilder; } private function addSearchClause(QueryBuilder $queryBuilder, SearchDto $searchDto, EntityDto $entityDto): void { $query = $searchDto->getQuery(); $lowercaseQuery = mb_strtolower($query); $isNumericQuery = is_numeric($query); $isSmallIntegerQuery = ctype_digit($query) && $query >= -32768 && $query <= 32767; $isIntegerQuery = ctype_digit($query) && $query >= -2147483648 && $query <= 2147483647; $isUuidQuery = 1 === preg_match('/^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i', $query); $dqlParameters = [ // adding '0' turns the string into a numeric value 'numeric_query' => is_numeric($query) ? 0 + $query : $query, 'uuid_query' => $query, 'text_query' => '%'.$lowercaseQuery.'%', 'words_query' => explode(' ', $lowercaseQuery), ]; $entitiesAlreadyJoined = []; $configuredSearchableProperties = $searchDto->getSearchableProperties(); $searchableProperties = empty($configuredSearchableProperties) ? $entityDto->getAllPropertyNames() : $configuredSearchableProperties; foreach ($searchableProperties as $propertyName) { if ($entityDto->isAssociation($propertyName)) { // support arbitrarily nested associations (e.g. 
foo.bar.baz.qux) $associatedProperties = explode('.', $propertyName); $numAssociatedProperties = \count($associatedProperties); $originalPropertyName = $associatedProperties[0]; $originalPropertyMetadata = $entityDto->getPropertyMetadata($originalPropertyName); $associatedEntityDto = $this->entityFactory->create($originalPropertyMetadata->get('targetEntity')); for ($i = 0; $i < $numAssociatedProperties - 1; ++$i) { $associatedEntityName = $associatedProperties[$i]; $associatedPropertyName = $associatedProperties[$i + 1]; if (!\in_array($associatedEntityName, $entitiesAlreadyJoined, true)) { $parentEntityName = 0 === $i ? 'entity' : $associatedProperties[$i - 1]; $queryBuilder->leftJoin($parentEntityName.'.'.$associatedEntityName, $associatedEntityName); $entitiesAlreadyJoined[] = $associatedEntityName; } if ($i < $numAssociatedProperties - 2) { $propertyMetadata = $associatedEntityDto->getPropertyMetadata($associatedPropertyName); $targetEntity = $propertyMetadata->get('targetEntity'); $associatedEntityDto = $this->entityFactory->create($targetEntity); } } $entityName = $associatedEntityName; $propertyName = $associatedPropertyName; $propertyDataType = $associatedEntityDto->getPropertyDataType($propertyName); } else { $entityName = 'entity'; $propertyDataType = $entityDto->getPropertyDataType($propertyName); } $isSmallIntegerProperty = 'smallint' === $propertyDataType; $isIntegerProperty = 'integer' === $propertyDataType; $isNumericProperty = \in_array($propertyDataType, ['number', 'bigint', 'decimal', 'float']); // 'citext' is a PostgreSQL extension (https://github.com/EasyCorp/EasyAdminBundle/issues/2556) $isTextProperty = \in_array($propertyDataType, ['string', 'text', 'citext', 'array', 'simple_array']); $isGuidProperty = \in_array($propertyDataType, ['guid', 'uuid']); // this complex condition is needed to avoid issues on PostgreSQL databases if ( ($isSmallIntegerProperty && $isSmallIntegerQuery) || ($isIntegerProperty && $isIntegerQuery) || ($isNumericProperty && $isNumericQuery) ) { $queryBuilder->orWhere(sprintf('%s.%s = :query_for_numbers', $entityName, $propertyName)) ->setParameter('query_for_numbers', $dqlParameters['numeric_query']); } elseif ($isGuidProperty && $isUuidQuery) { $queryBuilder->orWhere(sprintf('%s.%s = :query_for_uuids', $entityName, $propertyName)) ->setParameter('query_for_uuids', $dqlParameters['uuid_query']); } elseif ($isTextProperty) { $queryBuilder->orWhere(sprintf('LOWER(%s.%s) LIKE :query_for_text', $entityName, $propertyName)) ->setParameter('query_for_text', $dqlParameters['text_query']); $queryBuilder->orWhere(sprintf('LOWER(%s.%s) IN (:query_as_words)', $entityName, $propertyName)) ->setParameter('query_as_words', $dqlParameters['words_query']); } } } private function addOrderClause(QueryBuilder $queryBuilder, SearchDto $searchDto, EntityDto $entityDto): void { foreach ($searchDto->getSort() as $sortProperty => $sortOrder) { $sortFieldIsDoctrineAssociation = $entityDto->isAssociation($sortProperty); if ($sortFieldIsDoctrineAssociation) { $sortFieldParts = explode('.', $sortProperty, 2); $queryBuilder->leftJoin('entity.'.$sortFieldParts[0], $sortFieldParts[0]); if (1 === \count($sortFieldParts)) { $queryBuilder->addOrderBy('entity.'.$sortProperty, $sortOrder); } else { $queryBuilder->addOrderBy($sortProperty, $sortOrder); } } else { $queryBuilder->addOrderBy('entity.'.$sortProperty, $sortOrder); } } } private function addFilterClause(QueryBuilder $queryBuilder, SearchDto $searchDto, EntityDto $entityDto, FilterCollection $configuredFilters, 
FieldCollection $fields): void { $filtersForm = $this->formFactory->createFiltersForm($configuredFilters, $this->adminContextProvider->getContext()->getRequest()); if (!$filtersForm->isSubmitted()) { return; } $appliedFilters = $searchDto->getAppliedFilters(); $i = 0; foreach ($filtersForm as $filterForm) { $propertyName = $filterForm->getName(); $filter = $configuredFilters->get($propertyName); // this filter is not defined or not applied if (null === $filter || !isset($appliedFilters[$propertyName])) { continue; } // if the form filter is not valid then we should not apply the filter if (!$filterForm->isValid()) { continue; } $submittedData = $filterForm->getData(); if (!\is_array($submittedData)) { $submittedData = [ 'comparison' => ComparisonType::EQ, 'value' => $submittedData, ]; } $filterDataDto = FilterDataDto::new($i, $filter, current($queryBuilder->getRootAliases()), $submittedData); $filter->apply($queryBuilder, $filterDataDto, $fields->get($propertyName), $entityDto); ++$i; } } }
1
13,074
Where is `$aliases` defined? @javiereguiluz
EasyCorp-EasyAdminBundle
php
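The question above flags that the proposed patch reads `$aliases` without showing where it is defined. A hedged sketch of one way the guard could be completed, assuming `$queryBuilder` and `$sortProperty` as in `addOrderClause()` and Doctrine's `QueryBuilder::getAllAliases()` to supply the already-joined aliases; this illustrates the idea, not the change that was actually merged.

```php
<?php
// Hypothetical completion of the guard from the patch: ask the QueryBuilder
// which aliases already exist before adding another leftJoin for sorting.
$sortFieldParts = explode('.', $sortProperty, 2);

// getAllAliases() returns the root alias ('entity') plus every alias added
// by previous joins, so sorting on an already-joined association does not
// create a duplicate join.
$aliases = $queryBuilder->getAllAliases();

if (!\in_array($sortFieldParts[0], $aliases, true)) {
    $queryBuilder->leftJoin('entity.'.$sortFieldParts[0], $sortFieldParts[0]);
}
```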
@@ -66,12 +66,9 @@ func (h *Handler) FetchJWTSVID(ctx context.Context, req *workload.JWTSVIDRequest var spiffeIDs []string identities := h.Manager.MatchingIdentities(selectors) if len(identities) == 0 { - telemetry_common.AddRegistered(counter, false) return nil, status.Errorf(codes.PermissionDenied, "no identity issued") } - telemetry_common.AddRegistered(counter, true) - for _, identity := range identities { if req.SpiffeId != "" && identity.Entry.SpiffeId != req.SpiffeId { continue
1
package workload import ( "bytes" "context" "crypto" "crypto/x509" "encoding/json" "errors" "fmt" "sync/atomic" "time" "github.com/golang/protobuf/jsonpb" structpb "github.com/golang/protobuf/ptypes/struct" "github.com/sirupsen/logrus" "github.com/spiffe/go-spiffe/proto/spiffe/workload" attestor "github.com/spiffe/spire/pkg/agent/attestor/workload" "github.com/spiffe/spire/pkg/agent/catalog" "github.com/spiffe/spire/pkg/agent/client" "github.com/spiffe/spire/pkg/agent/manager" "github.com/spiffe/spire/pkg/agent/manager/cache" "github.com/spiffe/spire/pkg/common/bundleutil" "github.com/spiffe/spire/pkg/common/jwtsvid" "github.com/spiffe/spire/pkg/common/peertracker" "github.com/spiffe/spire/pkg/common/telemetry" telemetry_workload "github.com/spiffe/spire/pkg/common/telemetry/agent/workloadapi" telemetry_common "github.com/spiffe/spire/pkg/common/telemetry/common" "github.com/spiffe/spire/pkg/common/x509util" "github.com/spiffe/spire/proto/spire/common" "github.com/zeebo/errs" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) // Handler implements the Workload API interface type Handler struct { Manager manager.Manager Catalog catalog.Catalog Log logrus.FieldLogger Metrics telemetry.Metrics // tracks the number of outstanding connections connections int32 } // FetchJWTSVID processes request for a JWT SVID func (h *Handler) FetchJWTSVID(ctx context.Context, req *workload.JWTSVIDRequest) (resp *workload.JWTSVIDResponse, err error) { if len(req.Audience) == 0 { return nil, errs.New("audience must be specified") } _, selectors, metrics, done, err := h.startCall(ctx) if err != nil { return nil, err } defer done() counter := telemetry_workload.StartFetchJWTSVIDCall(metrics) defer counter.Done(&err) defer func() { telemetry_common.AddErrorClass(counter, status.Code(err)) }() var spiffeIDs []string identities := h.Manager.MatchingIdentities(selectors) if len(identities) == 0 { telemetry_common.AddRegistered(counter, false) return nil, status.Errorf(codes.PermissionDenied, "no identity issued") } telemetry_common.AddRegistered(counter, true) for _, identity := range identities { if req.SpiffeId != "" && identity.Entry.SpiffeId != req.SpiffeId { continue } spiffeIDs = append(spiffeIDs, identity.Entry.SpiffeId) } telemetry_common.AddCount(counter, len(spiffeIDs)) resp = new(workload.JWTSVIDResponse) for _, spiffeID := range spiffeIDs { var svid *client.JWTSVID svid, err = h.Manager.FetchJWTSVID(ctx, spiffeID, req.Audience) if err != nil { return nil, status.Errorf(codes.Unavailable, "could not fetch %q JWTSVID: %v", spiffeID, err) } resp.Svids = append(resp.Svids, &workload.JWTSVID{ SpiffeId: spiffeID, Svid: svid.Token, }) ttl := time.Until(svid.ExpiresAt) telemetry_workload.SetFetchJWTSVIDTTLGauge(metrics, spiffeID, float32(ttl.Seconds())) } return resp, nil } // FetchJWTBundles processes request for JWT bundles func (h *Handler) FetchJWTBundles(req *workload.JWTBundlesRequest, stream workload.SpiffeWorkloadAPI_FetchJWTBundlesServer) error { ctx := stream.Context() pid, selectors, metrics, done, err := h.startCall(ctx) if err != nil { return err } defer done() telemetry_workload.IncrFetchJWTBundlesCounter(metrics) subscriber := h.Manager.SubscribeToCacheChanges(selectors) defer subscriber.Finish() for { select { case update := <-subscriber.Updates(): telemetry_workload.IncrUpdateJWTBundlesCounter(metrics) start := time.Now() if err := h.sendJWTBundlesResponse(update, stream, metrics); err != nil { return err } 
telemetry_workload.MeasureSendJWTBundleLatency(metrics, start) if time.Since(start) > (1 * time.Second) { h.Log.WithFields(logrus.Fields{ telemetry.Seconds: time.Since(start).Seconds, telemetry.PID: pid, }).Warn("Took >1 second to send JWT bundle to PID") } case <-ctx.Done(): return nil } } } // ValidateJWTSVID processes request for JWT SVID validation func (h *Handler) ValidateJWTSVID(ctx context.Context, req *workload.ValidateJWTSVIDRequest) (*workload.ValidateJWTSVIDResponse, error) { if req.Audience == "" { return nil, status.Error(codes.InvalidArgument, "audience must be specified") } if req.Svid == "" { return nil, status.Error(codes.InvalidArgument, "svid must be specified") } _, selectors, metrics, done, err := h.startCall(ctx) if err != nil { return nil, err } defer done() keyStore := keyStoreFromBundles(h.getWorkloadBundles(selectors)) spiffeID, claims, err := jwtsvid.ValidateToken(ctx, req.Svid, keyStore, []string{req.Audience}) if err != nil { telemetry_workload.IncrValidJWTSVIDErrCounter(metrics) return nil, status.Error(codes.InvalidArgument, err.Error()) } telemetry_workload.IncrValidJWTSVIDCounter(metrics, spiffeID, req.Audience) s, err := structFromValues(claims) if err != nil { return nil, status.Error(codes.InvalidArgument, err.Error()) } return &workload.ValidateJWTSVIDResponse{ SpiffeId: spiffeID, Claims: s, }, nil } // FetchX509SVID processes request for an x509 SVID func (h *Handler) FetchX509SVID(_ *workload.X509SVIDRequest, stream workload.SpiffeWorkloadAPI_FetchX509SVIDServer) error { ctx := stream.Context() pid, selectors, metrics, done, err := h.startCall(ctx) if err != nil { return err } defer done() subscriber := h.Manager.SubscribeToCacheChanges(selectors) defer subscriber.Finish() for { select { case update := <-subscriber.Updates(): start := time.Now() err := h.sendX509SVIDResponse(update, stream, metrics, selectors) if err != nil { return err } // TODO: evaluate the possibility of removing the following metric at some point // in the future because almost the same metric (with different labels and keys) is being // taken by the CallCounter in sendX509SVIDResponse function. telemetry_workload.MeasureFetchX509SVIDLatency(metrics, start) if time.Since(start) > (1 * time.Second) { h.Log.WithFields(logrus.Fields{ telemetry.Seconds: time.Since(start).Seconds, telemetry.PID: pid, }).Warn("Took >1 second to send SVID response to PID") } case <-ctx.Done(): return nil } } } func (h *Handler) sendX509SVIDResponse(update *cache.WorkloadUpdate, stream workload.SpiffeWorkloadAPI_FetchX509SVIDServer, metrics telemetry.Metrics, selectors []*common.Selector) (err error) { counter := telemetry_workload.StartFetchX509SVIDCall(metrics) defer counter.Done(&err) defer func() { telemetry_common.AddErrorClass(counter, status.Code(err)) }() if len(update.Identities) == 0 { telemetry_common.AddRegistered(counter, false) return status.Errorf(codes.PermissionDenied, "no identity issued") } telemetry_common.AddRegistered(counter, true) resp, err := h.composeX509SVIDResponse(update) if err != nil { return status.Errorf(codes.Unavailable, "could not serialize response: %v", err) } err = stream.Send(resp) if err != nil { return err } // Add all the SPIFFE IDs to the labels array. 
for i, svid := range resp.Svids { ttl := time.Until(update.Identities[i].SVID[0].NotAfter) telemetry_workload.SetFetchX509SVIDTTLGauge(metrics, svid.SpiffeId, float32(ttl.Seconds())) } telemetry_common.AddCount(counter, len(resp.Svids)) return nil } func (h *Handler) composeX509SVIDResponse(update *cache.WorkloadUpdate) (*workload.X509SVIDResponse, error) { resp := new(workload.X509SVIDResponse) resp.Svids = []*workload.X509SVID{} resp.FederatedBundles = make(map[string][]byte) bundle := marshalBundle(update.Bundle.RootCAs()) for id, federatedBundle := range update.FederatedBundles { resp.FederatedBundles[id] = marshalBundle(federatedBundle.RootCAs()) } for _, identity := range update.Identities { id := identity.Entry.SpiffeId keyData, err := x509.MarshalPKCS8PrivateKey(identity.PrivateKey) if err != nil { return nil, fmt.Errorf("marshal key for %v: %v", id, err) } svid := &workload.X509SVID{ SpiffeId: id, X509Svid: x509util.DERFromCertificates(identity.SVID), X509SvidKey: keyData, Bundle: bundle, FederatesWith: identity.Entry.FederatesWith, } resp.Svids = append(resp.Svids, svid) } return resp, nil } func (h *Handler) sendJWTBundlesResponse(update *cache.WorkloadUpdate, stream workload.SpiffeWorkloadAPI_FetchJWTBundlesServer, metrics telemetry.Metrics) (err error) { counter := telemetry_workload.StartFetchJWTBundlesCall(metrics) defer counter.Done(&err) defer func() { telemetry_common.AddErrorClass(counter, status.Code(err)) }() if len(update.Identities) == 0 { return status.Errorf(codes.PermissionDenied, "no identity issued") } resp, err := h.composeJWTBundlesResponse(update) if err != nil { return status.Errorf(codes.Unavailable, "could not serialize response: %v", err) } return stream.Send(resp) } func (h *Handler) composeJWTBundlesResponse(update *cache.WorkloadUpdate) (*workload.JWTBundlesResponse, error) { bundles := make(map[string][]byte) if update.Bundle != nil { jwksBytes, err := bundleutil.Marshal(update.Bundle, bundleutil.NoX509SVIDKeys()) if err != nil { return nil, err } bundles[update.Bundle.TrustDomainID()] = jwksBytes } for _, federatedBundle := range update.FederatedBundles { jwksBytes, err := bundleutil.Marshal(federatedBundle, bundleutil.NoX509SVIDKeys()) if err != nil { return nil, err } bundles[federatedBundle.TrustDomainID()] = jwksBytes } return &workload.JWTBundlesResponse{ Bundles: bundles, }, nil } // From context, parse out peer watcher PID and selectors. Attest against the PID. Add selectors as labels to // to a new metrics object. Return this information to the caller so it can emit further metrics. // If no error, callers must call the output func() to decrement current connections count. func (h *Handler) startCall(ctx context.Context) (int32, []*common.Selector, telemetry.Metrics, func(), error) { md, ok := metadata.FromIncomingContext(ctx) if !ok || len(md["workload.spiffe.io"]) != 1 || md["workload.spiffe.io"][0] != "true" { return 0, nil, nil, nil, status.Errorf(codes.InvalidArgument, "Security header missing from request") } watcher, err := h.peerWatcher(ctx) if err != nil { return 0, nil, nil, nil, status.Errorf(codes.Internal, "Is this a supported system? 
Please report this bug: %v", err) } // add to count of current telemetry_workload.SetConnectionTotalGauge(h.Metrics, atomic.AddInt32(&h.connections, 1)) done := func() { // rely on caller to decrement count of current connections telemetry_workload.SetConnectionTotalGauge(h.Metrics, atomic.AddInt32(&h.connections, -1)) } config := attestor.Config{ Catalog: h.Catalog, Log: h.Log, Metrics: h.Metrics, } selectors := attestor.New(&config).Attest(ctx, watcher.PID()) // Ensure that the original caller is still alive so that we know we didn't // attest some other process that happened to be assigned the original PID if err := watcher.IsAlive(); err != nil { done() return 0, nil, nil, nil, status.Errorf(codes.Unauthenticated, "Could not verify existence of the original caller: %v", err) } telemetry_workload.IncrConnectionCounter(h.Metrics) return watcher.PID(), selectors, h.Metrics, done, nil } // peerWatcher takes a grpc context, and returns a Watcher representing the caller which // has issued the request. Returns an error if the call was not made locally, if the necessary // syscalls aren't unsupported, or if the transport security was not properly configured. // See the peertracker package for more information. func (h *Handler) peerWatcher(ctx context.Context) (watcher peertracker.Watcher, err error) { watcher, ok := peertracker.WatcherFromContext(ctx) if !ok { return nil, errors.New("Unable to fetch watcher from context") } return watcher, nil } func (h *Handler) getWorkloadBundles(selectors []*common.Selector) (bundles []*bundleutil.Bundle) { update := h.Manager.FetchWorkloadUpdate(selectors) if update.Bundle != nil { bundles = append(bundles, update.Bundle) } for _, federatedBundle := range update.FederatedBundles { bundles = append(bundles, federatedBundle) } return bundles } func marshalBundle(certs []*x509.Certificate) []byte { bundle := []byte{} for _, c := range certs { bundle = append(bundle, c.Raw...) } return bundle } func keyStoreFromBundles(bundles []*bundleutil.Bundle) jwtsvid.KeyStore { trustDomainKeys := make(map[string]map[string]crypto.PublicKey) for _, bundle := range bundles { trustDomainKeys[bundle.TrustDomainID()] = bundle.JWTSigningKeys() } return jwtsvid.NewKeyStore(trustDomainKeys) } func structFromValues(values map[string]interface{}) (*structpb.Struct, error) { valuesJSON, err := json.Marshal(values) if err != nil { return nil, errs.Wrap(err) } s := new(structpb.Struct) if err := jsonpb.Unmarshal(bytes.NewReader(valuesJSON), s); err != nil { return nil, errs.Wrap(err) } return s, nil }
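The startCall comments in the handler above describe a bookkeeping contract: the connection gauge is incremented on entry and the caller must invoke the returned done() to decrement it. Below is a minimal, standalone sketch of that pattern only; the names (gauge, trackConnection, report) are illustrative and not part of the SPIRE telemetry API, which in the real code goes through telemetry_workload.SetConnectionTotalGauge.

package main

import (
	"fmt"
	"sync/atomic"
)

// gauge stands in for the telemetry sink; in the handler above this role is
// played by telemetry_workload.SetConnectionTotalGauge. The name is ours.
type gauge func(current int32)

// trackConnection increments the shared connection counter, reports the new
// total, and returns a func that the caller must invoke (typically via defer)
// to decrement the counter and report again - mirroring the done() closure
// returned by startCall.
func trackConnection(connections *int32, report gauge) (done func()) {
	report(atomic.AddInt32(connections, 1))
	return func() {
		report(atomic.AddInt32(connections, -1))
	}
}

func main() {
	var connections int32
	report := func(current int32) { fmt.Println("current connections:", current) }

	done := trackConnection(&connections, report)
	defer done() // caller is responsible for releasing the slot
	// ... handle the request ...
}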
1
11,836
What is the justification for removing the registered label? It doesn't seem to have high cardinality, nor is it redundant. It seems useful for gaining insight into situations where workloads aren't registered... (a minimal sketch of how this label behaves follows this row's metadata).
spiffe-spire
go
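To make the reviewer's point about cardinality concrete: the registered label is boolean, so it can only ever produce two label values per counter, splitting the series into "registered" and "unregistered" rather than multiplying them. The sketch below is a toy stand-in and not the SPIRE telemetry API (which attaches the label via telemetry_common.AddRegistered in the handler above); the type and method names here are illustrative only.

package main

import "fmt"

// label mimics a metrics label pair.
type label struct{ name, value string }

// callCounter is a toy stand-in for the telemetry call counter used in the
// handler; it just records the labels it would emit.
type callCounter struct{ labels []label }

func (c *callCounter) addRegistered(registered bool) {
	// A boolean label yields at most two distinct values, so it cannot blow
	// up metric cardinality; it only distinguishes registered from
	// unregistered workload calls.
	c.labels = append(c.labels, label{"registered", fmt.Sprint(registered)})
}

func main() {
	c := &callCounter{}
	c.addRegistered(false) // e.g. no identity matched the workload's selectors
	fmt.Println(c.labels)  // [{registered false}]
}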
@@ -1835,3 +1835,7 @@ func (a *WebAPI) GetDeploymentChain(ctx context.Context, req *webservice.GetDepl DeploymentChain: dc, }, nil } + +func (a *WebAPI) ListEvents(ctx context.Context, req *webservice.ListEventsRequest) (*webservice.ListEventsResponse, error) { + return nil, status.Error(codes.Unimplemented, "Not implemented") +}
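The patch above stubs ListEvents with codes.Unimplemented until a real handler lands. A small sketch of how a caller would detect that status is shown below; the isUnimplemented helper is ours, not part of PipeCD, and the only external calls are the standard grpc status and codes packages.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isUnimplemented reports whether an RPC error carries codes.Unimplemented,
// which is what the stubbed ListEvents returns for now.
func isUnimplemented(err error) bool {
	return status.Code(err) == codes.Unimplemented
}

func main() {
	// Simulate the error the stub would produce.
	err := status.Error(codes.Unimplemented, "Not implemented")
	fmt.Println(isUnimplemented(err))             // true
	fmt.Println(isUnimplemented(errors.New("x"))) // false: plain errors map to codes.Unknown
}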
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package grpcapi import ( "bytes" "context" "encoding/gob" "errors" "fmt" "sort" "strings" "time" "github.com/google/uuid" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "github.com/pipe-cd/pipecd/pkg/app/server/applicationlivestatestore" "github.com/pipe-cd/pipecd/pkg/app/server/commandstore" "github.com/pipe-cd/pipecd/pkg/app/server/service/webservice" "github.com/pipe-cd/pipecd/pkg/app/server/stagelogstore" "github.com/pipe-cd/pipecd/pkg/cache" "github.com/pipe-cd/pipecd/pkg/cache/memorycache" "github.com/pipe-cd/pipecd/pkg/cache/rediscache" "github.com/pipe-cd/pipecd/pkg/config" "github.com/pipe-cd/pipecd/pkg/datastore" "github.com/pipe-cd/pipecd/pkg/filestore" "github.com/pipe-cd/pipecd/pkg/insight/insightstore" "github.com/pipe-cd/pipecd/pkg/model" "github.com/pipe-cd/pipecd/pkg/redis" "github.com/pipe-cd/pipecd/pkg/rpc/rpcauth" ) type encrypter interface { Encrypt(text string) (string, error) } // WebAPI implements the behaviors for the gRPC definitions of WebAPI. type WebAPI struct { applicationStore datastore.ApplicationStore environmentStore datastore.EnvironmentStore deploymentChainStore datastore.DeploymentChainStore deploymentStore datastore.DeploymentStore pipedStore datastore.PipedStore projectStore datastore.ProjectStore apiKeyStore datastore.APIKeyStore stageLogStore stagelogstore.Store applicationLiveStateStore applicationlivestatestore.Store commandStore commandstore.Store insightStore insightstore.Store encrypter encrypter appProjectCache cache.Cache deploymentProjectCache cache.Cache pipedProjectCache cache.Cache envProjectCache cache.Cache pipedStatCache cache.Cache insightCache cache.Cache redis redis.Redis projectsInConfig map[string]config.ControlPlaneProject logger *zap.Logger } // NewWebAPI creates a new WebAPI instance. 
func NewWebAPI( ctx context.Context, ds datastore.DataStore, fs filestore.Store, sls stagelogstore.Store, alss applicationlivestatestore.Store, cmds commandstore.Store, is insightstore.Store, psc cache.Cache, rd redis.Redis, projs map[string]config.ControlPlaneProject, encrypter encrypter, logger *zap.Logger, ) *WebAPI { a := &WebAPI{ applicationStore: datastore.NewApplicationStore(ds), environmentStore: datastore.NewEnvironmentStore(ds), deploymentChainStore: datastore.NewDeploymentChainStore(ds), deploymentStore: datastore.NewDeploymentStore(ds), pipedStore: datastore.NewPipedStore(ds), projectStore: datastore.NewProjectStore(ds), apiKeyStore: datastore.NewAPIKeyStore(ds), stageLogStore: sls, applicationLiveStateStore: alss, commandStore: cmds, insightStore: is, projectsInConfig: projs, encrypter: encrypter, appProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), deploymentProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), pipedProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), envProjectCache: memorycache.NewTTLCache(ctx, 24*time.Hour, 3*time.Hour), pipedStatCache: psc, insightCache: rediscache.NewTTLCache(rd, 3*time.Hour), redis: rd, logger: logger.Named("web-api"), } return a } // Register registers all handling of this service into the specified gRPC server. func (a *WebAPI) Register(server *grpc.Server) { webservice.RegisterWebServiceServer(server, a) } func (a *WebAPI) UpdateEnvironmentDesc(ctx context.Context, req *webservice.UpdateEnvironmentDescRequest) (*webservice.UpdateEnvironmentDescResponse, error) { return nil, status.Error(codes.Unimplemented, "") } func (a *WebAPI) ListEnvironments(ctx context.Context, req *webservice.ListEnvironmentsRequest) (*webservice.ListEnvironmentsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, }, } envs, err := a.environmentStore.ListEnvironments(ctx, opts) if err != nil { a.logger.Error("failed to get environments", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get environments") } return &webservice.ListEnvironmentsResponse{ Environments: envs, }, nil } func (a *WebAPI) EnableEnvironment(ctx context.Context, req *webservice.EnableEnvironmentRequest) (*webservice.EnableEnvironmentResponse, error) { if err := a.updateEnvironmentEnable(ctx, req.EnvironmentId, true); err != nil { return nil, err } return &webservice.EnableEnvironmentResponse{}, nil } func (a *WebAPI) DisableEnvironment(ctx context.Context, req *webservice.DisableEnvironmentRequest) (*webservice.DisableEnvironmentResponse, error) { if err := a.updateEnvironmentEnable(ctx, req.EnvironmentId, false); err != nil { return nil, err } return &webservice.DisableEnvironmentResponse{}, nil } // DeleteEnvironment deletes the given environment and all applications that belong to it. // It returns a FailedPrecondition error if any Piped is still using that environment. 
func (a *WebAPI) DeleteEnvironment(ctx context.Context, req *webservice.DeleteEnvironmentRequest) (*webservice.DeleteEnvironmentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateEnvBelongsToProject(ctx, req.EnvironmentId, claims.Role.ProjectId); err != nil { return nil, err } // Check if no Piped has permission to the given environment. pipeds, err := a.pipedStore.ListPipeds(ctx, datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, { Field: "EnvIds", Operator: datastore.OperatorContains, Value: req.EnvironmentId, }, { Field: "Disabled", Operator: datastore.OperatorEqual, Value: false, }, }, }) if err != nil { a.logger.Error("failed to fetch Pipeds linked to the given environment", zap.String("env-id", req.EnvironmentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to validate the deletion operation") } if len(pipeds) > 0 { pipedNames := make([]string, 0, len(pipeds)) for _, p := range pipeds { pipedNames = append(pipedNames, p.Name) } return nil, status.Errorf( codes.FailedPrecondition, "Found Pipeds linked the environment to be deleted. Please remove this environment from all Pipeds (%s) on the Piped settings page", strings.Join(pipedNames, ","), ) } // Delete all applications that belongs to the given env. apps, _, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, { Field: "EnvId", Operator: datastore.OperatorEqual, Value: req.EnvironmentId, }, }, }) if err != nil { a.logger.Error("failed to fetch applications that belongs to the given environment", zap.String("env-id", req.EnvironmentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to fetch applications that belongs to the given environment") } for _, app := range apps { if app.ProjectId != claims.Role.ProjectId { continue } err := a.applicationStore.DeleteApplication(ctx, app.Id) if err == nil { continue } switch err { case datastore.ErrNotFound: return nil, status.Error(codes.Internal, "The application is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "Invalid value to delete") default: a.logger.Error("failed to delete the application", zap.String("application-id", app.Id), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to delete the application") } } if err := a.environmentStore.DeleteEnvironment(ctx, req.EnvironmentId); err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.NotFound, "The environment is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "Invalid value to delete") default: a.logger.Error("failed to delete the environment", zap.String("env-id", req.EnvironmentId), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to delete the environment") } } return &webservice.DeleteEnvironmentResponse{}, nil } func (a *WebAPI) updateEnvironmentEnable(ctx context.Context, envID string, enable bool) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } if err := a.validateEnvBelongsToProject(ctx, envID, claims.Role.ProjectId); err != nil { return err } var updater 
func(context.Context, string) error if enable { updater = a.environmentStore.EnableEnvironment } else { updater = a.environmentStore.DisableEnvironment } if err := updater(ctx, envID); err != nil { switch err { case datastore.ErrNotFound: return status.Error(codes.NotFound, "The environment is not found") case datastore.ErrInvalidArgument: return status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to update the environment", zap.String("env-id", envID), zap.Error(err), ) return status.Error(codes.Internal, "Failed to update the environment") } } return nil } // validateEnvBelongsToProject checks if the given piped belongs to the given project. // It gives back error unless the env belongs to the project. func (a *WebAPI) validateEnvBelongsToProject(ctx context.Context, envID, projectID string) error { eid, err := a.envProjectCache.Get(envID) if err == nil { if projectID != eid { return status.Error(codes.PermissionDenied, "Requested environment doesn't belong to the project you logged in") } return nil } env, err := getEnvironment(ctx, a.environmentStore, envID, a.logger) if err != nil { return err } a.envProjectCache.Put(envID, env.ProjectId) if projectID != env.ProjectId { return status.Error(codes.PermissionDenied, "Requested environment doesn't belong to the project you logged in") } return nil } func (a *WebAPI) RegisterPiped(ctx context.Context, req *webservice.RegisterPipedRequest) (*webservice.RegisterPipedResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } key, keyHash, err := model.GeneratePipedKey() if err != nil { a.logger.Error("failed to generate piped key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to generate the piped key") } piped := model.Piped{ Id: uuid.New().String(), Name: req.Name, Desc: req.Desc, ProjectId: claims.Role.ProjectId, EnvIds: req.EnvIds, } if err := piped.AddKey(keyHash, claims.Subject, time.Now()); err != nil { return nil, status.Error(codes.FailedPrecondition, fmt.Sprintf("Failed to create key: %v", err)) } err = a.pipedStore.AddPiped(ctx, &piped) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The piped already exists") } if err != nil { a.logger.Error("failed to register piped", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to register piped") } return &webservice.RegisterPipedResponse{ Id: piped.Id, Key: key, }, nil } func (a *WebAPI) UpdatePiped(ctx context.Context, req *webservice.UpdatePipedRequest) (*webservice.UpdatePipedResponse, error) { updater := func(ctx context.Context, pipedID string) error { return a.pipedStore.UpdatePiped(ctx, req.PipedId, func(p *model.Piped) error { p.Name = req.Name p.Desc = req.Desc p.EnvIds = req.EnvIds return nil }) } if err := a.updatePiped(ctx, req.PipedId, updater); err != nil { return nil, err } return &webservice.UpdatePipedResponse{}, nil } func (a *WebAPI) RecreatePipedKey(ctx context.Context, req *webservice.RecreatePipedKeyRequest) (*webservice.RecreatePipedKeyResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } key, keyHash, err := model.GeneratePipedKey() if err != nil { a.logger.Error("failed to generate piped key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to generate the piped key") } updater := func(ctx 
context.Context, pipedID string) error { return a.pipedStore.AddKey(ctx, pipedID, keyHash, claims.Subject, time.Now()) } if err := a.updatePiped(ctx, req.Id, updater); err != nil { return nil, err } return &webservice.RecreatePipedKeyResponse{ Key: key, }, nil } func (a *WebAPI) DeleteOldPipedKeys(ctx context.Context, req *webservice.DeleteOldPipedKeysRequest) (*webservice.DeleteOldPipedKeysResponse, error) { if _, err := rpcauth.ExtractClaims(ctx); err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } updater := func(ctx context.Context, pipedID string) error { return a.pipedStore.DeleteOldKeys(ctx, pipedID) } if err := a.updatePiped(ctx, req.PipedId, updater); err != nil { return nil, err } return &webservice.DeleteOldPipedKeysResponse{}, nil } func (a *WebAPI) EnablePiped(ctx context.Context, req *webservice.EnablePipedRequest) (*webservice.EnablePipedResponse, error) { if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.EnablePiped); err != nil { return nil, err } return &webservice.EnablePipedResponse{}, nil } func (a *WebAPI) DisablePiped(ctx context.Context, req *webservice.DisablePipedRequest) (*webservice.DisablePipedResponse, error) { if err := a.updatePiped(ctx, req.PipedId, a.pipedStore.DisablePiped); err != nil { return nil, err } return &webservice.DisablePipedResponse{}, nil } func (a *WebAPI) updatePiped(ctx context.Context, pipedID string, updater func(context.Context, string) error) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } if err := a.validatePipedBelongsToProject(ctx, pipedID, claims.Role.ProjectId); err != nil { return err } if err := updater(ctx, pipedID); err != nil { switch err { case datastore.ErrNotFound: return status.Error(codes.InvalidArgument, "The piped is not found") case datastore.ErrInvalidArgument: return status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to update the piped", zap.String("piped-id", pipedID), zap.Error(err), ) // TODO: Improve error handling, instead of considering all as Internal error like this // we should check the error type to decide to pass its message to the web client or just a generic message. return status.Error(codes.Internal, "Failed to update the piped") } } return nil } func (a *WebAPI) ListPipeds(ctx context.Context, req *webservice.ListPipedsRequest) (*webservice.ListPipedsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, }, } if req.Options != nil { if req.Options.Enabled != nil { opts.Filters = append(opts.Filters, datastore.ListFilter{ Field: "Disabled", Operator: datastore.OperatorEqual, Value: !req.Options.Enabled.GetValue(), }) } } pipeds, err := a.pipedStore.ListPipeds(ctx, opts) if err != nil { a.logger.Error("failed to get pipeds", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get pipeds") } // Check piped connection status if necessary. // The connection status of piped determined by its submitted stat in pipedStatCache. 
if req.WithStatus { for i := range pipeds { sv, err := a.pipedStatCache.Get(pipeds[i].Id) if errors.Is(err, cache.ErrNotFound) { pipeds[i].Status = model.Piped_OFFLINE continue } if err != nil { pipeds[i].Status = model.Piped_UNKNOWN a.logger.Error("failed to get piped stat from the cache", zap.Error(err)) continue } ps := model.PipedStat{} if err = model.UnmarshalPipedStat(sv, &ps); err != nil { pipeds[i].Status = model.Piped_UNKNOWN a.logger.Error("unable to unmarshal the piped stat", zap.Error(err)) continue } if ps.IsStaled(model.PipedStatsRetention) { pipeds[i].Status = model.Piped_OFFLINE continue } pipeds[i].Status = model.Piped_ONLINE } } // Redact all sensitive data inside piped message before sending to the client. for i := range pipeds { pipeds[i].RedactSensitiveData() } return &webservice.ListPipedsResponse{ Pipeds: pipeds, }, nil } func (a *WebAPI) GetPiped(ctx context.Context, req *webservice.GetPipedRequest) (*webservice.GetPipedResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger) if err != nil { return nil, err } if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil { return nil, err } // Redact all sensitive data inside piped message before sending to the client. piped.RedactSensitiveData() return &webservice.GetPipedResponse{ Piped: piped, }, nil } func (a *WebAPI) UpdatePipedDesiredVersion(ctx context.Context, req *webservice.UpdatePipedDesiredVersionRequest) (*webservice.UpdatePipedDesiredVersionResponse, error) { updater := func(ctx context.Context, pipedID string) error { return a.pipedStore.UpdatePiped(ctx, pipedID, func(p *model.Piped) error { p.DesiredVersion = req.Version return nil }) } for _, pipedID := range req.PipedIds { if err := a.updatePiped(ctx, pipedID, updater); err != nil { return nil, err } } return &webservice.UpdatePipedDesiredVersionResponse{}, nil } // validatePipedBelongsToProject checks if the given piped belongs to the given project. // It gives back error unless the piped belongs to the project. func (a *WebAPI) validatePipedBelongsToProject(ctx context.Context, pipedID, projectID string) error { pid, err := a.pipedProjectCache.Get(pipedID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in") } return nil } piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger) if err != nil { return err } a.pipedProjectCache.Put(pipedID, piped.ProjectId) if piped.ProjectId != projectID { return status.Error(codes.PermissionDenied, "Requested piped doesn't belong to the project you logged in") } return nil } func (a *WebAPI) ListUnregisteredApplications(ctx context.Context, _ *webservice.ListUnregisteredApplicationsRequest) (*webservice.ListUnregisteredApplicationsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } // Collect all apps that belong to the project. 
key := makeUnregisteredAppsCacheKey(claims.Role.ProjectId) c := rediscache.NewHashCache(a.redis, key) // pipedToApps assumes to be a map["piped-id"][]byte(slice of *model.ApplicationInfo encoded by encoding/gob) pipedToApps, err := c.GetAll() if errors.Is(err, cache.ErrNotFound) { return &webservice.ListUnregisteredApplicationsResponse{}, nil } if err != nil { a.logger.Error("failed to get unregistered apps", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get unregistered apps") } // Integrate all apps cached for each Piped. allApps := make([]*model.ApplicationInfo, 0) for _, as := range pipedToApps { b, ok := as.([]byte) if !ok { return nil, status.Error(codes.Internal, "Unexpected data cached") } dec := gob.NewDecoder(bytes.NewReader(b)) var apps []*model.ApplicationInfo if err := dec.Decode(&apps); err != nil { a.logger.Error("failed to decode the unregistered apps", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to decode the unregistered apps") } allApps = append(allApps, apps...) } if len(allApps) == 0 { return &webservice.ListUnregisteredApplicationsResponse{}, nil } sort.Slice(allApps, func(i, j int) bool { return allApps[i].Path < allApps[j].Path }) return &webservice.ListUnregisteredApplicationsResponse{ Applications: allApps, }, nil } // TODO: Validate the specified piped to ensure that it belongs to the specified environment. func (a *WebAPI) AddApplication(ctx context.Context, req *webservice.AddApplicationRequest) (*webservice.AddApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger) if err != nil { return nil, err } if piped.ProjectId != claims.Role.ProjectId { return nil, status.Error(codes.PermissionDenied, "Requested piped does not belong to your project") } gitpath, err := makeGitPath( req.GitPath.Repo.Id, req.GitPath.Path, req.GitPath.ConfigFilename, piped, a.logger, ) if err != nil { return nil, err } app := model.Application{ Id: uuid.New().String(), Name: req.Name, EnvId: req.EnvId, PipedId: req.PipedId, ProjectId: claims.Role.ProjectId, GitPath: gitpath, Kind: req.Kind, CloudProvider: req.CloudProvider, Description: req.Description, Labels: req.Labels, } err = a.applicationStore.AddApplication(ctx, &app) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The application already exists") } if err != nil { a.logger.Error("failed to create application", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to create application") } return &webservice.AddApplicationResponse{ ApplicationId: app.Id, }, nil } func (a *WebAPI) UpdateApplication(ctx context.Context, req *webservice.UpdateApplicationRequest) (*webservice.UpdateApplicationResponse, error) { updater := func(app *model.Application) error { app.Name = req.Name app.EnvId = req.EnvId app.PipedId = req.PipedId app.Kind = req.Kind app.CloudProvider = req.CloudProvider return nil } if err := a.updateApplication(ctx, req.ApplicationId, req.PipedId, updater); err != nil { return nil, err } return &webservice.UpdateApplicationResponse{}, nil } func (a *WebAPI) UpdateApplicationDescription(ctx context.Context, req *webservice.UpdateApplicationDescriptionRequest) (*webservice.UpdateApplicationDescriptionResponse, error) { updater := func(app *model.Application) error { app.Description = req.Description return nil } if err := 
a.updateApplication(ctx, req.ApplicationId, "", updater); err != nil { return nil, err } return &webservice.UpdateApplicationDescriptionResponse{}, nil } func (a *WebAPI) updateApplication(ctx context.Context, id, pipedID string, updater func(app *model.Application) error) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } // Ensure that the specified piped is assignable for this application. if pipedID != "" { piped, err := getPiped(ctx, a.pipedStore, pipedID, a.logger) if err != nil { return err } if piped.ProjectId != claims.Role.ProjectId { return status.Error(codes.PermissionDenied, "Requested piped does not belong to your project") } } err = a.applicationStore.UpdateApplication(ctx, id, updater) if err != nil { a.logger.Error("failed to update application", zap.Error(err)) return status.Error(codes.Internal, "Failed to update application") } return nil } func (a *WebAPI) EnableApplication(ctx context.Context, req *webservice.EnableApplicationRequest) (*webservice.EnableApplicationResponse, error) { if err := a.updateApplicationEnable(ctx, req.ApplicationId, true); err != nil { return nil, err } return &webservice.EnableApplicationResponse{}, nil } func (a *WebAPI) DisableApplication(ctx context.Context, req *webservice.DisableApplicationRequest) (*webservice.DisableApplicationResponse, error) { if err := a.updateApplicationEnable(ctx, req.ApplicationId, false); err != nil { return nil, err } return &webservice.DisableApplicationResponse{}, nil } func (a *WebAPI) DeleteApplication(ctx context.Context, req *webservice.DeleteApplicationRequest) (*webservice.DeleteApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil { return nil, err } if err := a.applicationStore.DeleteApplication(ctx, req.ApplicationId); err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.NotFound, "The application is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "Invalid value to delete") default: a.logger.Error("failed to delete the application", zap.String("application-id", req.ApplicationId), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to delete the application") } } return &webservice.DeleteApplicationResponse{}, nil } func (a *WebAPI) updateApplicationEnable(ctx context.Context, appID string, enable bool) error { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return err } if err := a.validateAppBelongsToProject(ctx, appID, claims.Role.ProjectId); err != nil { return err } var updater func(context.Context, string) error if enable { updater = a.applicationStore.EnableApplication } else { updater = a.applicationStore.DisableApplication } if err := updater(ctx, appID); err != nil { switch err { case datastore.ErrNotFound: return status.Error(codes.NotFound, "The application is not found") case datastore.ErrInvalidArgument: return status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to update the application", zap.String("application-id", appID), zap.Error(err), ) return status.Error(codes.Internal, "Failed to update the application") } } return nil } func (a *WebAPI) 
ListApplications(ctx context.Context, req *webservice.ListApplicationsRequest) (*webservice.ListApplicationsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } orders := []datastore.Order{ { Field: "UpdatedAt", Direction: datastore.Desc, }, { Field: "Id", Direction: datastore.Asc, }, } filters := []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, } if o := req.Options; o != nil { if o.Enabled != nil { filters = append(filters, datastore.ListFilter{ Field: "Disabled", Operator: datastore.OperatorEqual, Value: !o.Enabled.GetValue(), }) } // Allowing multiple so that it can do In Query later. // Currently only the first value is used. if len(o.Kinds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "Kind", Operator: datastore.OperatorEqual, Value: o.Kinds[0], }) } if len(o.SyncStatuses) > 0 { filters = append(filters, datastore.ListFilter{ Field: "SyncState.Status", Operator: datastore.OperatorEqual, Value: o.SyncStatuses[0], }) } if len(o.EnvIds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "EnvId", Operator: datastore.OperatorEqual, Value: o.EnvIds[0], }) } if o.Name != "" { filters = append(filters, datastore.ListFilter{ Field: "Name", Operator: datastore.OperatorEqual, Value: o.Name, }) } } apps, _, err := a.applicationStore.ListApplications(ctx, datastore.ListOptions{ Filters: filters, Orders: orders, }) if err != nil { a.logger.Error("failed to get applications", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get applications") } if len(req.Options.Labels) == 0 { return &webservice.ListApplicationsResponse{ Applications: apps, }, nil } // NOTE: Filtering by labels is done by the application-side because we need to create composite indexes for every combination in the filter. 
filtered := make([]*model.Application, 0, len(apps)) for _, a := range apps { if a.ContainLabels(req.Options.Labels) { filtered = append(filtered, a) } } return &webservice.ListApplicationsResponse{ Applications: filtered, }, nil } func (a *WebAPI) SyncApplication(ctx context.Context, req *webservice.SyncApplicationRequest) (*webservice.SyncApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger) if err != nil { return nil, err } if claims.Role.ProjectId != app.ProjectId { return nil, status.Error(codes.PermissionDenied, "Requested application does not belong to your project") } cmd := model.Command{ Id: uuid.New().String(), PipedId: app.PipedId, ApplicationId: app.Id, ProjectId: app.ProjectId, Type: model.Command_SYNC_APPLICATION, Commander: claims.Subject, SyncApplication: &model.Command_SyncApplication{ ApplicationId: app.Id, SyncStrategy: req.SyncStrategy, }, } if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil { return nil, err } return &webservice.SyncApplicationResponse{ CommandId: cmd.Id, }, nil } func (a *WebAPI) GetApplication(ctx context.Context, req *webservice.GetApplicationRequest) (*webservice.GetApplicationResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } app, err := getApplication(ctx, a.applicationStore, req.ApplicationId, a.logger) if err != nil { return nil, err } if app.ProjectId != claims.Role.ProjectId { return nil, status.Error(codes.PermissionDenied, "Requested application does not belong to your project") } return &webservice.GetApplicationResponse{ Application: app, }, nil } func (a *WebAPI) GenerateApplicationSealedSecret(ctx context.Context, req *webservice.GenerateApplicationSealedSecretRequest) (*webservice.GenerateApplicationSealedSecretResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } piped, err := getPiped(ctx, a.pipedStore, req.PipedId, a.logger) if err != nil { return nil, err } if err := a.validatePipedBelongsToProject(ctx, req.PipedId, claims.Role.ProjectId); err != nil { return nil, err } se := model.GetSecretEncryptionInPiped(piped) pubkey, err := getEncriptionKey(se) if err != nil { return nil, err } ciphertext, err := encrypt(req.Data, pubkey, req.Base64Encoding, a.logger) if err != nil { return nil, err } return &webservice.GenerateApplicationSealedSecretResponse{ Data: ciphertext, }, nil } // validateAppBelongsToProject checks if the given application belongs to the given project. // It gives back error unless the application belongs to the project. 
func (a *WebAPI) validateAppBelongsToProject(ctx context.Context, appID, projectID string) error { pid, err := a.appProjectCache.Get(appID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in") } return nil } app, err := getApplication(ctx, a.applicationStore, appID, a.logger) if err != nil { return err } a.appProjectCache.Put(appID, app.ProjectId) if app.ProjectId != projectID { return status.Error(codes.PermissionDenied, "Requested application doesn't belong to the project you logged in") } return nil } func (a *WebAPI) ListDeployments(ctx context.Context, req *webservice.ListDeploymentsRequest) (*webservice.ListDeploymentsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } orders := []datastore.Order{ { Field: "UpdatedAt", Direction: datastore.Desc, }, { Field: "Id", Direction: datastore.Asc, }, } filters := []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, { Field: "UpdatedAt", Operator: datastore.OperatorGreaterThanOrEqual, Value: req.PageMinUpdatedAt, }, } if o := req.Options; o != nil { // Allowing multiple so that it can do In Query later. // Currently only the first value is used. if len(o.Statuses) > 0 { filters = append(filters, datastore.ListFilter{ Field: "Status", Operator: datastore.OperatorEqual, Value: o.Statuses[0], }) } if len(o.Kinds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "Kind", Operator: datastore.OperatorEqual, Value: o.Kinds[0], }) } if len(o.ApplicationIds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "ApplicationId", Operator: datastore.OperatorEqual, Value: o.ApplicationIds[0], }) } if len(o.EnvIds) > 0 { filters = append(filters, datastore.ListFilter{ Field: "EnvId", Operator: datastore.OperatorEqual, Value: o.EnvIds[0], }) } if o.ApplicationName != "" { filters = append(filters, datastore.ListFilter{ Field: "ApplicationName", Operator: datastore.OperatorEqual, Value: o.ApplicationName, }) } } pageSize := int(req.PageSize) options := datastore.ListOptions{ Filters: filters, Orders: orders, Limit: pageSize, Cursor: req.Cursor, } deployments, cursor, err := a.deploymentStore.ListDeployments(ctx, options) if err != nil { a.logger.Error("failed to get deployments", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get deployments") } labels := req.Options.Labels if len(labels) == 0 || len(deployments) == 0 { return &webservice.ListDeploymentsResponse{ Deployments: deployments, Cursor: cursor, }, nil } // Start filtering them by labels. // // NOTE: Filtering by labels is done by the application-side because we need to create composite indexes for every combination in the filter. // We don't want to depend on any other search engine, that's why it filters here. filtered := make([]*model.Deployment, 0, len(deployments)) for _, d := range deployments { if d.ContainLabels(labels) { filtered = append(filtered, d) } } // Stop running additional queries for more data, and return filtered deployments immediately with // current cursor if the size before filtering is already less than the page size. 
if len(deployments) < pageSize { return &webservice.ListDeploymentsResponse{ Deployments: filtered, Cursor: cursor, }, nil } // Repeat the query until the number of filtered deployments reaches the page size, // or until it finishes scanning to page_min_updated_at. for len(filtered) < pageSize { options.Cursor = cursor deployments, cursor, err = a.deploymentStore.ListDeployments(ctx, options) if err != nil { a.logger.Error("failed to get deployments", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get deployments") } if len(deployments) == 0 { break } for _, d := range deployments { if d.ContainLabels(labels) { filtered = append(filtered, d) } } // We've already specified UpdatedAt >= req.PageMinUpdatedAt, so we need to check just equality. if deployments[len(deployments)-1].UpdatedAt == req.PageMinUpdatedAt { break } } // TODO: Think about possibility that the response of ListDeployments exceeds the page size return &webservice.ListDeploymentsResponse{ Deployments: filtered, Cursor: cursor, }, nil } func (a *WebAPI) GetDeployment(ctx context.Context, req *webservice.GetDeploymentRequest) (*webservice.GetDeploymentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger) if err != nil { return nil, err } if claims.Role.ProjectId != deployment.ProjectId { return nil, status.Error(codes.PermissionDenied, "Requested deployment does not belong to your project") } return &webservice.GetDeploymentResponse{ Deployment: deployment, }, nil } // validateDeploymentBelongsToProject checks if the given deployment belongs to the given project. // It gives back error unless the deployment belongs to the project. 
func (a *WebAPI) validateDeploymentBelongsToProject(ctx context.Context, deploymentID, projectID string) error { pid, err := a.deploymentProjectCache.Get(deploymentID) if err == nil { if pid != projectID { return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in") } return nil } deployment, err := getDeployment(ctx, a.deploymentStore, deploymentID, a.logger) if err != nil { return err } a.deploymentProjectCache.Put(deploymentID, deployment.ProjectId) if deployment.ProjectId != projectID { return status.Error(codes.PermissionDenied, "Requested deployment doesn't belong to the project you logged in") } return nil } func (a *WebAPI) GetStageLog(ctx context.Context, req *webservice.GetStageLogRequest) (*webservice.GetStageLogResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil { return nil, err } blocks, completed, err := a.stageLogStore.FetchLogs(ctx, req.DeploymentId, req.StageId, req.RetriedCount, req.OffsetIndex) if errors.Is(err, stagelogstore.ErrNotFound) { return nil, status.Error(codes.NotFound, "The stage log not found") } if err != nil { a.logger.Error("failed to get stage logs", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get stage logs") } return &webservice.GetStageLogResponse{ Blocks: blocks, Completed: completed, }, nil } func (a *WebAPI) CancelDeployment(ctx context.Context, req *webservice.CancelDeploymentRequest) (*webservice.CancelDeploymentResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger) if err != nil { return nil, err } if claims.Role.ProjectId != deployment.ProjectId { return nil, status.Error(codes.PermissionDenied, "Requested deployment does not belong to your project") } if model.IsCompletedDeployment(deployment.Status) { return nil, status.Errorf(codes.FailedPrecondition, "could not cancel the deployment because it was already completed") } cmd := model.Command{ Id: uuid.New().String(), PipedId: deployment.PipedId, ApplicationId: deployment.ApplicationId, ProjectId: deployment.ProjectId, DeploymentId: req.DeploymentId, Type: model.Command_CANCEL_DEPLOYMENT, Commander: claims.Subject, CancelDeployment: &model.Command_CancelDeployment{ DeploymentId: req.DeploymentId, ForceRollback: req.ForceRollback, ForceNoRollback: req.ForceNoRollback, }, } if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil { return nil, err } return &webservice.CancelDeploymentResponse{ CommandId: cmd.Id, }, nil } func (a *WebAPI) ApproveStage(ctx context.Context, req *webservice.ApproveStageRequest) (*webservice.ApproveStageResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } deployment, err := getDeployment(ctx, a.deploymentStore, req.DeploymentId, a.logger) if err != nil { return nil, err } if err := validateApprover(deployment.Stages, claims.Subject, req.StageId); err != nil { return nil, err } if err := a.validateDeploymentBelongsToProject(ctx, req.DeploymentId, claims.Role.ProjectId); err != nil { return nil, err } stage, ok := deployment.StageStatusMap()[req.StageId] 
if !ok { return nil, status.Error(codes.FailedPrecondition, "The stage was not found in the deployment") } if model.IsCompletedStage(stage) { return nil, status.Errorf(codes.FailedPrecondition, "Could not approve the stage because it was already completed") } commandID := uuid.New().String() cmd := model.Command{ Id: commandID, PipedId: deployment.PipedId, ApplicationId: deployment.ApplicationId, ProjectId: deployment.ProjectId, DeploymentId: req.DeploymentId, StageId: req.StageId, Type: model.Command_APPROVE_STAGE, Commander: claims.Subject, ApproveStage: &model.Command_ApproveStage{ DeploymentId: req.DeploymentId, StageId: req.StageId, }, } if err := addCommand(ctx, a.commandStore, &cmd, a.logger); err != nil { return nil, err } return &webservice.ApproveStageResponse{ CommandId: commandID, }, nil } // No error means that the given commander is valid. func validateApprover(stages []*model.PipelineStage, commander, stageID string) error { var approvers []string for _, s := range stages { if s.Id != stageID { continue } if as := s.Metadata["Approvers"]; as != "" { approvers = strings.Split(as, ",") } break } if len(approvers) == 0 { // Anyone can approve the deployment pipeline return nil } for _, ap := range approvers { if ap == commander { return nil } } return status.Error(codes.PermissionDenied, fmt.Sprintf("You can't approve this deployment because you (%s) are not in the approver list: %v", commander, approvers)) } func (a *WebAPI) GetApplicationLiveState(ctx context.Context, req *webservice.GetApplicationLiveStateRequest) (*webservice.GetApplicationLiveStateResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.validateAppBelongsToProject(ctx, req.ApplicationId, claims.Role.ProjectId); err != nil { return nil, err } snapshot, err := a.applicationLiveStateStore.GetStateSnapshot(ctx, req.ApplicationId) if errors.Is(err, filestore.ErrNotFound) { return nil, status.Error(codes.NotFound, "Application live state not found") } if err != nil { a.logger.Error("failed to get application live state", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get application live state") } return &webservice.GetApplicationLiveStateResponse{ Snapshot: snapshot, }, nil } // GetProject gets the specified porject without sensitive data. func (a *WebAPI) GetProject(ctx context.Context, req *webservice.GetProjectRequest) (*webservice.GetProjectResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } project, err := a.getProject(ctx, claims.Role.ProjectId) if err != nil { return nil, err } // Redact all sensitive data inside project message before sending to the client. 
project.RedactSensitiveData() return &webservice.GetProjectResponse{ Project: project, }, nil } func (a *WebAPI) getProject(ctx context.Context, projectID string) (*model.Project, error) { if p, ok := a.projectsInConfig[projectID]; ok { return &model.Project{ Id: p.Id, Desc: p.Desc, StaticAdmin: &model.ProjectStaticUser{ Username: p.StaticAdmin.Username, PasswordHash: p.StaticAdmin.PasswordHash, }, }, nil } project, err := a.projectStore.GetProject(ctx, projectID) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "The project is not found") } if err != nil { a.logger.Error("failed to get project", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get project") } return project, nil } // UpdateProjectStaticAdmin updates the static admin user settings. func (a *WebAPI) UpdateProjectStaticAdmin(ctx context.Context, req *webservice.UpdateProjectStaticAdminRequest) (*webservice.UpdateProjectStaticAdminResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.UpdateProjectStaticAdmin(ctx, claims.Role.ProjectId, req.Username, req.Password); err != nil { a.logger.Error("failed to update static admin", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to update static admin") } return &webservice.UpdateProjectStaticAdminResponse{}, nil } // EnableStaticAdmin enables static admin login. func (a *WebAPI) EnableStaticAdmin(ctx context.Context, req *webservice.EnableStaticAdminRequest) (*webservice.EnableStaticAdminResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.EnableStaticAdmin(ctx, claims.Role.ProjectId); err != nil { a.logger.Error("failed to enable static admin login", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to enable static admin login") } return &webservice.EnableStaticAdminResponse{}, nil } // DisableStaticAdmin disables static admin login. func (a *WebAPI) DisableStaticAdmin(ctx context.Context, req *webservice.DisableStaticAdminRequest) (*webservice.DisableStaticAdminResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.DisableStaticAdmin(ctx, claims.Role.ProjectId); err != nil { a.logger.Error("failed to disenable static admin login", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to disenable static admin login") } return &webservice.DisableStaticAdminResponse{}, nil } // UpdateProjectSSOConfig updates the sso settings. 
func (a *WebAPI) UpdateProjectSSOConfig(ctx context.Context, req *webservice.UpdateProjectSSOConfigRequest) (*webservice.UpdateProjectSSOConfigResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := req.Sso.Encrypt(a.encrypter); err != nil { a.logger.Error("failed to encrypt sensitive data in sso configurations", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to encrypt sensitive data in sso configurations") } if err := a.projectStore.UpdateProjectSSOConfig(ctx, claims.Role.ProjectId, req.Sso); err != nil { a.logger.Error("failed to update project single sign on settings", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to update project single sign on settings") } return &webservice.UpdateProjectSSOConfigResponse{}, nil } // UpdateProjectRBACConfig updates the sso settings. func (a *WebAPI) UpdateProjectRBACConfig(ctx context.Context, req *webservice.UpdateProjectRBACConfigRequest) (*webservice.UpdateProjectRBACConfigResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if _, ok := a.projectsInConfig[claims.Role.ProjectId]; ok { return nil, status.Error(codes.FailedPrecondition, "Failed to update a debug project specified in the control-plane configuration") } if err := a.projectStore.UpdateProjectRBACConfig(ctx, claims.Role.ProjectId, req.Rbac); err != nil { a.logger.Error("failed to update project single sign on settings", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to update project single sign on settings") } return &webservice.UpdateProjectRBACConfigResponse{}, nil } // GetMe gets information about the current user. 
func (a *WebAPI) GetMe(ctx context.Context, req *webservice.GetMeRequest) (*webservice.GetMeResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } return &webservice.GetMeResponse{ Subject: claims.Subject, AvatarUrl: claims.AvatarURL, ProjectId: claims.Role.ProjectId, ProjectRole: claims.Role.ProjectRole, }, nil } func (a *WebAPI) GetCommand(ctx context.Context, req *webservice.GetCommandRequest) (*webservice.GetCommandResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } cmd, err := getCommand(ctx, a.commandStore, req.CommandId, a.logger) if err != nil { return nil, err } if claims.Role.ProjectId != cmd.ProjectId { return nil, status.Error(codes.PermissionDenied, "Requested command does not belong to your project") } return &webservice.GetCommandResponse{ Command: cmd, }, nil } func (a *WebAPI) GenerateAPIKey(ctx context.Context, req *webservice.GenerateAPIKeyRequest) (*webservice.GenerateAPIKeyResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } id := uuid.New().String() key, hash, err := model.GenerateAPIKey(id) if err != nil { a.logger.Error("failed to generate API key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to generate API key") } apiKey := model.APIKey{ Id: id, Name: req.Name, KeyHash: hash, ProjectId: claims.Role.ProjectId, Role: req.Role, Creator: claims.Subject, } err = a.apiKeyStore.AddAPIKey(ctx, &apiKey) if errors.Is(err, datastore.ErrAlreadyExists) { return nil, status.Error(codes.AlreadyExists, "The API key already exists") } if err != nil { a.logger.Error("failed to create API key", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to create API key") } return &webservice.GenerateAPIKeyResponse{ Key: key, }, nil } func (a *WebAPI) DisableAPIKey(ctx context.Context, req *webservice.DisableAPIKeyRequest) (*webservice.DisableAPIKeyResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } if err := a.apiKeyStore.DisableAPIKey(ctx, req.Id, claims.Role.ProjectId); err != nil { switch err { case datastore.ErrNotFound: return nil, status.Error(codes.InvalidArgument, "The API key is not found") case datastore.ErrInvalidArgument: return nil, status.Error(codes.InvalidArgument, "Invalid value for update") default: a.logger.Error("failed to disable the API key", zap.String("apikey-id", req.Id), zap.Error(err), ) return nil, status.Error(codes.Internal, "Failed to disable the API key") } } return &webservice.DisableAPIKeyResponse{}, nil } func (a *WebAPI) ListAPIKeys(ctx context.Context, req *webservice.ListAPIKeysRequest) (*webservice.ListAPIKeysResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } opts := datastore.ListOptions{ Filters: []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, }, } if req.Options != nil { if req.Options.Enabled != nil { opts.Filters = append(opts.Filters, datastore.ListFilter{ Field: "Disabled", Operator: datastore.OperatorEqual, Value: !req.Options.Enabled.GetValue(), }) } } apiKeys, err := 
a.apiKeyStore.ListAPIKeys(ctx, opts) if err != nil { a.logger.Error("failed to list API keys", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to list API keys") } // Redact all sensitive data inside API key before sending to the client. for i := range apiKeys { apiKeys[i].RedactSensitiveData() } return &webservice.ListAPIKeysResponse{ Keys: apiKeys, }, nil } // GetInsightData returns the accumulated insight data. func (a *WebAPI) GetInsightData(ctx context.Context, req *webservice.GetInsightDataRequest) (*webservice.GetInsightDataResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } count := int(req.DataPointCount) from := time.Unix(req.RangeFrom, 0) chunks, err := insightstore.LoadChunksFromCache(a.insightCache, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count) if err != nil { a.logger.Error("failed to load chunks from cache", zap.Error(err)) chunks, err = a.insightStore.LoadChunks(ctx, claims.Role.ProjectId, req.ApplicationId, req.MetricsKind, req.Step, from, count) if err != nil { a.logger.Error("failed to load chunks from insightstore", zap.Error(err)) return nil, err } if err := insightstore.PutChunksToCache(a.insightCache, chunks); err != nil { a.logger.Error("failed to put chunks to cache", zap.Error(err)) } } idp, err := chunks.ExtractDataPoints(req.Step, from, count) if err != nil { a.logger.Error("failed to extract data points from chunks", zap.Error(err)) } var updateAt int64 for _, c := range chunks { accumulatedTo := c.GetAccumulatedTo() if accumulatedTo > updateAt { updateAt = accumulatedTo } } return &webservice.GetInsightDataResponse{ UpdatedAt: updateAt, DataPoints: idp, Type: model.InsightResultType_MATRIX, Matrix: []*model.InsightSampleStream{ { DataPoints: idp, }, }, }, nil } func (a *WebAPI) GetInsightApplicationCount(ctx context.Context, req *webservice.GetInsightApplicationCountRequest) (*webservice.GetInsightApplicationCountResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } // TODO: Cache application counts in the cache service. c, err := a.insightStore.LoadApplicationCounts(ctx, claims.Role.ProjectId) if err != nil { if err == filestore.ErrNotFound { return nil, status.Error(codes.NotFound, "Not found") } a.logger.Error("failed to load application counts", zap.Error(err)) return nil, status.Error(codes.Internal, "failed to load application counts") } counts := make([]*model.InsightApplicationCount, 0, len(c.Counts)) for i := range c.Counts { counts = append(counts, &c.Counts[i]) } return &webservice.GetInsightApplicationCountResponse{ Counts: counts, UpdatedAt: c.UpdatedAt, }, nil } func (a *WebAPI) ListDeploymentChains(ctx context.Context, req *webservice.ListDeploymentChainsRequest) (*webservice.ListDeploymentChainsResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } orders := []datastore.Order{ { Field: "UpdatedAt", Direction: datastore.Desc, }, { Field: "Id", Direction: datastore.Asc, }, } filters := []datastore.ListFilter{ { Field: "ProjectId", Operator: datastore.OperatorEqual, Value: claims.Role.ProjectId, }, { Field: "UpdatedAt", Operator: datastore.OperatorGreaterThan, Value: req.PageMinUpdatedAt, }, } // TODO: Support filter list deployment chain with options. 
pageSize := int(req.PageSize) options := datastore.ListOptions{ Filters: filters, Orders: orders, Limit: pageSize, Cursor: req.Cursor, } deploymentChains, cursor, err := a.deploymentChainStore.ListDeploymentChains(ctx, options) if err != nil { a.logger.Error("failed to list deployment chains", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to list deployment chains") } return &webservice.ListDeploymentChainsResponse{ DeploymentChains: deploymentChains, Cursor: cursor, }, nil } func (a *WebAPI) GetDeploymentChain(ctx context.Context, req *webservice.GetDeploymentChainRequest) (*webservice.GetDeploymentChainResponse, error) { claims, err := rpcauth.ExtractClaims(ctx) if err != nil { a.logger.Error("failed to authenticate the current user", zap.Error(err)) return nil, err } dc, err := a.deploymentChainStore.GetDeploymentChain(ctx, req.DeploymentChainId) if errors.Is(err, datastore.ErrNotFound) { return nil, status.Error(codes.NotFound, "Deployment chain is not found") } if err != nil { a.logger.Error("failed to get deployment chain", zap.Error(err)) return nil, status.Error(codes.Internal, "Failed to get deployment chain") } if claims.Role.ProjectId != dc.ProjectId { return nil, status.Error(codes.PermissionDenied, "Requested deployment chain does not belong to your project") } return &webservice.GetDeploymentChainResponse{ DeploymentChain: dc, }, nil }
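The GetInsightData handler above reads insight chunks from a cache first and only falls back to the insight store on a miss, repopulating the cache afterwards while treating a failed cache write as non-fatal. Below is a minimal, hypothetical Go sketch of that cache-then-store pattern; the Cache/Store interfaces and the in-memory types are stand-ins for illustration, not the real insightstore API.

// A small sketch of the cache-then-store fallback used by GetInsightData above.
// All types here (cache, store, memCache, memStore) are illustrative assumptions.
package main

import (
	"context"
	"errors"
	"fmt"
)

type chunks []string

type cache interface {
	Get(key string) (chunks, error)
	Put(key string, c chunks) error
}

type store interface {
	Load(ctx context.Context, key string) (chunks, error)
}

var errCacheMiss = errors.New("cache miss")

// loadChunks reads from the cache, falls back to the durable store on a miss,
// and repopulates the cache best-effort so the next read is cheap.
func loadChunks(ctx context.Context, c cache, s store, key string) (chunks, error) {
	if got, err := c.Get(key); err == nil {
		return got, nil
	}
	got, err := s.Load(ctx, key)
	if err != nil {
		return nil, fmt.Errorf("load chunks from store: %w", err)
	}
	if err := c.Put(key, got); err != nil {
		// A cache write failure is only reported, not returned, mirroring the handler above.
		fmt.Println("warning: failed to repopulate cache:", err)
	}
	return got, nil
}

type memCache struct{ m map[string]chunks }

func (mc *memCache) Get(key string) (chunks, error) {
	if v, ok := mc.m[key]; ok {
		return v, nil
	}
	return nil, errCacheMiss
}

func (mc *memCache) Put(key string, c chunks) error { mc.m[key] = c; return nil }

type memStore struct{ m map[string]chunks }

func (ms *memStore) Load(_ context.Context, key string) (chunks, error) {
	if v, ok := ms.m[key]; ok {
		return v, nil
	}
	return nil, errors.New("not found")
}

func main() {
	c := &memCache{m: map[string]chunks{}}
	s := &memStore{m: map[string]chunks{"project-1": {"chunk-a", "chunk-b"}}}
	got, err := loadChunks(context.Background(), c, s, "project-1") // cache miss, served from store
	fmt.Println(got, err)
	got, err = loadChunks(context.Background(), c, s, "project-1") // now served from cache
	fmt.Println(got, err)
}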
1
24564
`ctx` is unused in ListEvents
pipe-cd-pipe
go
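The review message on this record ("`ctx` is unused in ListEvents") points at a context parameter that is accepted but never passed on to the datastore call. ListEvents itself is not part of this excerpt, so the following is only a hypothetical sketch of that kind of fix; the eventStore interface, Event type, and listEvents helper are made-up names, not the project's real API.

// Hypothetical illustration of threading ctx through to the store call instead of ignoring it,
// so cancellation and deadlines propagate. Names below are stand-ins for illustration only.
package main

import (
	"context"
	"fmt"
)

type Event struct{ Id string }

type eventStore interface {
	ListEvents(ctx context.Context, projectID string) ([]Event, error)
}

// listEvents passes the request context down to the datastore rather than dropping it.
func listEvents(ctx context.Context, s eventStore, projectID string) ([]Event, error) {
	return s.ListEvents(ctx, projectID)
}

type fakeStore struct{}

func (fakeStore) ListEvents(_ context.Context, projectID string) ([]Event, error) {
	return []Event{{Id: projectID + "-event-1"}}, nil
}

func main() {
	events, err := listEvents(context.Background(), fakeStore{}, "project-1")
	fmt.Println(events, err)
}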
@@ -101,6 +101,14 @@ public class SmartStoreTest extends SmartStoreTestCase { Assert.assertTrue("ENABLE_JSON1 flag not found in compile options", compileOptions.contains("ENABLE_JSON1")); } + /** + * Checking sqlcipher version + */ + @Test + public void testSQLCipherVersion() { + Assert.assertEquals("Wrong sqlcipher version", "4.0.1 community", store.getSQLCipherVersion()); + } + /** * Method to check soup blob with one stored by db. Can be overridden to check external storage if necessary. */
1
/* * Copyright (c) 2011-present, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.store; import android.database.Cursor; import android.os.SystemClock; import androidx.test.filters.MediumTest; import androidx.test.ext.junit.runners.AndroidJUnit4; import com.salesforce.androidsdk.smartstore.store.DBHelper; import com.salesforce.androidsdk.smartstore.store.IndexSpec; import com.salesforce.androidsdk.smartstore.store.QuerySpec; import com.salesforce.androidsdk.smartstore.store.QuerySpec.Order; import com.salesforce.androidsdk.smartstore.store.SmartStore; import com.salesforce.androidsdk.smartstore.store.SmartStore.Type; import com.salesforce.androidsdk.smartstore.store.SoupSpec; import com.salesforce.androidsdk.util.test.JSONTestHelper; import net.sqlcipher.database.SQLiteDatabase; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import java.util.Arrays; import java.util.Collections; import java.util.List; /** * Main test suite for SmartStore */ @RunWith(AndroidJUnit4.class) @MediumTest public class SmartStoreTest extends SmartStoreTestCase { protected static final String TEST_SOUP = "test_soup"; protected static final String OTHER_TEST_SOUP = "other_test_soup"; private static final String THIRD_TEST_SOUP = "third_test_soup"; private static final String FOURTH_TEST_SOUP = "fourth_test_soup"; @Before public void setUp() throws Exception { super.setUp(); store.setCaptureExplainQueryPlan(true); Assert.assertFalse("Table for test_soup should not exist", hasTable("TABLE_1")); Assert.assertFalse("Soup test_soup should not exist", store.hasSoup(TEST_SOUP)); registerSoup(store, TEST_SOUP, new IndexSpec[] { new IndexSpec("key", Type.string) }); Assert.assertEquals("Table for test_soup was expected to be called TABLE_1", "TABLE_1", getSoupTableName(TEST_SOUP)); Assert.assertTrue("Table for test_soup should now exist", 
hasTable("TABLE_1")); Assert.assertTrue("Soup test_soup should now exist", store.hasSoup(TEST_SOUP)); } @After public void tearDown() throws Exception { super.tearDown(); } @Override protected String getEncryptionKey() { return "test123"; } /** * Checking compile options */ @Test public void testCompileOptions() { List<String> compileOptions = store.getCompileOptions(); Assert.assertTrue("ENABLE_FTS4 flag not found in compile options", compileOptions.contains("ENABLE_FTS4")); Assert.assertTrue("ENABLE_FTS3_PARENTHESIS flag not found in compile options", compileOptions.contains("ENABLE_FTS3_PARENTHESIS")); Assert.assertTrue("ENABLE_FTS5 flag not found in compile options", compileOptions.contains("ENABLE_FTS5")); Assert.assertTrue("ENABLE_JSON1 flag not found in compile options", compileOptions.contains("ENABLE_JSON1")); } /** * Method to check soup blob with one stored by db. Can be overridden to check external storage if necessary. */ protected void assertSameSoupAsDB(JSONObject soup, Cursor c, String soupName, Long id) throws JSONException { JSONTestHelper.assertSameJSON("Wrong value in soup column", soup, new JSONObject(c.getString(c.getColumnIndex("soup")))); } /** * Testing method with paths to top level string/integer/array/map as well as edge cases (null object/null or empty path) * @throws JSONException */ @Test public void testProjectTopLevel() throws JSONException { JSONObject json = new JSONObject("{'a':'va', 'b':2, 'c':[0,1,2], 'd': {'d1':'vd1', 'd2':'vd2', 'd3':[1,2], 'd4':{'e':5}}}"); // Null object Assert.assertNull("Should have been null", SmartStore.project(null, "path")); // Root JSONTestHelper.assertSameJSON("Should have returned whole object", json, SmartStore.project(json, null)); JSONTestHelper.assertSameJSON("Should have returned whole object", json, SmartStore.project(json, "")); // Top-level elements Assert.assertEquals("Wrong value for key a", "va", SmartStore.project(json, "a")); Assert.assertEquals("Wrong value for key b", 2, SmartStore.project(json, "b")); JSONTestHelper.assertSameJSON("Wrong value for key c", new JSONArray("[0,1,2]"), SmartStore.project(json, "c")); JSONTestHelper.assertSameJSON("Wrong value for key d", new JSONObject("{'d1':'vd1','d2':'vd2','d3':[1,2],'d4':{'e':5}}"), (JSONObject) SmartStore.project(json, "d")); } /** * Testing method with paths to non-top level string/integer/array/map * @throws JSONException */ @Test public void testProjectNested() throws JSONException { JSONObject json = new JSONObject("{'a':'va', 'b':2, 'c':[0,1,2], 'd': {'d1':'vd1', 'd2':'vd2', 'd3':[1,2], 'd4':{'e':5}}}"); // Nested elements Assert.assertEquals("Wrong value for key d.d1", "vd1", SmartStore.project(json, "d.d1")); Assert.assertEquals("Wrong value for key d.d2", "vd2", SmartStore.project(json, "d.d2")); JSONTestHelper.assertSameJSON("Wrong value for key d.d3", new JSONArray("[1,2]"), SmartStore.project(json, "d.d3")); JSONTestHelper.assertSameJSON("Wrong value for key d.d4", new JSONObject("{'e':5}"), SmartStore.project(json, "d.d4")); Assert.assertEquals("Wrong value for key d.d4.e", 5, SmartStore.project(json, "d.d4.e")); } /** * Testing method with path through arrays * @throws JSONException */ @Test public void testProjectThroughArrays() throws JSONException { JSONObject json = new JSONObject("{\"a\":\"a1\", \"b\":2, \"c\":[{\"cc\":\"cc1\"}, {\"cc\":2}, {\"cc\":[1,2,3]}, {}, {\"cc\":{\"cc5\":5}}], \"d\":[{\"dd\":[{\"ddd\":\"ddd11\"},{\"ddd\":\"ddd12\"}]}, {\"dd\":[{\"ddd\":\"ddd21\"}]}, {\"dd\":[{\"ddd\":\"ddd31\"},{\"ddd3\":\"ddd32\"}]}]}"); 
JSONTestHelper.assertSameJSON("Wrong value for key c", new JSONArray("[{\"cc\":\"cc1\"}, {\"cc\":2}, {\"cc\":[1,2,3]}, {}, {\"cc\":{\"cc5\":5}}]"), SmartStore.project(json, "c")); JSONTestHelper.assertSameJSON("Wrong value for key c.cc", new JSONArray("[\"cc1\",2, [1,2,3], {\"cc5\":5}]"), SmartStore.project(json, "c.cc")); JSONTestHelper.assertSameJSON("Wrong value for key c.cc.cc5", new JSONArray("[5]"), SmartStore.project(json, "c.cc.cc5")); JSONTestHelper.assertSameJSON("Wrong value for key d", new JSONArray("[{\"dd\":[{\"ddd\":\"ddd11\"},{\"ddd\":\"ddd12\"}]}, {\"dd\":[{\"ddd\":\"ddd21\"}]}, {\"dd\":[{\"ddd\":\"ddd31\"},{\"ddd3\":\"ddd32\"}]}]"), SmartStore.project(json, "d")); JSONTestHelper.assertSameJSON("Wrong value for key d.dd", new JSONArray("[[{\"ddd\":\"ddd11\"},{\"ddd\":\"ddd12\"}], [{\"ddd\":\"ddd21\"}], [{\"ddd\":\"ddd31\"},{\"ddd3\":\"ddd32\"}]]"), SmartStore.project(json, "d.dd")); JSONTestHelper.assertSameJSON("Wrong value for key d.dd.ddd", new JSONArray("[[\"ddd11\",\"ddd12\"],[\"ddd21\"],[\"ddd31\"]]"), SmartStore.project(json, "d.dd.ddd")); JSONTestHelper.assertSameJSON("Wrong value for key d.dd.ddd3", new JSONArray("[[\"ddd32\"]]"), SmartStore.project(json, "d.dd.ddd3")); } /** * Check that the meta data table (soup index map) has been created */ @Test public void testMetaDataTableCreated() { Assert.assertTrue("Table soup_index_map not found", hasTable("soup_index_map")); } /** * Test register/drop soup */ @Test public void testRegisterDropSoup() { // Before Assert.assertNull("getSoupTableName should have returned null", getSoupTableName(THIRD_TEST_SOUP)); Assert.assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP)); // Register registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", Type.string), new IndexSpec("value", Type.string) }); String soupTableName = getSoupTableName(THIRD_TEST_SOUP); Assert.assertEquals("getSoupTableName should have returned TABLE_2", "TABLE_2", soupTableName); Assert.assertTrue("Table for soup third_test_soup does exist", hasTable(soupTableName)); Assert.assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP)); // Check soup indexes final IndexSpec[] indexSpecs = store.getSoupIndexSpecs(THIRD_TEST_SOUP); Assert.assertEquals("Wrong path", "key", indexSpecs[0].path); Assert.assertEquals("Wrong type", Type.string, indexSpecs[0].type); Assert.assertEquals("Wrong column name", soupTableName + "_0", indexSpecs[0].columnName); Assert.assertEquals("Wrong path", "value", indexSpecs[1].path); Assert.assertEquals("Wrong type", Type.string, indexSpecs[1].type); Assert.assertEquals("Wrong column name", soupTableName + "_1", indexSpecs[1].columnName); // Check db indexes checkDatabaseIndexes(soupTableName, Arrays.asList(new String[] { "CREATE INDEX " + soupTableName + "_0_idx on " + soupTableName + " ( " + soupTableName + "_0 )", "CREATE INDEX " + soupTableName + "_1_idx on " + soupTableName + " ( " + soupTableName + "_1 )", "CREATE INDEX " + soupTableName + "_created_idx on " + soupTableName + " ( created )", "CREATE INDEX " + soupTableName + "_lastModified_idx on " + soupTableName + " ( lastModified )" })); // Drop store.dropSoup(THIRD_TEST_SOUP); // After Assert.assertFalse("Soup third_test_soup should no longer exist", store.hasSoup(THIRD_TEST_SOUP)); Assert.assertNull("getSoupTableName should have returned null", getSoupTableName(THIRD_TEST_SOUP)); Assert.assertFalse("Table for soup third_test_soup does exist", hasTable(soupTableName)); } /** * Testing getAllSoupNames: register a 
new soup and then drop it and call getAllSoupNames before and after */ @Test public void testGetAllSoupNames() { // Before Assert.assertEquals("One soup name expected", 1, store.getAllSoupNames().size()); Assert.assertTrue(TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(TEST_SOUP)); // Register another soup registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", Type.string), new IndexSpec("value", Type.string) }); Assert.assertEquals("Two soup names expected", 2, store.getAllSoupNames().size()); Assert.assertTrue(TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(TEST_SOUP)); Assert.assertTrue(THIRD_TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(THIRD_TEST_SOUP)); // Drop the latest soup store.dropSoup(THIRD_TEST_SOUP); Assert.assertEquals("One soup name expected", 1, store.getAllSoupNames().size()); Assert.assertTrue(TEST_SOUP + " should have been returned by getAllSoupNames", store.getAllSoupNames().contains(TEST_SOUP)); } /** * Testing dropAllSoups: register a couple of soups then drop them all */ @Test public void testDropAllSoups() { // Register another soup Assert.assertEquals("One soup name expected", 1, store.getAllSoupNames().size()); registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", Type.string), new IndexSpec("value", Type.string) }); Assert.assertEquals("Two soup names expected", 2, store.getAllSoupNames().size()); // Drop all store.dropAllSoups(); Assert.assertEquals("No soup name expected", 0, store.getAllSoupNames().size()); Assert.assertFalse("Soup " + THIRD_TEST_SOUP + " should no longer exist", store.hasSoup(THIRD_TEST_SOUP)); Assert.assertFalse("Soup " + TEST_SOUP + " should no longer exist", store.hasSoup(TEST_SOUP)); } /** * Testing create: create a single element with a single index pointing to a top level attribute * @throws JSONException */ @Test public void testCreateOne() throws JSONException { JSONObject soupElt = new JSONObject("{'key':'ka', 'value':'va'}"); JSONObject soupEltCreated = store.create(TEST_SOUP, soupElt); // Check DB Cursor c = null; try { final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); String soupTableName = getSoupTableName(TEST_SOUP); c = DBHelper.getInstance(db).query(db, soupTableName, null, null, null, null); Assert.assertTrue("Expected a soup element", c.moveToFirst()); Assert.assertEquals("Expected one soup element only", 1, c.getCount()); Assert.assertEquals("Wrong id", idOf(soupEltCreated), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupEltCreated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Wrong value in index column", "ka", c.getString(c.getColumnIndex(soupTableName + "_0"))); assertSameSoupAsDB(soupEltCreated, c, soupTableName, idOf(soupEltCreated)); Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified"))); } finally { safeClose(c); } } /** * Testing create: create multiple elements with multiple indices not just pointing to top level attributes * @throws JSONException */ @Test public void testCreateMultiple() throws JSONException { Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP)); registerSoup(store, OTHER_TEST_SOUP, new IndexSpec[] { new IndexSpec("lastName", Type.string), new 
IndexSpec("address.city", Type.string) }); Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP)); JSONObject soupElt1 = new JSONObject("{'lastName':'Doe', 'address':{'city':'San Francisco','street':'1 market'}}"); JSONObject soupElt2 = new JSONObject("{'lastName':'Jackson', 'address':{'city':'Los Angeles','street':'100 mission'}}"); JSONObject soupElt3 = new JSONObject("{'lastName':'Watson', 'address':{'city':'London','street':'50 market'}}"); JSONObject soupElt1Created = store.create(OTHER_TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(OTHER_TEST_SOUP, soupElt3); // Check DB Cursor c = null; try { String soupTableName = getSoupTableName(OTHER_TEST_SOUP); Assert.assertEquals("Table for other_test_soup was expected to be called TABLE_2", "TABLE_2", soupTableName); Assert.assertTrue("Table for other_test_soup should now exist", hasTable("TABLE_2")); final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null); Assert.assertTrue("Expected a soup element", c.moveToFirst()); Assert.assertEquals("Expected three soup elements", 3, c.getCount()); Assert.assertEquals("Wrong id", idOf(soupElt1Created), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt1Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Wrong value in index column", "Doe", c.getString(c.getColumnIndex(soupTableName + "_0"))); Assert.assertEquals("Wrong value in index column", "San Francisco", c.getString(c.getColumnIndex(soupTableName + "_1"))); assertSameSoupAsDB(soupElt1Created, c, soupTableName, idOf(soupElt1Created)); c.moveToNext(); Assert.assertEquals("Wrong id", idOf(soupElt2Created), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt2Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Wrong value in index column", "Jackson", c.getString(c.getColumnIndex(soupTableName + "_0"))); Assert.assertEquals("Wrong value in index column", "Los Angeles", c.getString(c.getColumnIndex(soupTableName + "_1"))); assertSameSoupAsDB(soupElt2Created, c, soupTableName, idOf(soupElt2Created)); c.moveToNext(); Assert.assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt3Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Wrong value in index column", "Watson", c.getString(c.getColumnIndex(soupTableName + "_0"))); Assert.assertEquals("Wrong value in index column", "London", c.getString(c.getColumnIndex(soupTableName + "_1"))); assertSameSoupAsDB(soupElt3Created, c, soupTableName, idOf(soupElt3Created)); } finally { safeClose(c); } } /** * Testing update: create multiple soup elements and update one of them, check them all * @throws JSONException */ @Test public void testUpdate() throws JSONException { JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(TEST_SOUP, 
soupElt3); SystemClock.sleep(10); // to get a different last modified date JSONObject soupElt2ForUpdate = new JSONObject("{'key':'ka2u', 'value':'va2u'}"); JSONObject soupElt2Updated = store.update(TEST_SOUP, soupElt2ForUpdate, idOf(soupElt2Created)); JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)).getJSONObject(0); JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created)).getJSONObject(0); JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Created, soupElt1Retrieved); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Updated, soupElt2Retrieved); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved); // Check DB Cursor c = null; try { final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); String soupTableName = getSoupTableName(TEST_SOUP); c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null); Assert.assertTrue("Expected a soup element", c.moveToFirst()); Assert.assertEquals("Expected three soup elements", 3, c.getCount()); Assert.assertEquals("Wrong id", idOf(soupElt1Created), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt1Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified"))); c.moveToNext(); Assert.assertEquals("Wrong id", idOf(soupElt2Created), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt2Updated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertTrue("Last modified date should be more recent than created date", c.getLong(c.getColumnIndex("created")) < c.getLong(c.getColumnIndex("lastModified"))); c.moveToNext(); Assert.assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt3Created.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified"))); } finally { safeClose(c); } } /** * Testing upsert: upsert multiple soup elements and re-upsert one of them, check them all * @throws JSONException */ @Test public void testUpsert() throws JSONException { JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1); JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2); JSONObject soupElt3Upserted = store.upsert(TEST_SOUP, soupElt3); SystemClock.sleep(10); // to get a different last modified date JSONObject soupElt2ForUpdate = new JSONObject("{'key':'ka2u', 'value':'va2u', '_soupEntryId': " + idOf(soupElt2Upserted) + "}"); JSONObject soupElt2Updated = store.upsert(TEST_SOUP, soupElt2ForUpdate); JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0); JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Upserted)).getJSONObject(0); JSONObject soupElt3Retrieved = 
store.retrieve(TEST_SOUP, idOf(soupElt3Upserted)).getJSONObject(0); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Updated, soupElt2Retrieved); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Upserted, soupElt3Retrieved); // Check DB Cursor c = null; try { final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); String soupTableName = getSoupTableName(TEST_SOUP); c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null); Assert.assertTrue("Expected a soup element", c.moveToFirst()); Assert.assertEquals("Expected three soup elements", 3, c.getCount()); Assert.assertEquals("Wrong id", idOf(soupElt1Upserted), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt1Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified"))); c.moveToNext(); Assert.assertEquals("Wrong id", idOf(soupElt2Upserted), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt2Updated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertTrue("Last modified date should be more recent than created date", c.getLong(c.getColumnIndex("created")) < c.getLong(c.getColumnIndex("lastModified"))); c.moveToNext(); Assert.assertEquals("Wrong id", idOf(soupElt3Upserted), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt3Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified"))); } finally { safeClose(c); } } /** * Testing upsert with external id: upsert multiple soup elements and re-upsert one of them, check them all * @throws JSONException */ @Test public void testUpsertWithExternalId() throws JSONException { JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1, "key"); JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2, "key"); JSONObject soupElt3Upserted = store.upsert(TEST_SOUP, soupElt3, "key"); SystemClock.sleep(10); // to get a different last modified date JSONObject soupElt2ForUpdate = new JSONObject("{'key':'ka2', 'value':'va2u'}"); JSONObject soupElt2Updated = store.upsert(TEST_SOUP, soupElt2ForUpdate, "key"); JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0); JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Upserted)).getJSONObject(0); JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Upserted)).getJSONObject(0); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Updated, soupElt2Retrieved); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Upserted, soupElt3Retrieved); // Check DB Cursor c = null; try { final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); String soupTableName = 
getSoupTableName(TEST_SOUP); c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null); Assert.assertTrue("Expected a soup element", c.moveToFirst()); Assert.assertEquals("Expected three soup elements", 3, c.getCount()); Assert.assertEquals("Wrong id", idOf(soupElt1Upserted), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt1Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified"))); c.moveToNext(); Assert.assertEquals("Wrong id", idOf(soupElt2Upserted), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt2Updated.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertTrue("Last modified date should be more recent than created date", c.getLong(c.getColumnIndex("created")) < c.getLong(c.getColumnIndex("lastModified"))); c.moveToNext(); Assert.assertEquals("Wrong id", idOf(soupElt3Upserted), c.getLong(c.getColumnIndex("id"))); Assert.assertEquals("Wrong created date", soupElt3Upserted.getLong(SmartStore.SOUP_LAST_MODIFIED_DATE), c.getLong(c.getColumnIndex("lastModified"))); Assert.assertEquals("Created date and last modified date should be equal", c.getLong(c.getColumnIndex("created")), c.getLong(c.getColumnIndex("lastModified"))); } finally { safeClose(c); } } /** * Testing upsert passing a non-indexed path for the external id (should fail) * @throws JSONException */ @Test public void testUpsertWithNonIndexedExternalId() throws JSONException { JSONObject soupElt = new JSONObject("{'key':'ka1', 'value':'va1'}"); try { store.upsert(TEST_SOUP, soupElt, "value"); Assert.fail("Exception was expected: value is not an indexed field"); } catch (RuntimeException e) { Assert.assertTrue("Wrong exception", e.getMessage().contains("does not have an index")); } } /** * Testing upsert by user-defined external id without value (should fail) * @throws JSONException */ @Test public void testUpsertByUserDefinedExternalIdWithoutValue() throws JSONException { JSONObject soupElt = new JSONObject("{'value':'va1'}"); try { store.upsert(TEST_SOUP, soupElt, "key"); Assert.fail("Exception was expected: value cannot be empty for upsert by user-defined external id"); } catch (RuntimeException e) { Assert.assertTrue("Wrong exception", e.getMessage().contains("For upsert with external ID path") && e.getMessage().contains("value cannot be empty for any entries")); } } /** * Testing upsert with an external id that is not unique in the soup * @throws JSONException */ @Test public void testUpsertWithNonUniqueExternalId() throws JSONException { JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka', 'value':'va3'}"); JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1); JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2); JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0); JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Upserted)).getJSONObject(0); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Upserted, soupElt2Retrieved); try { store.upsert(TEST_SOUP, soupElt3, 
"key"); Assert.fail("Exception was expected: key is not unique in the soup"); } catch (RuntimeException e) { Assert.assertTrue("Wrong exception", e.getMessage().contains("are more than one soup elements")); } } /** * Testing retrieve: create multiple soup elements and retrieves them back * @throws JSONException */ @Test public void testRetrieve() throws JSONException { JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3); JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)).getJSONObject(0); JSONObject soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created)).getJSONObject(0); JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Created, soupElt1Retrieved); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt2Created, soupElt2Retrieved); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved); } /** * Testing delete: create soup elements, delete element by id and check database directly that it is in fact gone * @throws JSONException */ @Test public void testDelete() throws JSONException { JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3); store.delete(TEST_SOUP, idOf(soupElt2Created)); JSONObject soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)).getJSONObject(0); JSONArray soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created)); JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Created, soupElt1Retrieved); Assert.assertEquals("Should be empty", 0, soupElt2Retrieved.length()); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved); // Check DB Cursor c = null; try { final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); String soupTableName = getSoupTableName(TEST_SOUP); c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null); Assert.assertTrue("Expected a soup element", c.moveToFirst()); Assert.assertEquals("Expected two soup elements", 2, c.getCount()); Assert.assertEquals("Wrong id", idOf(soupElt1Created), c.getLong(c.getColumnIndex("id"))); c.moveToNext(); Assert.assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id"))); } finally { safeClose(c); } } /** * Testing delete: create soup elements, delete by query and check database directly that deleted entries are in fact gone * @throws JSONException */ @Test public void testDeleteByQuery() throws JSONException { tryDeleteByQuery(null, null); } /** * Testing delete: create soup elements, delete by query and check database directly that deleted entries are in fact gone * Populate idsDeleted and idsNotDeleted if not null * @param 
idsDeleted * @param idsNotDeleted */ protected void tryDeleteByQuery(List<Long> idsDeleted, List<Long> idsNotDeleted) throws JSONException { JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3); long id1 = soupElt1Created.getLong(SmartStore.SOUP_ENTRY_ID); long id2 = soupElt2Created.getLong(SmartStore.SOUP_ENTRY_ID); long id3 = soupElt3Created.getLong(SmartStore.SOUP_ENTRY_ID); QuerySpec querySpec = QuerySpec.buildRangeQuerySpec(TEST_SOUP, "key", "ka1", "ka2", "key", Order.ascending, 2); store.deleteByQuery(TEST_SOUP, querySpec); JSONArray soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)); JSONArray soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created)); JSONObject soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)).getJSONObject(0); Assert.assertEquals("Should be empty", 0, soupElt1Retrieved.length()); Assert.assertEquals("Should be empty", 0, soupElt2Retrieved.length()); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt3Created, soupElt3Retrieved); // Check DB Cursor c = null; try { final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); String soupTableName = getSoupTableName(TEST_SOUP); c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null); Assert.assertTrue("Expected a soup element", c.moveToFirst()); Assert.assertEquals("Expected one soup elements", 1, c.getCount()); Assert.assertEquals("Wrong id", idOf(soupElt3Created), c.getLong(c.getColumnIndex("id"))); } finally { safeClose(c); } // Populate idsDeleted if (idsDeleted != null) { idsDeleted.add(id1); idsDeleted.add(id2); } // Populate idsNotDeleted if (idsNotDeleted != null) { idsNotDeleted.add(id3); } } /** * Testing clear soup: create soup elements, clear soup and check database directly that there are in fact gone * @throws JSONException */ @Test public void testClearSoup() throws JSONException { JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3); store.clearSoup(TEST_SOUP); JSONArray soupElt1Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt1Created)); JSONArray soupElt2Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt2Created)); JSONArray soupElt3Retrieved = store.retrieve(TEST_SOUP, idOf(soupElt3Created)); Assert.assertEquals("Should be empty", 0, soupElt1Retrieved.length()); Assert.assertEquals("Should be empty", 0, soupElt2Retrieved.length()); Assert.assertEquals("Should be empty", 0, soupElt3Retrieved.length()); // Check DB Cursor c = null; try { final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); String soupTableName = getSoupTableName(TEST_SOUP); c = DBHelper.getInstance(db).query(db, soupTableName, null, "id ASC", null, null); Assert.assertFalse("Expected no soup element", c.moveToFirst()); } finally { safeClose(c); } } /** * Test query when looking for all elements when soup has string index * 
@throws JSONException */ @Test public void testAllQueryWithStringIndex() throws JSONException { tryAllQuery(Type.string); } /** * Test query when looking for all elements when soup has json1 index * @throws JSONException */ @Test public void testAllQueryWithJSON1Index() throws JSONException { tryAllQuery(Type.json1); } /** * Test query when looking for all elements * @throws JSONException */ public void tryAllQuery(Type type) throws JSONException { // Before Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP)); // Register store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type)}); Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP)); JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1', 'otherValue':'ova1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2', 'otherValue':'ova2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3', 'otherValue':'ova3'}"); JSONObject soupElt1Created = store.create(OTHER_TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(OTHER_TEST_SOUP, soupElt3); // Query all - small page runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildAllQuerySpec(OTHER_TEST_SOUP, "key", Order.ascending, 2), 0, false, "SCAN", soupElt1Created, soupElt2Created); // Query all - next small page runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildAllQuerySpec(OTHER_TEST_SOUP, "key", Order.ascending, 2), 1, false, "SCAN", soupElt3Created); // Query all - large page runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildAllQuerySpec(OTHER_TEST_SOUP, "key", Order.ascending, 10), 0, false, "SCAN", soupElt1Created, soupElt2Created, soupElt3Created); // Query all with select paths runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildAllQuerySpec(OTHER_TEST_SOUP, new String[]{"key"}, "key", Order.ascending, 10), 0, type != Type.json1, "SCAN", new JSONArray("['ka1']"), new JSONArray("['ka2']"), new JSONArray("['ka3']")); } /** * Test query when looking for a specific element with a string index * @throws JSONException */ @Test public void testExactQueryWithStringIndex() throws JSONException { tryExactQuery(Type.string); } /** * Test query when looking for a specific element with a json1 index * @throws JSONException */ @Test public void testExactQueryWithJSON1Index() throws JSONException { tryExactQuery(Type.json1); } private void tryExactQuery(Type type) throws JSONException { // Before Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP)); // Register store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type)}); Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP)); JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1', 'otherValue':'ova1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2', 'otherValue':'ova2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3', 'otherValue':'ova3'}"); store.create(OTHER_TEST_SOUP, soupElt1); JSONObject soupElt2Created= store.create(OTHER_TEST_SOUP, soupElt2); store.create(OTHER_TEST_SOUP, soupElt3); // Exact match runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildExactQuerySpec(OTHER_TEST_SOUP, "key", "ka2", null, null, 10), 0, false, "SEARCH", soupElt2Created); } /** * Query test looking for a range of elements (with ascending or descending 
ordering) with a string index * @throws JSONException */ @Test public void testRangeQueryWithStringIndex() throws JSONException { tryRangeQuery(Type.string); } /** * Query test looking for a range of elements (with ascending or descending ordering) with a json1 index * @throws JSONException */ @Test public void testRangeQueryWithJSON1Index() throws JSONException { tryRangeQuery(Type.json1); } private void tryRangeQuery(Type type) throws JSONException { // Before Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP)); // Register store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type)}); Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP)); JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1', 'otherValue':'ova1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2', 'otherValue':'ova2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3', 'otherValue':'ova3'}"); store.create(OTHER_TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(OTHER_TEST_SOUP, soupElt3); // Range query runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildRangeQuerySpec(OTHER_TEST_SOUP, "key", "ka2", "ka3", "key", Order.ascending, 10), 0, false, "SEARCH", soupElt2Created, soupElt3Created); // Range query - descending order runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildRangeQuerySpec(OTHER_TEST_SOUP, "key", "ka2", "ka3", "key", Order.descending, 10), 0, false, "SEARCH", soupElt3Created, soupElt2Created); // Range query with select paths runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildRangeQuerySpec(OTHER_TEST_SOUP, new String[]{"key"}, "key", "ka2", "ka3", "key", Order.descending, 10), 0, type != Type.json1, "SEARCH", new JSONArray("['ka3']"), new JSONArray("['ka2']")); } /** * Query test looking using like (with ascending or descending ordering) and a string index * @throws JSONException */ @Test public void testLikeQueryWithStringIndex() throws JSONException { tryLikeQuery(Type.string); } /** * Query test looking using like (with ascending or descending ordering) and a json1 index * @throws JSONException */ @Test public void testLikeQueryWithJSON1Index() throws JSONException { tryLikeQuery(Type.json1); } private void tryLikeQuery(Type type) throws JSONException { Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP)); store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type)}); Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP)); JSONObject soupElt1 = new JSONObject("{'key':'abcd', 'value':'va1', 'otherValue':'ova1'}"); JSONObject soupElt2 = new JSONObject("{'key':'bbcd', 'value':'va2', 'otherValue':'ova2'}"); JSONObject soupElt3 = new JSONObject("{'key':'abcc', 'value':'va3', 'otherValue':'ova3'}"); JSONObject soupElt4 = new JSONObject("{'key':'defg', 'value':'va4', 'otherValue':'ova3'}"); JSONObject soupElt1Created = store.create(OTHER_TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(OTHER_TEST_SOUP, soupElt3); store.create(OTHER_TEST_SOUP, soupElt4); // Like query (starts with) runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "abc%", "key", Order.ascending, 10), 0, false, "SCAN", soupElt3Created, soupElt1Created); // Like query 
(ends with) runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "%bcd", "key", Order.ascending, 10), 0, false, "SCAN", soupElt1Created, soupElt2Created); // Like query (starts with) - descending order runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "abc%", "key", Order.descending, 10), 0, false, "SCAN", soupElt1Created, soupElt3Created); // Like query (ends with) - descending order runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "%bcd", "key", Order.descending, 10), 0, false, "SCAN", soupElt2Created, soupElt1Created); // Like query (contains) runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "%bc%", "key", Order.ascending, 10), 0, false, "SCAN", soupElt3Created, soupElt1Created, soupElt2Created); // Like query (contains) - descending order runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, "key", "%bc%", "key", Order.descending, 10), 0, false, "SCAN", soupElt2Created, soupElt1Created, soupElt3Created); // Like query (contains) with select paths runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildLikeQuerySpec(OTHER_TEST_SOUP, new String[] {"key"}, "key", "%bc%", "key", Order.descending, 10), 0, type != Type.json1, "SCAN", new JSONArray("['bbcd']"), new JSONArray("['abcd']"), new JSONArray("['abcc']")); } /** * Test query against soup with special characters when soup has string index * @throws JSONException */ @Test public void testQueryDataWithSpecialCharactersWithStringIndex() throws JSONException { tryQueryDataWithSpecialCharacters(Type.string); } /** * Test query against soup with special characters when soup has json1 index * @throws JSONException */ @Test public void testQueryDataWithSpecialCharactersWithJSON1Index() throws JSONException { tryQueryDataWithSpecialCharacters(Type.json1); } private void tryQueryDataWithSpecialCharacters(Type type) throws JSONException { // Before Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP)); // Register store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", type), new IndexSpec("value", type)}); Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP)); StringBuffer value = new StringBuffer(); for (int i=1; i<1000; i++) { value.append(new Character((char) i)); } String valueForAbcd = "abcd" + value; String valueForDefg = "defg" + value; // Populate soup JSONObject soupElt1 = new JSONObject(); soupElt1.put("key", "abcd"); soupElt1.put("value", valueForAbcd); JSONObject soupElt2 = new JSONObject("{'key':'defg'}"); soupElt2.put("key", "defg"); soupElt2.put("value", valueForDefg); store.create(OTHER_TEST_SOUP, soupElt1); store.create(OTHER_TEST_SOUP, soupElt2); // Smart query String sql = String.format("SELECT {%1$s:value} FROM {%1$s} ORDER BY {%1$s:key}", OTHER_TEST_SOUP); runQueryCheckResultsAndExplainPlan(OTHER_TEST_SOUP, QuerySpec.buildSmartQuerySpec(sql, 10), 0, false, null, new JSONArray(Collections.singletonList(valueForAbcd)), new JSONArray(Collections.singletonList(valueForDefg))); } protected void runQueryCheckResultsAndExplainPlan(String soupName, QuerySpec querySpec, int page, boolean covering, String expectedDbOperation, JSONObject... 
expectedResults) throws JSONException { // Run query JSONArray result = store.query(querySpec, page); // Check results Assert.assertEquals("Wrong number of results", expectedResults.length, result.length()); for (int i=0; i<expectedResults.length; i++) { JSONTestHelper.assertSameJSON("Wrong result for query", expectedResults[i], result.getJSONObject(i)); } // Check explain plan and make sure index was used checkExplainQueryPlan(soupName, 0, covering, expectedDbOperation); } private void runQueryCheckResultsAndExplainPlan(String soupName, QuerySpec querySpec, int page, boolean covering, String expectedDbOperation, JSONArray... expectedRows) throws JSONException { // Run query JSONArray result = store.query(querySpec, page); // Check results Assert.assertEquals("Wrong number of rows", expectedRows.length, result.length()); for (int i = 0; i < expectedRows.length; i++) { JSONTestHelper.assertSameJSON("Wrong result for query", expectedRows[i], result.getJSONArray(i)); } // Check explain plan and make sure index was used if (expectedDbOperation != null) { checkExplainQueryPlan(soupName, 0, covering, expectedDbOperation); } } /** * Test smart sql returning entire soup elements (i.e. select {soup:_soup} from {soup}) * @throws JSONException */ @Test public void testSelectUnderscoreSoup() throws JSONException { // Create soup elements JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt4 = new JSONObject("{'key':'ka4', 'value':'va4'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3); JSONObject soupElt4Created = store.create(TEST_SOUP, soupElt4); final String smartSql = "SELECT {" + TEST_SOUP + ":_soup} FROM {" + TEST_SOUP + "} ORDER BY {" + TEST_SOUP + ":key}"; final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 25); final JSONArray result = store.query(querySpec, 0); Assert.assertNotNull("Result should not be null", result); Assert.assertEquals("Four results expected", 4, result.length()); JSONTestHelper.assertSameJSON("Wrong result for query - row 0", new JSONArray(new JSONObject[] { soupElt1Created}), result.get(0)); JSONTestHelper.assertSameJSON("Wrong result for query - row 1", new JSONArray(new JSONObject[] { soupElt2Created}), result.get(1)); JSONTestHelper.assertSameJSON("Wrong result for query - row 2", new JSONArray(new JSONObject[] { soupElt3Created}), result.get(2)); JSONTestHelper.assertSameJSON("Wrong result for query - row 3", new JSONArray(new JSONObject[] { soupElt4Created}), result.get(3)); } /** * Test smart sql returning entire soup elements from multiple soups * @throws JSONException */ @Test public void testSelectUnderscoreSoupFromMultipleSoups() throws JSONException { JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':'va'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("key", Type.string)}); JSONObject soupElt2 = new JSONObject("{'key':'abcd', 'value':'va1', 'otherValue':'ova1'}"); JSONObject soupElt2Created = store.create(OTHER_TEST_SOUP, soupElt2); final String smartSql = "SELECT {" + TEST_SOUP + ":_soup}, {" + OTHER_TEST_SOUP + ":_soup} FROM {" + TEST_SOUP + "}, {" + OTHER_TEST_SOUP + "}"; final QuerySpec querySpec = 
QuerySpec.buildSmartQuerySpec(smartSql, 25); final JSONArray result = store.query(querySpec, 0); Assert.assertNotNull("Result should not be null", result); Assert.assertEquals("One row expected", 1, result.length()); JSONArray firstRow = result.getJSONArray(0); JSONTestHelper.assertSameJSON("Wrong result for query - row 0 - first soup elt", soupElt1Created, firstRow.getJSONObject(0)); JSONTestHelper.assertSameJSON("Wrong result for query - row 0 - second soup elt", soupElt2Created, firstRow.getJSONObject(1)); } /** * Test smart sql select with null value in string indexed field * @throws JSONException */ @Test public void testSelectWithNullInStringIndexedField() throws JSONException { trySelectWithNullInIndexedField(Type.string); } /** * Test smart sql select with null value in json1 indexed field * @throws JSONException */ @Test public void testSelectWithNullInJSON1IndexedField() throws JSONException { trySelectWithNullInIndexedField(Type.json1); } private void trySelectWithNullInIndexedField(Type type) throws JSONException { // Before Assert.assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP)); // Register registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", type), new IndexSpec("value", type) }); Assert.assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP)); // Upsert JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':null}"); JSONObject soupElt1Upserted = store.upsert(THIRD_TEST_SOUP, soupElt1); // Smart sql final String smartSql = "SELECT {" + THIRD_TEST_SOUP + ":value}, {" + THIRD_TEST_SOUP + ":key} FROM {" + THIRD_TEST_SOUP + "} WHERE {" + THIRD_TEST_SOUP + ":key} = 'ka'"; final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 25); final JSONArray result = store.query(querySpec, 0); // Check Assert.assertNotNull("Result should not be null", result); Assert.assertEquals("One result expected", 1, result.length()); JSONTestHelper.assertSameJSON("Wrong result for query", new JSONArray("[[null, 'ka']]"), result); } /** * Test upsert soup element with null value in string indexed field * @throws JSONException */ @Test public void testUpsertWithNullInStringIndexedField() throws JSONException { tryUpsertWithNullInIndexedField(Type.string); } /** * Test upsert soup element with null value in json1 indexed field * @throws JSONException */ @Test public void testUpsertWithNullInJSON1IndexedField() throws JSONException { tryUpsertWithNullInIndexedField(Type.json1); } private void tryUpsertWithNullInIndexedField(Type type) throws JSONException { // Before Assert.assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP)); // Register registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", type), new IndexSpec("value", type) }); Assert.assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP)); // Upsert JSONObject soupElt1 = new JSONObject("{'key':'ka', 'value':null}"); JSONObject soupElt1Upserted = store.upsert(THIRD_TEST_SOUP, soupElt1); // Check JSONObject soupElt1Retrieved = store.retrieve(THIRD_TEST_SOUP, idOf(soupElt1Upserted)).getJSONObject(0); JSONTestHelper.assertSameJSON("Retrieve mismatch", soupElt1Upserted, soupElt1Retrieved); } /** * Test to verify an aggregate query on floating point values indexed as floating. 
* * @throws JSONException */ @Test public void testAggregateQueryOnFloatingIndexedField() throws JSONException { tryAggregateQueryOnIndexedField(Type.floating); } /** * Test to verify an aggregate query on floating point values indexed as JSON1. * * @throws JSONException */ @Test public void testAggregateQueryOnJSON1IndexedField() throws JSONException { tryAggregateQueryOnIndexedField(Type.json1); } private void tryAggregateQueryOnIndexedField(Type type) throws JSONException { final JSONObject soupElt1 = new JSONObject("{'amount':10.2}"); final JSONObject soupElt2 = new JSONObject("{'amount':9.9}"); final IndexSpec[] indexSpecs = { new IndexSpec("amount", type) }; registerSoup(store, FOURTH_TEST_SOUP, indexSpecs); Assert.assertTrue("Soup " + FOURTH_TEST_SOUP + " should have been created", store.hasSoup(FOURTH_TEST_SOUP)); store.upsert(FOURTH_TEST_SOUP, soupElt1); store.upsert(FOURTH_TEST_SOUP, soupElt2); final String smartSql = "SELECT SUM({" + FOURTH_TEST_SOUP + ":amount}) FROM {" + FOURTH_TEST_SOUP + "}"; final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 1); final JSONArray result = store.query(querySpec, 0); Assert.assertNotNull("Result should not be null", result); Assert.assertEquals("One result expected", 1, result.length()); Assert.assertEquals("Incorrect result received", 20.1, result.getJSONArray(0).getDouble(0), 0); store.dropSoup(FOURTH_TEST_SOUP); Assert.assertFalse("Soup " + FOURTH_TEST_SOUP + " should have been deleted", store.hasSoup(FOURTH_TEST_SOUP)); } /** * Test to verify an count query for a query with group by when the soup uses string indexes. * * @throws JSONException */ @Test public void testCountQueryWithGroupByUsingStringIndexes() throws JSONException { tryCountQueryWithGroupBy(Type.string); } /** * Test to verify an count query for a query with group by when the soup uses json1 indexes. 
* * @throws JSONException */ @Test public void testCountQueryWithGroupByUsingJSON1Indexes() throws JSONException { tryCountQueryWithGroupBy(Type.json1); } private void tryCountQueryWithGroupBy(Type type) throws JSONException { // Before Assert.assertFalse("Soup third_test_soup should not exist", store.hasSoup(THIRD_TEST_SOUP)); // Register registerSoup(store, THIRD_TEST_SOUP, new IndexSpec[] { new IndexSpec("key", type), new IndexSpec("value", type) }); Assert.assertTrue("Register soup call failed", store.hasSoup(THIRD_TEST_SOUP)); JSONObject soupElt1 = new JSONObject("{'key':'a', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'b', 'value':'va1'}"); JSONObject soupElt3 = new JSONObject("{'key':'c', 'value':'va2'}"); JSONObject soupElt4 = new JSONObject("{'key':'d', 'value':'va3'}"); JSONObject soupElt5 = new JSONObject("{'key':'e', 'value':'va3'}"); store.create(THIRD_TEST_SOUP, soupElt1); store.create(THIRD_TEST_SOUP, soupElt2); store.create(THIRD_TEST_SOUP, soupElt3); store.create(THIRD_TEST_SOUP, soupElt4); store.create(THIRD_TEST_SOUP, soupElt5); final String smartSql = "SELECT {" + THIRD_TEST_SOUP + ":value}, count(*) FROM {" + THIRD_TEST_SOUP + "} GROUP BY {" + THIRD_TEST_SOUP + ":value} ORDER BY {" + THIRD_TEST_SOUP + ":value}"; final QuerySpec querySpec = QuerySpec.buildSmartQuerySpec(smartSql, 25); final JSONArray result = store.query(querySpec, 0); Assert.assertNotNull("Result should not be null", result); Assert.assertEquals("Three results expected", 3, result.length()); JSONTestHelper.assertSameJSON("Wrong result for query", new JSONArray("[['va1', 2], ['va2', 1], ['va3', 2]]"), result); final int count = store.countQuery(querySpec); Assert.assertEquals("Incorrect count query", "SELECT count(*) FROM (" + smartSql + ")", querySpec.countSmartSql); Assert.assertEquals("Incorrect count", 3, count); } /** * Test to verify proper indexing of integer and longs */ @Test public void testIntegerIndexedField() throws JSONException { registerSoup(store, FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.integer) }); tryNumber(Type.integer, Integer.MIN_VALUE, Integer.MIN_VALUE); tryNumber(Type.integer, Integer.MAX_VALUE, Integer.MAX_VALUE); tryNumber(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE); tryNumber(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE); tryNumber(Type.integer, Double.MIN_VALUE, (long) Double.MIN_VALUE); tryNumber(Type.integer, Double.MAX_VALUE, (long) Double.MAX_VALUE); } /** * Test to verify proper indexing of doubles */ @Test public void testFloatingIndexedField() throws JSONException { registerSoup(store, FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.floating) }); tryNumber(Type.floating, Integer.MIN_VALUE, (double) Integer.MIN_VALUE); tryNumber(Type.floating, Integer.MAX_VALUE, (double) Integer.MAX_VALUE); tryNumber(Type.floating, Long.MIN_VALUE, (double) Long.MIN_VALUE); tryNumber(Type.floating, Long.MIN_VALUE, (double) Long.MIN_VALUE); tryNumber(Type.floating, Double.MIN_VALUE, Double.MIN_VALUE); tryNumber(Type.floating, Double.MAX_VALUE, Double.MAX_VALUE); } /** * Helper method for testIntegerIndexedField and testFloatingIndexedField * Insert soup element with number and check db * @param fieldType * @param valueIn * @param valueOut * @throws JSONException */ private void tryNumber(Type fieldType, Number valueIn, Number valueOut) throws JSONException { JSONObject elt = new JSONObject(); elt.put("amount", valueIn); Long id = store.upsert(FOURTH_TEST_SOUP, elt).getLong(SmartStore.SOUP_ENTRY_ID); Cursor c = null; try { 
final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); String soupTableName = getSoupTableName(FOURTH_TEST_SOUP); String amountColumnName = store.getSoupIndexSpecs(FOURTH_TEST_SOUP)[0].columnName; c = DBHelper.getInstance(db).query(db, soupTableName, new String[] { amountColumnName }, null, null, "id = " + id); Assert.assertTrue("Expected a soup element", c.moveToFirst()); Assert.assertEquals("Expected one soup element", 1, c.getCount()); if (fieldType == Type.integer) Assert.assertEquals("Not the value expected", valueOut.longValue(), c.getLong(0)); else if (fieldType == Type.floating) Assert.assertEquals("Not the value expected", valueOut.doubleValue(), c.getDouble(0), 0); } finally { safeClose(c); } } /** * Test using smart sql to retrieve integer indexed fields */ @Test public void testIntegerIndexedFieldWithSmartSql() throws JSONException { registerSoup(store, FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.integer) }); tryNumberWithSmartSql(Type.integer, Integer.MIN_VALUE, Integer.MIN_VALUE); tryNumberWithSmartSql(Type.integer, Integer.MAX_VALUE, Integer.MAX_VALUE); tryNumberWithSmartSql(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE); tryNumberWithSmartSql(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE); tryNumberWithSmartSql(Type.integer, Double.MIN_VALUE, (long) Double.MIN_VALUE); tryNumberWithSmartSql(Type.integer, Double.MAX_VALUE, (long) Double.MAX_VALUE); } /** * Test using smart sql to retrieve indexed fields holding doubles * NB smart sql will return a long when querying a double field that contains a long */ @Test public void testFloatingIndexedFieldWithSmartSql() throws JSONException { registerSoup(store, FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.floating) }); tryNumberWithSmartSql(Type.floating, Integer.MIN_VALUE, Integer.MIN_VALUE); tryNumberWithSmartSql(Type.floating, Integer.MAX_VALUE, Integer.MAX_VALUE); tryNumberWithSmartSql(Type.floating, Long.MIN_VALUE, Long.MIN_VALUE); tryNumberWithSmartSql(Type.floating, Long.MIN_VALUE, Long.MIN_VALUE); tryNumberWithSmartSql(Type.floating, Double.MIN_VALUE, Double.MIN_VALUE); tryNumberWithSmartSql(Type.floating, Double.MAX_VALUE, Double.MAX_VALUE); } /** * Test using smart sql to retrieve number fields indexed with json1 */ @Test public void testNumberFieldWithJSON1IndexWithSmartSql() throws JSONException { store.registerSoup(FOURTH_TEST_SOUP, new IndexSpec[] { new IndexSpec("amount", Type.json1) }); tryNumberWithSmartSql(Type.integer, Integer.MIN_VALUE, Integer.MIN_VALUE); tryNumberWithSmartSql(Type.integer, Integer.MAX_VALUE, Integer.MAX_VALUE); tryNumberWithSmartSql(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE); tryNumberWithSmartSql(Type.integer, Long.MIN_VALUE, Long.MIN_VALUE); tryNumberWithSmartSql(Type.floating, Math.PI, Math.PI); } /** * Helper method for testIntegerIndexedFieldWithSmartSql and testFloatingIndexedFieldWithSmartSql * Insert soup element with number and retrieve it back using smartsql * @param fieldType * @param valueIn * @param valueOut * @throws JSONException */ private void tryNumberWithSmartSql(Type fieldType, Number valueIn, Number valueOut) throws JSONException { String smartSql = "SELECT {" + FOURTH_TEST_SOUP + ":amount} FROM {" + FOURTH_TEST_SOUP + "} WHERE {" + FOURTH_TEST_SOUP + ":_soupEntryId} = "; JSONObject elt = new JSONObject(); elt.put("amount", valueIn); Long id = store.upsert(FOURTH_TEST_SOUP, elt).getLong(SmartStore.SOUP_ENTRY_ID); Number actualValueOut = (Number) store.query(QuerySpec.buildSmartQuerySpec(smartSql + id, 1), 
0).getJSONArray(0).get(0); if (fieldType == Type.integer) Assert.assertEquals("Not the value expected", valueOut.longValue(), actualValueOut.longValue()); else if (fieldType == Type.floating) Assert.assertEquals("Not the value expected", valueOut.doubleValue(), actualValueOut.doubleValue(), 0); } /** * Test for getDatabaseSize * * @throws JSONException */ @Test public void testGetDatabaseSize() throws JSONException { int initialSize = store.getDatabaseSize(); for (int i=0; i<100; i++) { JSONObject soupElt = new JSONObject("{'key':'abcd" + i + "', 'value':'va" + i + "', 'otherValue':'ova" + i + "'}"); store.create(TEST_SOUP, soupElt); } Assert.assertTrue("Database should be larger now", store.getDatabaseSize() > initialSize); } /** * Test registerSoup with json1 indexes * Register soup with multiple json1 indexes and a string index, check the underlying table and indexes in the database */ @Test public void testRegisterSoupWithJSON1() throws JSONException { Assert.assertFalse("Soup other_test_soup should not exist", store.hasSoup(OTHER_TEST_SOUP)); store.registerSoup(OTHER_TEST_SOUP, new IndexSpec[] {new IndexSpec("lastName", Type.json1), new IndexSpec("address.city", Type.json1), new IndexSpec("address.zipcode", Type.string)}); Assert.assertTrue("Register soup call failed", store.hasSoup(OTHER_TEST_SOUP)); // Check columns of soup table String soupTableName = getSoupTableName(OTHER_TEST_SOUP); checkColumns(soupTableName, Arrays.asList(new String[] {"id", "soup", "created", "lastModified", soupTableName + "_2"})); // Check soup indexes final IndexSpec[] indexSpecs = store.getSoupIndexSpecs(OTHER_TEST_SOUP); Assert.assertEquals("Wrong path", "lastName", indexSpecs[0].path); Assert.assertEquals("Wrong type", Type.json1, indexSpecs[0].type); Assert.assertEquals("Wrong column name", "json_extract(soup, '$.lastName')", indexSpecs[0].columnName); Assert.assertEquals("Wrong path", "address.city", indexSpecs[1].path); Assert.assertEquals("Wrong type", Type.json1, indexSpecs[1].type); Assert.assertEquals("Wrong column name", "json_extract(soup, '$.address.city')", indexSpecs[1].columnName); Assert.assertEquals("Wrong path", "address.zipcode", indexSpecs[2].path); Assert.assertEquals("Wrong type", Type.string, indexSpecs[2].type); Assert.assertEquals("Wrong column name", soupTableName + "_2", indexSpecs[2].columnName); // Check db indexes checkDatabaseIndexes(soupTableName, Arrays.asList(new String[] { "CREATE INDEX " + soupTableName + "_0_idx on " + soupTableName + " ( json_extract(soup, '$.lastName') )", "CREATE INDEX " + soupTableName + "_1_idx on " + soupTableName + " ( json_extract(soup, '$.address.city') )", "CREATE INDEX " + soupTableName + "_2_idx on " + soupTableName + " ( " + soupTableName + "_2 )", "CREATE INDEX " + soupTableName + "_created_idx on " + soupTableName + " ( created )", "CREATE INDEX " + soupTableName + "_lastModified_idx on " + soupTableName + " ( lastModified )" })); } /** * Testing Delete: create multiple soup elements and alter the soup, after that delete a entry, then check them all * @throws JSONException */ @Test public void testDeleteAgainstChangedSoup() throws JSONException { //create a new soup with multiple entries JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt4 = new JSONObject("{'key':'ka4', 'value':'va4'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); JSONObject 
soupElt2Created = store.create(TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3); JSONObject soupElt4Created = store.create(TEST_SOUP, soupElt4); //CASE 1: index spec from key to value tryAllQueryOnChangedSoupWithUpdate(TEST_SOUP, soupElt2Created, "value", new IndexSpec[]{new IndexSpec("value", Type.string)}, soupElt1Created, soupElt3Created, soupElt4Created); //CASE 2: index spec from string to json1 tryAllQueryOnChangedSoupWithUpdate(TEST_SOUP, soupElt4Created, "key", new IndexSpec[]{new IndexSpec("key", Type.json1)}, soupElt1Created, soupElt3Created); //CASE 3: add a index spec field tryAllQueryOnChangedSoupWithUpdate(TEST_SOUP, soupElt4Created, "key", new IndexSpec[]{new IndexSpec("key", Type.json1), new IndexSpec("value", Type.string)}, soupElt1Created, soupElt3Created); } protected void tryAllQueryOnChangedSoupWithUpdate(String soupName, JSONObject deletedEntry, String orderPath, IndexSpec[] newIndexSpecs, JSONObject... expectedResults) throws JSONException { //alert the soup store.alterSoup(soupName, newIndexSpecs, true); //delete an entry store.delete(soupName, idOf(deletedEntry)); // Query all - small page runQueryCheckResultsAndExplainPlan(soupName, QuerySpec.buildAllQuerySpec(soupName, orderPath, Order.ascending, 5), 0, false, "SCAN", expectedResults); } /** * Testing Upsert: create multiple soup elements and alter the soup, after that upsert a entry, then check them all * @throws JSONException */ @Test public void testUpsertAgainstChangedSoup() throws JSONException { //create a new soup with multiple entries JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka2', 'value':'va2'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka3', 'value':'va3'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3); JSONObject soupElt1ForUpsert = new JSONObject("{'key':'ka1u', 'value':'va1u'}"); JSONObject soupElt2ForUpsert = new JSONObject("{'key':'ka2u', 'value':'va2u'}"); JSONObject soupElt3ForUpsert = new JSONObject("{'key':'ka3u', 'value':'va3u'}"); //CASE 1: index spec from key to value store.alterSoup(TEST_SOUP, new IndexSpec[]{new IndexSpec("value", Type.string)}, true); //upsert an entry JSONObject soupElt1Upserted = store.upsert(TEST_SOUP, soupElt1ForUpsert); // Query all - small page runQueryCheckResultsAndExplainPlan(TEST_SOUP, QuerySpec.buildAllQuerySpec(TEST_SOUP, "value", Order.ascending, 10), 0, false, "SCAN", soupElt1Created, soupElt1Upserted, soupElt2Created, soupElt3Created); //CASE 2: index spec from string to json1 store.alterSoup(TEST_SOUP, new IndexSpec[]{new IndexSpec("key", Type.json1)}, true); //upsert an entry JSONObject soupElt2Upserted = store.upsert(TEST_SOUP, soupElt2ForUpsert); // Query all - small page runQueryCheckResultsAndExplainPlan(TEST_SOUP, QuerySpec.buildAllQuerySpec(TEST_SOUP, "key", Order.ascending, 10), 0, false, "SCAN", soupElt1Created, soupElt1Upserted, soupElt2Created, soupElt2Upserted, soupElt3Created); //CASE 3: add a index spec field store.alterSoup(TEST_SOUP, new IndexSpec[]{new IndexSpec("key", Type.json1), new IndexSpec("value", Type.string)}, true); //upsert an entry JSONObject soupElt3Upserted = store.upsert(TEST_SOUP, soupElt3ForUpsert); // Query all - small page runQueryCheckResultsAndExplainPlan(TEST_SOUP, QuerySpec.buildAllQuerySpec(TEST_SOUP, "key", Order.ascending, 10), 0, false, "SCAN", 
soupElt1Created, soupElt1Upserted, soupElt2Created, soupElt2Upserted, soupElt3Created, soupElt3Upserted); } /** * Testing Delete: create multiple soup elements and alter the soup, after that delete a entry, then check them all * @throws JSONException */ @Test public void testExactQueryAgainstChangedSoup() throws JSONException { //create a new soup with multiple entries JSONObject soupElt1 = new JSONObject("{'key':'ka1', 'value':'va1'}"); JSONObject soupElt2 = new JSONObject("{'key':'ka1-', 'value':'va1*'}"); JSONObject soupElt3 = new JSONObject("{'key':'ka1 ', 'value':'va1%'}"); JSONObject soupElt1Created = store.create(TEST_SOUP, soupElt1); JSONObject soupElt2Created = store.create(TEST_SOUP, soupElt2); JSONObject soupElt3Created = store.create(TEST_SOUP, soupElt3); //CASE 1: index spec from key to value tryExactQueryOnChangedSoup(TEST_SOUP, "value", "va1", new IndexSpec[]{new IndexSpec("value", Type.string)}, soupElt1Created); //CASE 2: index spec from string to json1 tryExactQueryOnChangedSoup(TEST_SOUP, "key", "ka1", new IndexSpec[]{new IndexSpec("key", Type.json1)}, soupElt1Created); //CASE 3: add a index spec field tryExactQueryOnChangedSoup(TEST_SOUP, "key", "ka1 ", new IndexSpec[]{new IndexSpec("key", Type.json1), new IndexSpec("value", Type.string)}, soupElt3Created); } protected void tryExactQueryOnChangedSoup(String soupName, String orderPath, String value, IndexSpec[] newIndexSpecs, JSONObject expectedResult) throws JSONException { // Alter the soup store.alterSoup(soupName, newIndexSpecs, true); // Exact Query runQueryCheckResultsAndExplainPlan(soupName, QuerySpec.buildExactQuerySpec(soupName, orderPath, value, null, null, 5), 0, false, "SEARCH", expectedResult); } /** * Test updateSoupNamesToAttrs */ @Test public void testUpdateTableNameAndAddColumns() { // Setup db and test values final SQLiteDatabase db = dbOpenHelper.getWritableDatabase(getEncryptionKey()); final String TEST_TABLE = "test_table"; final String NEW_TEST_TABLE = "new_test_table"; final String NEW_COLUMN = "new_column"; db.execSQL("CREATE TABLE " + TEST_TABLE + " (id INTEGER PRIMARY KEY)"); // Ensure old table doesn't already exist Cursor cursor = db.query("sqlite_master", new String[] { "sql" }, "name = ?", new String[] { NEW_TEST_TABLE }, null, null, null); Assert.assertEquals("New table should not already be in db.", 0, cursor.getCount()); cursor.close(); // Test table renamed and column added SmartStore.updateTableNameAndAddColumns(db, TEST_TABLE, NEW_TEST_TABLE, new String[] { NEW_COLUMN }); // Ensure new table has replaced old table cursor = db.query("sqlite_master", new String[] { "sql" }, "name = ?", new String[] { NEW_TEST_TABLE }, null, null, null); cursor.moveToFirst(); String schema = cursor.getString(0); cursor.close(); Assert.assertTrue("New table not found", schema.contains(NEW_TEST_TABLE)); Assert.assertTrue("New column not found", schema.contains(NEW_COLUMN)); // Clean up db.execSQL("DROP TABLE " + NEW_TEST_TABLE); } /** * Ensure correct soup spec is returned from getSoupSpec */ @Test public void testGetSoupSpec() throws JSONException { final String SOUP_SPEC_TEST = "soup_spec_test"; IndexSpec[] indexSpecs = new IndexSpec[] {new IndexSpec("index", Type.string)}; SoupSpec TEST_SPEC = new SoupSpec(SOUP_SPEC_TEST, SoupSpec.FEATURE_EXTERNAL_STORAGE); store.registerSoupWithSpec(TEST_SPEC, indexSpecs); // Act SoupSpec result = store.getSoupSpec(TEST_SPEC.getSoupName()); // Verify the result Assert.assertEquals("Soup name in soup spec is incorrect", SOUP_SPEC_TEST, result.getSoupName()); 
Assert.assertEquals("Feature set in soup spec is incorrect", SoupSpec.FEATURE_EXTERNAL_STORAGE, result.getFeatures().get(0)); // Verify JSON form Assert.assertEquals("Soup name in json of soup spec is incorrect", SOUP_SPEC_TEST, result.toJSON().getString("name")); Assert.assertEquals("Feature set in json of soup spec is incorrect", SoupSpec.FEATURE_EXTERNAL_STORAGE, result.toJSON().getJSONArray("features").get(0)); } }
1
17,255
We've had that test on iOS for years.
forcedotcom-SalesforceMobileSDK-Android
java
@@ -37,7 +37,7 @@ namespace BenchmarkDotNet.Extensions .DontEnforcePowerPlan(); // make sure BDN does not try to enforce High Performance power plan on Windows // See https://github.com/dotnet/roslyn/issues/42393 - job = job.WithArguments(new Argument[] { new MsBuildArgument("/p:DebugType=portable") }); + job = job.WithArguments(new Argument[] { new MsBuildArgument("/p:DebugType=portable"), new MsBuildArgument("-bl:benchmarkdotnet.binlog") }); } var config = ManualConfig.CreateEmpty()
1
using System.Collections.Immutable; using System.IO; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; using BenchmarkDotNet.Diagnosers; using BenchmarkDotNet.Exporters.Json; using Perfolizer.Horology; using BenchmarkDotNet.Jobs; using BenchmarkDotNet.Reports; using System.Collections.Generic; using Reporting; using BenchmarkDotNet.Loggers; using System.Linq; using BenchmarkDotNet.Exporters; namespace BenchmarkDotNet.Extensions { public static class RecommendedConfig { public static IConfig Create( DirectoryInfo artifactsPath, ImmutableHashSet<string> mandatoryCategories, int? partitionCount = null, int? partitionIndex = null, List<string> exclusionFilterValue = null, List<string> categoryExclusionFilterValue = null, Job job = null, bool getDiffableDisasm = false) { if (job is null) { job = Job.Default .WithWarmupCount(1) // 1 warmup is enough for our purpose .WithIterationTime(TimeInterval.FromMilliseconds(250)) // the default is 0.5s per iteration, which is slighlty too much for us .WithMinIterationCount(15) .WithMaxIterationCount(20) // we don't want to run more that 20 iterations .DontEnforcePowerPlan(); // make sure BDN does not try to enforce High Performance power plan on Windows // See https://github.com/dotnet/roslyn/issues/42393 job = job.WithArguments(new Argument[] { new MsBuildArgument("/p:DebugType=portable") }); } var config = ManualConfig.CreateEmpty() .AddLogger(ConsoleLogger.Default) // log output to console .AddValidator(DefaultConfig.Instance.GetValidators().ToArray()) // copy default validators .AddAnalyser(DefaultConfig.Instance.GetAnalysers().ToArray()) // copy default analysers .AddExporter(MarkdownExporter.GitHub) // export to GitHub markdown .AddColumnProvider(DefaultColumnProviders.Instance) // display default columns (method name, args etc) .AddJob(job.AsDefault()) // tell BDN that this are our default settings .WithArtifactsPath(artifactsPath.FullName) .AddDiagnoser(MemoryDiagnoser.Default) // MemoryDiagnoser is enabled by default .AddFilter(new PartitionFilter(partitionCount, partitionIndex)) .AddFilter(new ExclusionFilter(exclusionFilterValue)) .AddFilter(new CategoryExclusionFilter(categoryExclusionFilterValue)) .AddExporter(JsonExporter.Full) // make sure we export to Json .AddColumn(StatisticColumn.Median, StatisticColumn.Min, StatisticColumn.Max) .AddValidator(TooManyTestCasesValidator.FailOnError) .AddValidator(new UniqueArgumentsValidator()) // don't allow for duplicated arguments #404 .AddValidator(new MandatoryCategoryValidator(mandatoryCategories)) .WithSummaryStyle(SummaryStyle.Default.WithMaxParameterColumnWidth(36)); // the default is 20 and trims too aggressively some benchmark results if (Reporter.CreateReporter().InLab) { config = config.AddExporter(new PerfLabExporter()); } if (getDiffableDisasm) { config = config.AddDiagnoser(CreateDisassembler()); } return config; } private static DisassemblyDiagnoser CreateDisassembler() => new DisassemblyDiagnoser(new DisassemblyDiagnoserConfig( maxDepth: 1, // TODO: is depth == 1 enough? formatter: null, // TODO: enable diffable format printSource: false, // we are not interested in getting C# printInstructionAddresses: false, // would make the diffing hard, however could be useful to determine alignment exportGithubMarkdown: false, exportHtml: false, exportCombinedDisassemblyReport: false, exportDiff: false)); } }
1
12,516
This is for creating a binlog file when building the BDN-generated template project. I feel it may be helpful in general for BDN diagnostic purposes.
dotnet-performance
.cs
@@ -359,6 +359,18 @@ class QuteProc(testprocess.Process): else: self.send_cmd(':open ' + url) + def open_url(self, url, *, new_tab=False, new_window=False): + """Open the given url in qutebrowser.""" + if new_tab and new_window: + raise ValueError("new_tab and new_window given!") + + if new_tab: + self.send_cmd(':open -t ' + url) + elif new_window: + self.send_cmd(':open -w ' + url) + else: + self.send_cmd(':open ' + url) + def mark_expected(self, category=None, loglevel=None, message=None): """Mark a given logging message as expected.""" line = self.wait_for(category=category, loglevel=loglevel,
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Fixtures to run qutebrowser in a QProcess and communicate.""" import os import re import sys import time import os.path import datetime import logging import tempfile import contextlib import itertools import yaml import pytest from PyQt5.QtCore import pyqtSignal, QUrl import testprocess from qutebrowser.misc import ipc from qutebrowser.utils import log, utils from helpers import utils as testutils instance_counter = itertools.count() def is_ignored_qt_message(message): """Check if the message is listed in qt_log_ignore.""" # pylint: disable=no-member # WORKAROUND for https://bitbucket.org/logilab/pylint/issues/717/ # we should switch to generated-members after that regexes = pytest.config.getini('qt_log_ignore') for regex in regexes: if re.match(regex, message): return True return False class LogLine(testprocess.Line): """A parsed line from the qutebrowser log output. Attributes: timestamp/loglevel/category/module/function/line/message: Parsed from the log output. expected: Whether the message was expected or not. """ LOG_RE = re.compile(r""" (?P<timestamp>\d\d:\d\d:\d\d) \ (?P<loglevel>VDEBUG|DEBUG|INFO|WARNING|ERROR) \ +(?P<category>\w+) \ +(?P<module>(\w+|Unknown\ module)): (?P<function>[^"][^:]*|"[^"]+"): (?P<line>\d+) \ (?P<message>.+) """, re.VERBOSE) def __init__(self, data): super().__init__(data) match = self.LOG_RE.match(data) if match is None: raise testprocess.InvalidLine(data) self.timestamp = datetime.datetime.strptime(match.group('timestamp'), '%H:%M:%S') loglevel = match.group('loglevel') if loglevel == 'VDEBUG': self.loglevel = log.VDEBUG_LEVEL else: self.loglevel = getattr(logging, loglevel) self.category = match.group('category') module = match.group('module') if module == 'Unknown module': self.module = None else: self.module = module function = match.group('function') if function == 'none': self.function = None else: self.function = function.strip('"') line = int(match.group('line')) if self.function is None and line == 0: self.line = None else: self.line = line msg_match = re.match(r'^(\[(?P<prefix>\d+s ago)\] )?(?P<message>.*)', match.group('message')) self.prefix = msg_match.group('prefix') self.message = msg_match.group('message') self.expected = is_ignored_qt_message(self.message) class QuteProc(testprocess.Process): """A running qutebrowser process used for tests. Attributes: _delay: Delay to wait between commands. _ipc_socket: The IPC socket of the started instance. _httpbin: The HTTPBin webserver. basedir: The base directory for this instance. _focus_ready: Whether the main window got focused. _load_ready: Whether the about:blank page got loaded. _profile: If True, do profiling of the subprocesses. 
_instance_id: An unique ID for this QuteProc instance _run_counter: A counter to get an unique ID for each run. Signals: got_error: Emitted when there was an error log line. """ got_error = pyqtSignal() KEYS = ['timestamp', 'loglevel', 'category', 'module', 'function', 'line', 'message'] def __init__(self, httpbin, delay, *, profile=False, parent=None): super().__init__(parent) self._profile = profile self._delay = delay self._httpbin = httpbin self._ipc_socket = None self.basedir = None self._focus_ready = False self._load_ready = False self._instance_id = next(instance_counter) self._run_counter = itertools.count() def _is_ready(self, what): """Called by _parse_line if loading/focusing is done. When both are done, emits the 'ready' signal. """ if what == 'load': self._load_ready = True elif what == 'focus': self._focus_ready = True else: raise ValueError("Invalid value {!r} for 'what'.".format(what)) if self._load_ready and self._focus_ready: self.ready.emit() def _parse_line(self, line): try: log_line = LogLine(line) except testprocess.InvalidLine: if line.startswith(' '): # Multiple lines in some log output... return None elif not line.strip(): return None elif is_ignored_qt_message(line): return None else: raise self._log(line) start_okay_message_load = ( "load status for <qutebrowser.browser.webview.WebView tab_id=0 " "url='about:blank'>: LoadStatus.success") start_okay_message_focus = ( "Focus object changed: <qutebrowser.browser.webview.WebView " "tab_id=0 url='about:blank'>") if (log_line.category == 'ipc' and log_line.message.startswith("Listening as ")): self._ipc_socket = log_line.message.split(' ', maxsplit=2)[2] elif (log_line.category == 'webview' and log_line.message == start_okay_message_load): self._is_ready('load') elif (log_line.category == 'misc' and log_line.message == start_okay_message_focus): self._is_ready('focus') elif (log_line.category == 'init' and log_line.module == 'standarddir' and log_line.function == 'init' and log_line.message.startswith('Base directory:')): self.basedir = log_line.message.split(':', maxsplit=1)[1].strip() elif self._is_error_logline(log_line): self.got_error.emit() return log_line def _executable_args(self): if hasattr(sys, 'frozen'): if self._profile: raise Exception("Can't profile with sys.frozen!") executable = os.path.join(os.path.dirname(sys.executable), 'qutebrowser') args = [] else: executable = sys.executable if self._profile: profile_dir = os.path.join(os.getcwd(), 'prof') profile_id = '{}_{}'.format(self._instance_id, next(self._run_counter)) profile_file = os.path.join(profile_dir, '{}.pstats'.format(profile_id)) try: os.mkdir(profile_dir) except FileExistsError: pass args = [os.path.join('scripts', 'dev', 'run_profile.py'), '--profile-tool', 'none', '--profile-file', profile_file] else: args = ['-m', 'qutebrowser'] return executable, args def _default_args(self): return ['--debug', '--no-err-windows', '--temp-basedir', 'about:blank'] def path_to_url(self, path, *, port=None, https=False): """Get a URL based on a filename for the localhost webserver. URLs like about:... and qute:... are handled specially and returned verbatim. 
""" if path.startswith('about:') or path.startswith('qute:'): return path else: return '{}://localhost:{}/{}'.format( 'https' if https else 'http', self._httpbin.port if port is None else port, path if path != '/' else '') def wait_for_js(self, message): """Wait for the given javascript console message.""" self.wait_for(category='js', function='javaScriptConsoleMessage', message='[*] {}'.format(message)) def _is_error_logline(self, msg): """Check if the given LogLine is some kind of error message.""" is_js_error = (msg.category == 'js' and msg.function == 'javaScriptConsoleMessage' and testutils.pattern_match(pattern='[*] [FAIL] *', value=msg.message)) # Try to complain about the most common mistake when accidentally # loading external resources. is_ddg_load = testutils.pattern_match( pattern="load status for <qutebrowser.browser.webview.WebView " "tab_id=* url='*duckduckgo*'>: *", value=msg.message) return msg.loglevel > logging.INFO or is_js_error or is_ddg_load def _maybe_skip(self): """Skip the test if [SKIP] lines were logged.""" skip_texts = [] for msg in self._data: if (msg.category == 'js' and msg.function == 'javaScriptConsoleMessage' and testutils.pattern_match(pattern='[*] [SKIP] *', value=msg.message)): skip_texts.append(msg.message.partition(' [SKIP] ')[2]) if skip_texts: pytest.skip(', '.join(skip_texts)) def after_test(self, did_fail): # pylint: disable=arguments-differ """Handle unexpected/skip logging and clean up after each test. Args: did_fail: Set if the main test failed already, then logged errors are ignored. """ __tracebackhide__ = True bad_msgs = [msg for msg in self._data if self._is_error_logline(msg) and not msg.expected] if did_fail: super().after_test() return try: if bad_msgs: text = 'Logged unexpected errors:\n\n' + '\n'.join( str(e) for e in bad_msgs) # We'd like to use pytrace=False here but don't as a WORKAROUND # for https://github.com/pytest-dev/pytest/issues/1316 pytest.fail(text) else: self._maybe_skip() finally: super().after_test() def send_cmd(self, command, count=None): """Send a command to the running qutebrowser instance.""" assert self._ipc_socket is not None time.sleep(self._delay / 1000) if count is not None: command = ':{}:{}'.format(count, command.lstrip(':')) ipc.send_to_running_instance(self._ipc_socket, [command], target_arg='') self.wait_for(category='commands', module='command', function='run', message='command called: *') def get_setting(self, sect, opt): """Get the value of a qutebrowser setting.""" self.send_cmd(':set {} {}?'.format(sect, opt)) msg = self.wait_for(loglevel=logging.INFO, category='message', message='{} {} = *'.format(sect, opt)) return msg.message.split(' = ')[1] def set_setting(self, sect, opt, value): self.send_cmd(':set "{}" "{}" "{}"'.format(sect, opt, value)) self.wait_for(category='config', message='Config option changed: *') @contextlib.contextmanager def temp_setting(self, sect, opt, value): """Context manager to set a setting and reset it on exit.""" old_value = self.get_setting(sect, opt) self.set_setting(sect, opt, value) yield self.set_setting(sect, opt, old_value) def open_path(self, path, *, new_tab=False, new_window=False, port=None, https=False): """Open the given path on the local webserver in qutebrowser.""" if new_tab and new_window: raise ValueError("new_tab and new_window given!") url = self.path_to_url(path, port=port, https=https) if new_tab: self.send_cmd(':open -t ' + url) elif new_window: self.send_cmd(':open -w ' + url) else: self.send_cmd(':open ' + url) def mark_expected(self, 
category=None, loglevel=None, message=None): """Mark a given logging message as expected.""" line = self.wait_for(category=category, loglevel=loglevel, message=message) line.expected = True def wait_for_load_finished(self, path, *, port=None, https=False, timeout=None, load_status='success'): """Wait until any tab has finished loading.""" if timeout is None: if 'CI' in os.environ: timeout = 15000 else: timeout = 5000 url = self.path_to_url(path, port=port, https=https) # We really need the same representation that the webview uses in its # __repr__ url = utils.elide(QUrl(url).toDisplayString(QUrl.EncodeUnicode), 100) pattern = re.compile( r"(load status for <qutebrowser\.browser\.webview\.WebView " r"tab_id=\d+ url='{url}/?'>: LoadStatus\.{load_status}|fetch: " r"PyQt5\.QtCore\.QUrl\('{url}'\) -> .*)".format( load_status=re.escape(load_status), url=re.escape(url))) self.wait_for(message=pattern, timeout=timeout) def get_session(self): """Save the session and get the parsed session data.""" with tempfile.TemporaryDirectory() as tmpdir: session = os.path.join(tmpdir, 'session.yml') self.send_cmd(':session-save "{}"'.format(session)) self.wait_for(category='message', loglevel=logging.INFO, message='Saved session {}.'.format(session)) with open(session, encoding='utf-8') as f: data = f.read() self._log(data) return yaml.load(data) def get_content(self, plain=True): """Get the contents of the current page.""" with tempfile.TemporaryDirectory() as tmpdir: path = os.path.join(tmpdir, 'page') if plain: self.send_cmd(':debug-dump-page --plain "{}"'.format(path)) else: self.send_cmd(':debug-dump-page "{}"'.format(path)) self.wait_for(category='message', loglevel=logging.INFO, message='Dumped page to {}.'.format(path)) with open(path, 'r', encoding='utf-8') as f: return f.read() def press_keys(self, keys): """Press the given keys using :fake-key.""" self.send_cmd(':fake-key -g "{}"'.format(keys)) @pytest.yield_fixture(scope='module') def quteproc_process(qapp, httpbin, request): """Fixture for qutebrowser process which is started once per file.""" delay = request.config.getoption('--qute-delay') profile = request.config.getoption('--qute-profile-subprocs') proc = QuteProc(httpbin, delay, profile=profile) proc.start() yield proc proc.terminate() @pytest.yield_fixture def quteproc(quteproc_process, httpbin, request): """Per-test qutebrowser fixture which uses the per-file process.""" request.node._quteproc_log = quteproc_process.captured_log quteproc_process.before_test() yield quteproc_process quteproc_process.after_test(did_fail=request.node.rep_call.failed) @pytest.yield_fixture def quteproc_new(qapp, httpbin, request): """Per-test qutebrowser process to test invocations.""" delay = request.config.getoption('--qute-delay') profile = request.config.getoption('--qute-profile-subprocs') proc = QuteProc(httpbin, delay, profile=profile) request.node._quteproc_log = proc.captured_log # Not calling before_test here as that would start the process yield proc proc.after_test(did_fail=request.node.rep_call.failed)
1
14,378
Can you adjust `open_path` to simply call `path_to_url` and then `open_url` instead of duplicating the code?
qutebrowser-qutebrowser
py
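The review comment on this record is concrete enough to sketch: once the `open_url` helper from the patch exists, `open_path` can build the URL with `path_to_url` and delegate the `:open -t` / `:open -w` dispatch instead of repeating it. Below is a minimal illustration of that refactor, reusing the method names shown above; it is a sketch of the suggestion, not the project's actual follow-up change.

# Hypothetical replacement body for QuteProc.open_path after the refactor.
# path_to_url() already exists in the class; open_url() is the helper added
# by the patch in this record.
def open_path(self, path, *, new_tab=False, new_window=False, port=None,
              https=False):
    """Open the given path on the local webserver in qutebrowser."""
    url = self.path_to_url(path, port=port, https=https)
    self.open_url(url, new_tab=new_tab, new_window=new_window)

The new_tab/new_window sanity check does not need to be repeated here, since open_url already raises ValueError when both flags are given.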
@@ -24,10 +24,10 @@ import ( var logGraphsyncFetcher = logging.Logger("net.graphsync_fetcher") const ( - // Timeout for a single graphsync request (which may be for many blocks). - // We might prefer this timeout to scale with the number of blocks expected in the fetch, - // when that number is large. - requestTimeout = 60 * time.Second + // Timeout for a single graphsync request getting "stuck" + // -- if no more responses are received for a period greater than this, + // we will assume the request has hung-up and cancel it + unresponsiveTimeout = 10 * time.Second ) // Fetcher defines an interface that may be used to fetch data from the network.
1
package net import ( "context" "fmt" "time" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipfs/go-graphsync" bstore "github.com/ipfs/go-ipfs-blockstore" logging "github.com/ipfs/go-log" "github.com/ipld/go-ipld-prime" ipldfree "github.com/ipld/go-ipld-prime/impl/free" cidlink "github.com/ipld/go-ipld-prime/linking/cid" selectorbuilder "github.com/ipld/go-ipld-prime/traversal/selector/builder" "github.com/libp2p/go-libp2p-core/peer" "github.com/pkg/errors" "github.com/filecoin-project/go-filecoin/consensus" "github.com/filecoin-project/go-filecoin/types" ) var logGraphsyncFetcher = logging.Logger("net.graphsync_fetcher") const ( // Timeout for a single graphsync request (which may be for many blocks). // We might prefer this timeout to scale with the number of blocks expected in the fetch, // when that number is large. requestTimeout = 60 * time.Second ) // Fetcher defines an interface that may be used to fetch data from the network. type Fetcher interface { // FetchTipSets will only fetch TipSets that evaluate to `false` when passed to `done`, // this includes the provided `ts`. The TipSet that evaluates to true when // passed to `done` will be in the returned slice. The returns slice of TipSets is in Traversal order. FetchTipSets(context.Context, types.TipSetKey, peer.ID, func(types.TipSet) (bool, error)) ([]types.TipSet, error) } // interface conformance check var _ Fetcher = (*GraphSyncFetcher)(nil) // GraphExchange is an interface wrapper to Graphsync so it can be stubbed in // unit testing type GraphExchange interface { Request(ctx context.Context, p peer.ID, root ipld.Link, selector ipld.Node) (<-chan graphsync.ResponseProgress, <-chan error) } type graphsyncFallbackPeerTracker interface { List() []*types.ChainInfo Self() peer.ID } // GraphSyncFetcher is used to fetch data over the network. It is implemented // using a Graphsync exchange to fetch tipsets recursively type GraphSyncFetcher struct { exchange GraphExchange validator consensus.SyntaxValidator store bstore.Blockstore ssb selectorbuilder.SelectorSpecBuilder peerTracker graphsyncFallbackPeerTracker } // NewGraphSyncFetcher returns a GraphsyncFetcher wired up to the input Graphsync exchange and // attached local blockservice for reloading blocks in memory once they are returned func NewGraphSyncFetcher(ctx context.Context, exchange GraphExchange, blockstore bstore.Blockstore, bv consensus.SyntaxValidator, pt graphsyncFallbackPeerTracker) *GraphSyncFetcher { gsf := &GraphSyncFetcher{ store: blockstore, validator: bv, exchange: exchange, ssb: selectorbuilder.NewSelectorSpecBuilder(ipldfree.NodeBuilder()), peerTracker: pt, } return gsf } // Graphsync can fetch a fixed number of tipsets from a remote peer recursively // with a single request. 
We don't know until we get all of the response whether // our final tipset was included in the response // // When fetching tipsets we try to balance performance for two competing cases: // - an initial chain sync that is likely to fetch lots and lots of tipsets // - a future update sync that is likely to fetch only a few // // To do this, the Graphsync fetcher starts fetching a single tipset at a time, // then gradually ramps up to fetch lots of tipsets at once, up to a fixed limit // // The constants below determine the maximum number of tipsets fetched at once // (maxRecursionDepth) and how fast the ramp up is (recursionMultipler) const maxRecursionDepth = 64 const recursionMultiplier = 4 // FetchTipSets gets Tipsets starting from the given tipset key and continuing until // the done function returns true or errors // // For now FetchTipSets operates in two parts: // 1. It fetches relevant blocks through Graphsync, which writes them to the block store // 2. It reads them from the block store and validates their syntax as blocks // and constructs a tipset // This does have a potentially unwanted side effect of writing blocks to the block store // that later don't validate (bitswap actually does this as well) // // TODO: In the future, the blocks will be validated directly through graphsync as // go-filecoin migrates to the same IPLD library used by go-graphsync (go-ipld-prime) // // See: https://github.com/filecoin-project/go-filecoin/issues/3175 func (gsf *GraphSyncFetcher) FetchTipSets(ctx context.Context, tsKey types.TipSetKey, originatingPeer peer.ID, done func(types.TipSet) (bool, error)) ([]types.TipSet, error) { // We can run into issues if we fetch from an originatingPeer that we // are not already connected to so we usually ignore this value. // However if the originator is our own peer ID (i.e. this node mined // the block) then we need to fetch from ourselves to retrieve it fetchFromSelf := originatingPeer == gsf.peerTracker.Self() rpf, err := newRequestPeerFinder(gsf.peerTracker, fetchFromSelf) if err != nil { return nil, err } // fetch initial tipset startingTipset, err := gsf.fetchFirstTipset(ctx, tsKey, rpf) if err != nil { return nil, err } // fetch remaining tipsets recursively return gsf.fetchRemainingTipsets(ctx, startingTipset, rpf, done) } func (gsf *GraphSyncFetcher) fetchFirstTipset(ctx context.Context, key types.TipSetKey, rpf *requestPeerFinder) (types.TipSet, error) { blocksToFetch := key.ToSlice() for { peer := rpf.CurrentPeer() logGraphsyncFetcher.Infof("fetching initial tipset %s from peer %s", key, peer) err := gsf.fetchBlocks(ctx, blocksToFetch, peer) if err != nil { // A likely case is the peer doesn't have the tipset. When graphsync provides // this status we should quiet this log. logGraphsyncFetcher.Infof("request failed: %s", err) } var verifiedTip types.TipSet verifiedTip, blocksToFetch, err = gsf.loadAndVerify(ctx, key) if err != nil { return types.UndefTipSet, err } if len(blocksToFetch) == 0 { return verifiedTip, nil } logGraphsyncFetcher.Infof("incomplete fetch for initial tipset %s, trying new peer", key) // Some of the blocks may have been fetched, but avoid tricksy optimization here and just // request the whole bunch again. Graphsync internally will avoid redundant network requests. 
err = rpf.FindNextPeer() if err != nil { return types.UndefTipSet, errors.Wrapf(err, "fetching tipset: %s", key) } } } func (gsf *GraphSyncFetcher) fetchRemainingTipsets(ctx context.Context, startingTipset types.TipSet, rpf *requestPeerFinder, done func(types.TipSet) (bool, error)) ([]types.TipSet, error) { out := []types.TipSet{startingTipset} isDone, err := done(startingTipset) if err != nil { return nil, err } // fetch remaining tipsets recursively recursionDepth := 1 anchor := startingTipset // The tipset above the one we actually want to fetch. for !isDone { // Because a graphsync query always starts from a single CID, // we fetch tipsets anchored from any block in the last (i.e. highest) tipset and // recursively fetching sets of parents. childBlock := anchor.At(0) peer := rpf.CurrentPeer() logGraphsyncFetcher.Infof("fetching chain from height %d, block %s, peer %s, %d levels", childBlock.Height, childBlock.Cid(), peer, recursionDepth) err := gsf.fetchBlocksRecursively(ctx, childBlock.Cid(), peer, recursionDepth) if err != nil { // something went wrong in a graphsync request, but we want to keep trying other peers, so // just log error logGraphsyncFetcher.Infof("request failed, trying another peer: %s", err) } var incomplete []cid.Cid for i := 0; !isDone && i < recursionDepth; i++ { tsKey, err := anchor.Parents() if err != nil { return nil, err } var verifiedTip types.TipSet verifiedTip, incomplete, err = gsf.loadAndVerify(ctx, tsKey) if err != nil { return nil, err } if len(incomplete) == 0 { out = append(out, verifiedTip) isDone, err = done(verifiedTip) if err != nil { return nil, err } anchor = verifiedTip } else { logGraphsyncFetcher.Infof("incomplete fetch for tipset %s, trying new peer", tsKey) err := rpf.FindNextPeer() if err != nil { return nil, errors.Wrapf(err, "fetching tipset: %s", tsKey) } break // Stop verifying, make another fetch } } if len(incomplete) == 0 && recursionDepth < maxRecursionDepth { recursionDepth *= recursionMultiplier } } return out, nil } // fetchBlocks requests a single set of cids as individual blocks, fetching // non-recursively func (gsf *GraphSyncFetcher) fetchBlocks(ctx context.Context, cids []cid.Cid, targetPeer peer.ID) error { selector := gsf.ssb.ExploreFields(func(efsb selectorbuilder.ExploreFieldsSpecBuilder) { efsb.Insert("messages", gsf.ssb.Matcher()) efsb.Insert("messageReceipts", gsf.ssb.Matcher()) }).Node() errChans := make([]<-chan error, 0, len(cids)) requestCtx, requestCancel := context.WithTimeout(ctx, requestTimeout) defer requestCancel() for _, c := range cids { _, errChan := gsf.exchange.Request(requestCtx, targetPeer, cidlink.Link{Cid: c}, selector) errChans = append(errChans, errChan) } // Any of the multiple parallel requests might fail. Wait for all of them to complete, then // return any error (in this case, the last one to be received). var anyError error for _, errChan := range errChans { for err := range errChan { anyError = err } } return anyError } // fetchBlocksRecursively gets the blocks from recursionDepth ancestor tipsets // starting from baseCid. 
func (gsf *GraphSyncFetcher) fetchBlocksRecursively(ctx context.Context, baseCid cid.Cid, targetPeer peer.ID, recursionDepth int) error { requestCtx, requestCancel := context.WithTimeout(ctx, requestTimeout) defer requestCancel() // recursive selector to fetch n sets of parent blocks // starting from block matching base cid: // - fetch all parent blocks, with messages/receipts // - with exactly the first parent block, repeat again for its parents // - continue up to recursion depth selector := gsf.ssb.ExploreRecursive(recursionDepth, gsf.ssb.ExploreFields(func(efsb selectorbuilder.ExploreFieldsSpecBuilder) { efsb.Insert("parents", gsf.ssb.ExploreUnion( gsf.ssb.ExploreAll( gsf.ssb.ExploreFields(func(efsb selectorbuilder.ExploreFieldsSpecBuilder) { efsb.Insert("messages", gsf.ssb.Matcher()) efsb.Insert("messageReceipts", gsf.ssb.Matcher()) }), ), gsf.ssb.ExploreIndex(0, gsf.ssb.ExploreRecursiveEdge()), )) })).Node() _, errChan := gsf.exchange.Request(requestCtx, targetPeer, cidlink.Link{Cid: baseCid}, selector) for err := range errChan { return err } return nil } // Loads the IPLD blocks for all blocks in a tipset, and checks for the presence of the // message and receipt list structures in the store. // Returns the tipset if complete. Otherwise it returns UndefTipSet and the CIDs of // all blocks missing either their header, messages or receipts. func (gsf *GraphSyncFetcher) loadAndVerify(ctx context.Context, key types.TipSetKey) (types.TipSet, []cid.Cid, error) { // Load the block headers that exist. incomplete := make(map[cid.Cid]struct{}) tip, err := gsf.loadTipHeaders(ctx, key, incomplete) if err != nil { return types.UndefTipSet, nil, err } err = gsf.loadAndVerifySubComponents(ctx, tip, incomplete, func(blk *types.Block) cid.Cid { return blk.Messages }, func(rawBlock blocks.Block) error { messages, err := types.DecodeMessages(rawBlock.RawData()) if err != nil { return errors.Wrapf(err, "fetched data (cid %s) was not a message collection", rawBlock.Cid().String()) } if err := gsf.validator.ValidateMessagesSyntax(ctx, messages); err != nil { return errors.Wrapf(err, "invalid messages for for message collection (cid %s)", rawBlock.Cid()) } return nil }) if err != nil { return types.UndefTipSet, nil, err } err = gsf.loadAndVerifySubComponents(ctx, tip, incomplete, func(blk *types.Block) cid.Cid { return blk.MessageReceipts }, func(rawBlock blocks.Block) error { receipts, err := types.DecodeReceipts(rawBlock.RawData()) if err != nil { return errors.Wrapf(err, "fetched data (cid %s) was not a message receipt collection", rawBlock.Cid().String()) } if err := gsf.validator.ValidateReceiptsSyntax(ctx, receipts); err != nil { return errors.Wrapf(err, "invalid receipts for for receipt collection (cid %s)", rawBlock.Cid()) } return nil }) if err != nil { return types.UndefTipSet, nil, err } if len(incomplete) > 0 { incompleteArr := make([]cid.Cid, 0, len(incomplete)) for cid := range incomplete { incompleteArr = append(incompleteArr, cid) } return types.UndefTipSet, incompleteArr, nil } return tip, nil, nil } // Loads and validates the block headers for a tipset. Returns the tipset if complete, // else the cids of blocks which are not yet stored. 
func (gsf *GraphSyncFetcher) loadTipHeaders(ctx context.Context, key types.TipSetKey, incomplete map[cid.Cid]struct{}) (types.TipSet, error) { rawBlocks := make([]blocks.Block, 0, key.Len()) for it := key.Iter(); !it.Complete(); it.Next() { hasBlock, err := gsf.store.Has(it.Value()) if err != nil { return types.UndefTipSet, err } if !hasBlock { incomplete[it.Value()] = struct{}{} continue } rawBlock, err := gsf.store.Get(it.Value()) if err != nil { return types.UndefTipSet, err } rawBlocks = append(rawBlocks, rawBlock) } // Validate the headers. validatedBlocks, err := sanitizeBlocks(ctx, rawBlocks, gsf.validator) if err != nil || len(validatedBlocks) == 0 { return types.UndefTipSet, err } tip, err := types.NewTipSet(validatedBlocks...) return tip, err } type getBlockComponentFn func(*types.Block) cid.Cid type verifyComponentFn func(blocks.Block) error // Loads and validates the block messages for a tipset. Returns the tipset if complete, // else the cids of blocks which are not yet stored. func (gsf *GraphSyncFetcher) loadAndVerifySubComponents(ctx context.Context, tip types.TipSet, incomplete map[cid.Cid]struct{}, getBlockComponent getBlockComponentFn, verifyComponent verifyComponentFn) error { subComponents := make([]blocks.Block, 0, tip.Len()) // Check that nested structures are also stored, recording any that are missing as incomplete. for i := 0; i < tip.Len(); i++ { blk := tip.At(i) link := getBlockComponent(blk) ok, err := gsf.store.Has(link) if err != nil { return err } if !ok { incomplete[blk.Cid()] = struct{}{} continue } rawBlock, err := gsf.store.Get(link) if err != nil { return err } subComponents = append(subComponents, rawBlock) } for _, rawBlock := range subComponents { err := verifyComponent(rawBlock) if err != nil { return err } } return nil } type requestPeerFinder struct { peerTracker graphsyncFallbackPeerTracker currentPeer peer.ID triedPeers map[peer.ID]struct{} } func newRequestPeerFinder(peerTracker graphsyncFallbackPeerTracker, fetchFromSelf bool) (*requestPeerFinder, error) { pri := &requestPeerFinder{ peerTracker: peerTracker, triedPeers: make(map[peer.ID]struct{}), } // If the new cid triggering this request came from ourselves then // the first peer to request from should be ourselves. if fetchFromSelf { pri.triedPeers[peerTracker.Self()] = struct{}{} pri.currentPeer = peerTracker.Self() return pri, nil } // Get a peer ID from the peer tracker err := pri.FindNextPeer() if err != nil { return nil, err } return pri, nil } func (pri *requestPeerFinder) CurrentPeer() peer.ID { return pri.currentPeer } func (pri *requestPeerFinder) FindNextPeer() error { chains := pri.peerTracker.List() for _, chain := range chains { if _, tried := pri.triedPeers[chain.Peer]; !tried { pri.triedPeers[chain.Peer] = struct{}{} pri.currentPeer = chain.Peer return nil } } return fmt.Errorf("Unable to find any untried peers") } func sanitizeBlocks(ctx context.Context, unsanitized []blocks.Block, validator consensus.BlockSyntaxValidator) ([]*types.Block, error) { var blocks []*types.Block for _, u := range unsanitized { block, err := types.DecodeBlock(u.RawData()) if err != nil { return nil, errors.Wrapf(err, "fetched data (cid %s) was not a block", u.Cid().String()) } if err := validator.ValidateSyntax(ctx, block); err != nil { return nil, errors.Wrapf(err, "invalid block %s", block.Cid()) } blocks = append(blocks, block) } return blocks, nil }
1
21,500
I'm curious -- do we have information on the upper bound of the delay we would expect with high probability from a peer with no network issues? My intuition is that we want to set this as low as we can reasonably get away with before we start killing productive connections. My uninformed intuition is also that 10 seconds is probably higher than we need and I'd love to know if this is wrong and 10 seconds is already pushing the limit.
filecoin-project-venus
go
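The patch on this record swaps a fixed 60-second per-request timeout for an "unresponsive" timeout: the fetch is cancelled only when no responses have arrived for longer than the threshold, and the reviewer's question is about how low that threshold can safely be set. A minimal, language-agnostic model of the behaviour, assuming responses arrive on a queue and a None sentinel marks normal completion (this is not the Go implementation, only an illustration of the idea):

import queue

def drain_with_unresponsive_timeout(responses, unresponsive_timeout=10.0):
    # Cancel only when the gap between consecutive responses exceeds the
    # threshold; a large but steadily progressing fetch is never cut off,
    # unlike with a single overall request timeout.
    items = []
    while True:
        try:
            item = responses.get(timeout=unresponsive_timeout)
        except queue.Empty:
            raise TimeoutError(
                f"request unresponsive for {unresponsive_timeout}s, cancelling")
        if item is None:  # sentinel: the request completed normally
            return items
        items.append(item)

Under this model the threshold only has to exceed the longest gap expected between individual responses from a healthy peer, not the duration of the whole fetch, which is the trade-off the reviewer is asking about.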
@@ -44,7 +44,7 @@ public class SarOperation extends AbstractFixedCostOperation { } else { final int shiftAmountInt = shiftAmount.toInt(); - if (shiftAmountInt >= 256) { + if (shiftAmountInt >= 256 || shiftAmountInt < 0) { frame.pushStackItem(negativeNumber ? ALL_BITS : UInt256.ZERO); } else { // first perform standard shift right.
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.evm.operation; import static org.apache.tuweni.bytes.Bytes32.leftPad; import org.hyperledger.besu.evm.EVM; import org.hyperledger.besu.evm.frame.MessageFrame; import org.hyperledger.besu.evm.gascalculator.GasCalculator; import org.apache.tuweni.bytes.Bytes; import org.apache.tuweni.bytes.Bytes32; import org.apache.tuweni.units.bigints.UInt256; public class SarOperation extends AbstractFixedCostOperation { private static final UInt256 ALL_BITS = UInt256.MAX_VALUE; public SarOperation(final GasCalculator gasCalculator) { super(0x1d, "SAR", 2, 1, 1, gasCalculator, gasCalculator.getVeryLowTierGasCost()); } @Override public Operation.OperationResult executeFixedCostOperation( final MessageFrame frame, final EVM evm) { Bytes shiftAmount = frame.popStackItem(); final Bytes value = leftPad(frame.popStackItem()); final boolean negativeNumber = value.get(0) < 0; if (shiftAmount.size() > 4 && (shiftAmount = shiftAmount.trimLeadingZeros()).size() > 4) { frame.pushStackItem(negativeNumber ? ALL_BITS : UInt256.ZERO); } else { final int shiftAmountInt = shiftAmount.toInt(); if (shiftAmountInt >= 256) { frame.pushStackItem(negativeNumber ? ALL_BITS : UInt256.ZERO); } else { // first perform standard shift right. Bytes result = value.shiftRight(shiftAmountInt); // if a negative number, carry through the sign. if (negativeNumber) { final Bytes32 significantBits = ALL_BITS.shiftLeft(256 - shiftAmountInt); result = result.or(significantBits); } frame.pushStackItem(result); } } return successResponse; } }
1
26,626
What happens if `shiftAmountInt` == 0?
hyperledger-besu
java
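On the question above: a shift of exactly 0 falls through to the final branch, where `value.shiftRight(0)` keeps the operand and the sign-fill mask `ALL_BITS.shiftLeft(256 - 0)` should contribute no bits, so the operation appears to act as the identity, which is the correct SAR-by-zero result. A standalone JDK sketch (illustrative only, not the Besu/Tuweni code path) of that expected semantics:

// Illustrative only (plain JDK, not the Besu/Tuweni code path): an arithmetic, sign-extending
// right shift by zero returns the operand unchanged for both positive and negative values.
import java.math.BigInteger;

public class SarByZero {
    public static void main(String[] args) {
        BigInteger positive = new BigInteger("42");
        BigInteger negative = new BigInteger("-42");
        System.out.println(positive.shiftRight(0));   // 42
        System.out.println(negative.shiftRight(0));   // -42
        // The saturating case the guard handles explicitly: a huge shift of a negative
        // value collapses to "all bits set", i.e. -1 in two's complement.
        System.out.println(negative.shiftRight(300)); // -1
    }
}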
@@ -27,10 +27,6 @@ module Bolt if @config['interpreters'] @config['interpreters'] = normalize_interpreters(@config['interpreters']) end - - if Bolt::Util.windows? && @config['run-as'] - raise Bolt::ValidationError, "run-as is not supported when using PowerShell" - end end end end
1
# frozen_string_literal: true require 'bolt/error' require 'bolt/config/transport/base' module Bolt class Config module Transport class Docker < Base OPTIONS = %w[ cleanup host interpreters service-url shell-command tmpdir tty ].concat(RUN_AS_OPTIONS).sort.freeze DEFAULTS = { 'cleanup' => true }.freeze private def validate super if @config['interpreters'] @config['interpreters'] = normalize_interpreters(@config['interpreters']) end if Bolt::Util.windows? && @config['run-as'] raise Bolt::ValidationError, "run-as is not supported when using PowerShell" end end end end end end
1
18,555
We could potentially log a message here instead that indicates the transport does not support `run-as` on Windows and will be ignored, just in case users expect it to and are surprised. Since it would _always_ be logged when using `--run-as` on Windows, even when the transport isn't being used, it would probably want to be either at debug or trace level.
puppetlabs-bolt
rb
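A minimal sketch of the warn-and-ignore alternative suggested above; the `logger` call is assumed to be available in this scope for illustration and is not part of the transport class shown here:

# Hedged sketch of the warn-and-ignore alternative; `logger` is assumed to be available in this
# scope and is not part of the snippet shown. Debug level keeps it quiet on Windows runs that
# set run-as globally for transports that do support it.
def validate
  super

  if @config['interpreters']
    @config['interpreters'] = normalize_interpreters(@config['interpreters'])
  end

  if Bolt::Util.windows? && @config['run-as']
    logger.debug("The docker transport does not support run-as on Windows; the option will be ignored")
  end
end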
@@ -4,7 +4,7 @@ import ( neturl "net/url" "strings" - "github.com/bonitoo-io/go-sql-bigquery" + bigquery "github.com/bonitoo-io/go-sql-bigquery" "github.com/go-sql-driver/mysql" "github.com/influxdata/flux/codes" "github.com/influxdata/flux/dependencies/url"
1
package sql import ( neturl "net/url" "strings" "github.com/bonitoo-io/go-sql-bigquery" "github.com/go-sql-driver/mysql" "github.com/influxdata/flux/codes" "github.com/influxdata/flux/dependencies/url" "github.com/influxdata/flux/internal/errors" "github.com/snowflakedb/gosnowflake" ) // helper function to validate the data source url (postgres, sqlmock) / dsn (mysql, snowflake) using the URLValidator. func validateDataSource(validator url.Validator, driverName string, dataSourceName string) error { /* NOTE: some parsers don't return an error for an "empty path" (a path consisting of nothing at all, or only whitespace) - not an error as such, but here we rely on the driver implementation "doing the right thing" better not to, and flag this as an error because calling any SQL DB with an empty DSN is likely wrong. */ if strings.TrimSpace(dataSourceName) == "" { return errors.Newf(codes.Invalid, "invalid data source url: %v", "empty path supplied") } var u *neturl.URL var err error switch driverName { case "mysql": // an example is: username:password@tcp(localhost:3306)/dbname?param=value cfg, err := mysql.ParseDSN(dataSourceName) if err != nil { return errors.Newf(codes.Invalid, "invalid data source dsn: %v", err) } u = &neturl.URL{ Scheme: cfg.Net, User: neturl.UserPassword(cfg.User, cfg.Passwd), Host: cfg.Addr, } case "postgres", "sqlmock": // an example for postgres data source is: postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full // this follows the URI semantics u, err = neturl.Parse(dataSourceName) if err != nil { return errors.Newf(codes.Invalid, "invalid data source url: %v", err) } case "sqlite3": /* example SQLite is: file:test.db?cache=shared&mode=memory SQLite supports a superset of DSNs, including several special cases that net/url will flag as errors: :memory: file::memory: so we need to check for these, otherwise will flag as an error */ if dataSourceName == ":memory:" || dataSourceName == "file::memory:" { return nil } // we have a dsn that MIGHT be valid, so need to parse it - if it fails here, it is likely to be invalid u, err = neturl.Parse(dataSourceName) if err != nil { return errors.Newf(codes.Invalid, "invalid data source url: %v", err) } case "snowflake": // an example is: username:password@accountname/dbname/testschema?warehouse=mywh cfg, err := gosnowflake.ParseDSN(dataSourceName) if err != nil { return errors.Newf(codes.Invalid, "invalid data source dsn: %v", err) } u = &neturl.URL{ Scheme: cfg.Protocol, User: neturl.UserPassword(cfg.User, cfg.Password), Host: cfg.Host, } case "mssql", "sqlserver": // URL example: sqlserver://sa:mypass@localhost:1234?database=master // ADO example: server=localhost;user id=sa;database=master cfg, err := mssqlParseDSN(dataSourceName) if err != nil { return errors.Newf(codes.Invalid, "invalid data source dsn: %v", err) } u = &neturl.URL{ Scheme: cfg.Scheme, User: neturl.UserPassword(cfg.User, cfg.Password), Host: cfg.Host, } case "awsathena": // an example is: s3://bucketname/?region=us-west-1&db=dbname&accessID=AKI...&secretAccessKey=NnQ7... 
u, err = neturl.Parse(dataSourceName) if err != nil { return errors.Newf(codes.Invalid, "invalid data source url: %v", err) } case "bigquery": // an example is: bigquery://projectid/location?dataset=datasetid cfg, err := bigquery.ConfigFromConnString(dataSourceName) if err != nil { return errors.Newf(codes.Invalid, "invalid data source dsn: %v", err) } u = &neturl.URL{ Scheme: "bigquery", Host: cfg.ProjectID, Path: cfg.Location, } case "hdb": // SAP HANA // an example is: hdb://user:password@host:port u, err = neturl.Parse(dataSourceName) if err != nil { return errors.Newf(codes.Invalid, "invalid data source url: %v", err) } default: return errors.Newf(codes.Invalid, "sql driver %s not supported", driverName) } if err = validator.Validate(u); err != nil { return errors.Newf(codes.Invalid, "data source did not pass url validation: %v", err) } return nil }
1
16,781
Did we need this alias? Or is it just a holdover from development?
influxdata-flux
go
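For context, the alias is redundant only in the sense that the package already declares itself as `bigquery`, while the import path's last element (`go-sql-bigquery`) differs from that name, which is the usual reason tooling writes the named form. A small sketch (assuming the bonitoo-io module is present in go.mod) showing that the identifier in scope is the same either way:

// Sketch only; assumes github.com/bonitoo-io/go-sql-bigquery is listed in go.mod. The package
// clause of that module is "bigquery", so the identifier below is the same with or without the
// explicit alias; the alias just makes the binding visible at the import site.
package main

import (
	"fmt"

	bigquery "github.com/bonitoo-io/go-sql-bigquery"
)

func main() {
	cfg, err := bigquery.ConfigFromConnString("bigquery://projectid/location?dataset=datasetid")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(cfg.ProjectID, cfg.Location)
}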
@@ -72,7 +72,7 @@ class EditTest < ActiveSupport::TestCase p.update_attributes(organization_id: org.id) edit = PropertyEdit.where(target: p, key: 'organization_id').first edit.project_id.must_equal p.id - edit.organization_id.must_equal org.id + edit.value.must_equal org.id.to_s end it 'test_that_nothing_gets_filled_in_automatically_for_license_edits' do
1
require 'test_helper' class EditTest < ActiveSupport::TestCase before do @user = create(:account) @admin = create(:admin) project = create(:project, description: 'Linux') Edit.for_target(project).delete_all @edit = create(:create_edit, target: project) @previous_edit = create(:create_edit, value: '456', created_at: Time.now - 5.days, target: project) end it 'test_that_we_can_get_the_previous_value_of_an_edit' do @edit.previous_value.must_equal '456' end it 'test_that_previous_value_returns_nil_on_initial_edit' do @previous_edit.previous_value.must_equal nil end it 'test_that_undo_and_redo_work' do @edit.undo!(@admin) @edit.undone.must_equal true @edit.undoer.must_equal @admin @edit.redo!(@user) @edit.undone.must_equal false @edit.undoer.must_equal @user end it 'test_that_undo_requires_an_editor' do proc { @edit.undo!(nil) }.must_raise RuntimeError end it 'test_that_redo_requires_an_editor' do @edit.undo!(@admin) proc { @edit.undo!(nil) }.must_raise RuntimeError end it 'test_that_undo_can_only_be_called_once' do @edit.undo!(@admin) proc { @edit.undo!(@admin) }.must_raise ActsAsEditable::UndoError end it 'test_that_redo_can_only_be_called_after_an_undo' do proc { @edit.redo!(@admin) }.must_raise ActsAsEditable::UndoError end it 'test_that_project_gets_filled_in_automatically_for_project_edits' do p = create(:project) edit = CreateEdit.where(target: p).first edit.project_id.must_equal edit.target.id edit.organization_id.must_equal nil end it 'test_that_project_gets_filled_in_automatically_for_project_license_edits' do pl = create(:project_license, project: create(:project), license: create(:license)) edit = CreateEdit.where(target: pl).first edit.project_id.must_equal edit.target.project.id edit.organization_id.must_equal nil end it 'test_that_organization_gets_filled_in_automatically_for_organization_edits' do org = create(:organization) edit = CreateEdit.where(target: org).first edit.organization_id.must_equal edit.target.id edit.project_id.must_equal nil end it 'test_that_project_and_organization_get_filled_in_automatically_when_associating_project_to_an_org' do p = create(:project, organization: nil) org = create(:organization) p.update_attributes(organization_id: org.id) edit = PropertyEdit.where(target: p, key: 'organization_id').first edit.project_id.must_equal p.id edit.organization_id.must_equal org.id end it 'test_that_nothing_gets_filled_in_automatically_for_license_edits' do l = create(:license) edit = CreateEdit.where(target: l).first edit.project_id.must_equal nil edit.organization_id.must_equal nil end end
1
6,973
This should have failed before.
blackducksoftware-ohloh-ui
rb
@@ -56,7 +56,8 @@ namespace Datadog.Trace.Vendors.Serilog.Sinks.File "(?<" + PeriodMatchGroup + ">\\d{" + _periodFormat.Length + "})" + "(?<" + SequenceNumberMatchGroup + ">_[0-9]{3,}){0,1}" + Regex.Escape(_filenameSuffix) + - "$"); + "$", + RegexOptions.Compiled); DirectorySearchPattern = $"{_filenamePrefix}*{_filenameSuffix}"; }
1
//------------------------------------------------------------------------------ // <auto-generated /> // This file was automatically generated by the UpdateVendors tool. //------------------------------------------------------------------------------ // Copyright 2013-2016 Serilog Contributors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. using System; using System.Collections.Generic; using System.Globalization; using System.IO; using System.Text.RegularExpressions; namespace Datadog.Trace.Vendors.Serilog.Sinks.File { class PathRoller { const string PeriodMatchGroup = "period"; const string SequenceNumberMatchGroup = "sequence"; readonly string _directory; readonly string _filenamePrefix; readonly string _filenameSuffix; readonly Regex _filenameMatcher; readonly RollingInterval _interval; readonly string _periodFormat; public PathRoller(string path, RollingInterval interval) { if (path == null) throw new ArgumentNullException(nameof(path)); _interval = interval; _periodFormat = interval.GetFormat(); var pathDirectory = Path.GetDirectoryName(path); if (string.IsNullOrEmpty(pathDirectory)) pathDirectory = Directory.GetCurrentDirectory(); _directory = Path.GetFullPath(pathDirectory); _filenamePrefix = Path.GetFileNameWithoutExtension(path); _filenameSuffix = Path.GetExtension(path); _filenameMatcher = new Regex( "^" + Regex.Escape(_filenamePrefix) + "(?<" + PeriodMatchGroup + ">\\d{" + _periodFormat.Length + "})" + "(?<" + SequenceNumberMatchGroup + ">_[0-9]{3,}){0,1}" + Regex.Escape(_filenameSuffix) + "$"); DirectorySearchPattern = $"{_filenamePrefix}*{_filenameSuffix}"; } public string LogFileDirectory => _directory; public string DirectorySearchPattern { get; } public void GetLogFilePath(DateTime date, int? sequenceNumber, out string path) { var currentCheckpoint = GetCurrentCheckpoint(date); var tok = currentCheckpoint?.ToString(_periodFormat, CultureInfo.InvariantCulture) ?? ""; if (sequenceNumber != null) tok += "_" + sequenceNumber.Value.ToString("000", CultureInfo.InvariantCulture); path = Path.Combine(_directory, _filenamePrefix + tok + _filenameSuffix); } public IEnumerable<RollingLogFile> SelectMatches(IEnumerable<string> filenames) { foreach (var filename in filenames) { var match = _filenameMatcher.Match(filename); if (!match.Success) continue; int? inc = null; var incGroup = match.Groups[SequenceNumberMatchGroup]; if (incGroup.Captures.Count != 0) { var incPart = incGroup.Captures[0].Value.Substring(1); inc = int.Parse(incPart, CultureInfo.InvariantCulture); } DateTime? period = null; var periodGroup = match.Groups[PeriodMatchGroup]; if (periodGroup.Captures.Count != 0) { var dateTimePart = periodGroup.Captures[0].Value; if (DateTime.TryParseExact( dateTimePart, _periodFormat, CultureInfo.InvariantCulture, DateTimeStyles.None, out var dateTime)) { period = dateTime; } } yield return new RollingLogFile(filename, period, inc); } } public DateTime? GetCurrentCheckpoint(DateTime instant) => _interval.GetCurrentCheckpoint(instant); public DateTime? 
GetNextCheckpoint(DateTime instant) => _interval.GetNextCheckpoint(instant); } }
1
19,900
Slower construction, faster matching. I wonder if this will be noticeable in the relenv?
DataDog-dd-trace-dotnet
.cs
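On the trade-off mentioned above: RegexOptions.Compiled pays a one-time cost when the Regex is constructed in exchange for cheaper matching, and PathRoller builds its matcher once per sink and reuses it for every directory scan, so the construction cost is paid per sink instance. A rough standalone micro-benchmark sketch (numbers will vary by machine; this is not part of the vendored code):

// Rough illustration only, not part of the vendored Serilog sink: measure the one-time
// construction cost of RegexOptions.Compiled against the per-match savings it buys.
using System;
using System.Diagnostics;
using System.Text.RegularExpressions;

class RegexCompiledCost
{
    static void Main()
    {
        const string pattern = @"^log(?<period>\d{8})(?<sequence>_[0-9]{3,}){0,1}\.txt$";
        const string input = "log20240101_001.txt";

        var construction = Stopwatch.StartNew();
        var matcher = new Regex(pattern, RegexOptions.Compiled);
        construction.Stop();
        Console.WriteLine($"construction: {construction.Elapsed}");

        var matching = Stopwatch.StartNew();
        for (var i = 0; i < 100000; i++)
        {
            matcher.IsMatch(input);
        }
        matching.Stop();
        Console.WriteLine($"100000 matches: {matching.Elapsed}");
    }
}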
@@ -61,6 +61,9 @@ void ThroughputPublisher::DataPubListener::onPublicationMatched( if (info.status == MATCHED_MATCHING) { ++throughput_publisher_.data_discovery_count_; + std::cout << C_RED << "Pub: DATA Pub Matched " + << throughput_publisher_.data_discovery_count_ << "/" << throughput_publisher_.subscribers_ + << C_DEF << std::endl; } else {
1
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @file ThroughputPublisher.cxx * */ #include "ThroughputPublisher.hpp" #include <fastrtps/utils/TimeConversion.h> #include <fastrtps/attributes/ParticipantAttributes.h> #include <fastrtps/attributes/SubscriberAttributes.h> #include <fastrtps/xmlparser/XMLProfileManager.h> #include <fastrtps/publisher/Publisher.h> #include <fastrtps/subscriber/Subscriber.h> #include <fastrtps/subscriber/SampleInfo.h> #include <fastrtps/Domain.h> #include <dds/core/LengthUnlimited.hpp> #include <map> #include <fstream> #include <chrono> using namespace eprosima::fastrtps; using namespace eprosima::fastrtps::rtps; using namespace eprosima::fastrtps::types; // ******************************************************************************************* // ************************************ DATA PUB LISTENER ************************************ // ******************************************************************************************* ThroughputPublisher::DataPubListener::DataPubListener( ThroughputPublisher& throughput_publisher) : throughput_publisher_(throughput_publisher) { } ThroughputPublisher::DataPubListener::~DataPubListener() { } void ThroughputPublisher::DataPubListener::onPublicationMatched( Publisher* /*pub*/, MatchingInfo& info) { std::unique_lock<std::mutex> lock(throughput_publisher_.data_mutex_); if (info.status == MATCHED_MATCHING) { ++throughput_publisher_.data_discovery_count_; } else { --throughput_publisher_.data_discovery_count_; } lock.unlock(); throughput_publisher_.data_discovery_cv_.notify_one(); } // ******************************************************************************************* // ********************************** COMMAND SUB LISTENER *********************************** // ******************************************************************************************* ThroughputPublisher::CommandSubListener::CommandSubListener( ThroughputPublisher& throughput_publisher) : throughput_publisher_(throughput_publisher) { } ThroughputPublisher::CommandSubListener::~CommandSubListener() { } void ThroughputPublisher::CommandSubListener::onSubscriptionMatched( Subscriber* /*sub*/, MatchingInfo& info) { std::unique_lock<std::mutex> lock(throughput_publisher_.command_mutex_); if (info.status == MATCHED_MATCHING) { std::cout << C_RED << "COMMAND Sub Matched" << C_DEF << std::endl; ++throughput_publisher_.command_discovery_count_; } else { std::cout << C_RED << "COMMAND SUBSCRIBER MATCHING REMOVAL" << C_DEF << std::endl; --throughput_publisher_.command_discovery_count_; } lock.unlock(); throughput_publisher_.command_discovery_cv_.notify_one(); } // ******************************************************************************************* // ********************************** COMMAND PUB LISTENER *********************************** // ******************************************************************************************* 
ThroughputPublisher::CommandPubListener::CommandPubListener( ThroughputPublisher& throughput_publisher) : throughput_publisher_(throughput_publisher) { } ThroughputPublisher::CommandPubListener::~CommandPubListener() { } void ThroughputPublisher::CommandPubListener::onPublicationMatched( Publisher* /*pub*/, MatchingInfo& info) { std::unique_lock<std::mutex> lock(throughput_publisher_.command_mutex_); if (info.status == MATCHED_MATCHING) { std::cout << C_RED << "COMMAND Pub Matched" << C_DEF << std::endl; ++throughput_publisher_.command_discovery_count_; } else { std::cout << C_RED << "COMMAND PUBLISHER MATCHING REMOVAL" << C_DEF << std::endl; --throughput_publisher_.command_discovery_count_; } lock.unlock(); throughput_publisher_.command_discovery_cv_.notify_one(); } // ******************************************************************************************* // ********************************** THROUGHPUT PUBLISHER *********************************** // ******************************************************************************************* ThroughputPublisher::ThroughputPublisher( bool reliable, uint32_t pid, bool hostname, const std::string& export_csv, const eprosima::fastrtps::rtps::PropertyPolicy& part_property_policy, const eprosima::fastrtps::rtps::PropertyPolicy& property_policy, const std::string& xml_config_file, const std::string& demands_file, const std::string& recoveries_file, bool dynamic_types, int forced_domain) : command_discovery_count_(0) , data_discovery_count_(0) , dynamic_data_(dynamic_types) , ready_(true) , reliable_(reliable) , forced_domain_(forced_domain) , demands_file_(demands_file) , export_csv_(export_csv) , xml_config_file_(xml_config_file) , recoveries_file_(recoveries_file) #pragma warning(disable:4355) , data_pub_listener_(*this) , command_pub_listener_(*this) , command_sub_listener_(*this) { // Dummy type registration if (dynamic_data_) { // Create basic builders DynamicTypeBuilder_ptr struct_type_builder(DynamicTypeBuilderFactory::get_instance()->create_struct_builder()); // Add members to the struct. struct_type_builder->add_member(0, "seqnum", DynamicTypeBuilderFactory::get_instance()->create_uint32_type()); struct_type_builder->add_member(1, "data", DynamicTypeBuilderFactory::get_instance()->create_sequence_builder( DynamicTypeBuilderFactory::get_instance()->create_byte_type(), ::dds::core::LENGTH_UNLIMITED)); struct_type_builder->set_name("ThroughputType"); dynamic_type_ = struct_type_builder->build(); dynamic_pub_sub_type_.SetDynamicType(dynamic_type_); } /* Create RTPSParticipant */ std::string participant_profile_name = "pub_participant_profile"; ParticipantAttributes participant_attributes; // Default domain participant_attributes.domainId = pid % 230; // Default participant name participant_attributes.rtps.setName("throughput_test_publisher"); // Load XML file if (xml_config_file_.length() > 0) { if (eprosima::fastrtps::xmlparser::XMLP_ret::XML_OK != eprosima::fastrtps::xmlparser::XMLProfileManager::fillParticipantAttributes(participant_profile_name, participant_attributes)) { ready_ = false; return; } } // Apply user's force domain if (forced_domain_ >= 0) { participant_attributes.domainId = forced_domain_; } // If the user has specified a participant property policy with command line arguments, it overrides whatever the // XML configures. 
if (PropertyPolicyHelper::length(part_property_policy) > 0) { participant_attributes.rtps.properties = part_property_policy; } // Create the participant participant_ = Domain::createParticipant(participant_attributes); if (participant_ == nullptr) { std::cout << "ERROR creating participant" << std::endl; ready_ = false; return; } // Register the date type throughput_type_ = nullptr; Domain::registerType(participant_, (TopicDataType*)&throuput_command_type_); /* Create Data Publisher */ std::string profile_name = "publisher_profile"; pub_attrs_.topic.topicDataType = "ThroughputType"; pub_attrs_.topic.topicKind = NO_KEY; // Default topic std::ostringstream data_topic; data_topic << "ThroughputTest_"; if (hostname) { data_topic << asio::ip::host_name() << "_"; } data_topic << pid << "_UP"; pub_attrs_.topic.topicName = data_topic.str(); // Reliability if (reliable) { pub_attrs_.times.heartbeatPeriod = TimeConv::MilliSeconds2Time_t(100).to_duration_t(); pub_attrs_.times.nackSupressionDuration = TimeConv::MilliSeconds2Time_t(0).to_duration_t(); pub_attrs_.times.nackResponseDelay = TimeConv::MilliSeconds2Time_t(0).to_duration_t(); pub_attrs_.qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS; } else { pub_attrs_.qos.m_reliability.kind = BEST_EFFORT_RELIABILITY_QOS; } // Load XML file if (xml_config_file_.length() > 0) { if (xmlparser::XMLP_ret::XML_OK != xmlparser::XMLProfileManager::fillPublisherAttributes(profile_name, pub_attrs_)) { std::cout << "Cannot read publisher profile " << profile_name << std::endl; } } // If the user has specified a publisher property policy with command line arguments, it overrides whatever the // XML configures. if (PropertyPolicyHelper::length(property_policy) > 0) { pub_attrs_.properties = property_policy; } data_publisher_ = nullptr; // COMMAND SUBSCRIBER SubscriberAttributes command_subscriber_attrs; command_subscriber_attrs.topic.historyQos.kind = KEEP_ALL_HISTORY_QOS; command_subscriber_attrs.topic.topicDataType = "ThroughputCommand"; command_subscriber_attrs.topic.topicKind = NO_KEY; std::ostringstream sub_command_topic; sub_command_topic << "ThroughputTest_Command_"; if (hostname) { sub_command_topic << asio::ip::host_name() << "_"; } sub_command_topic << pid << "_SUB2PUB"; command_subscriber_attrs.topic.topicName = sub_command_topic.str(); command_subscriber_attrs.qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS; command_subscriber_attrs.qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS; command_subscriber_attrs.properties = property_policy; command_subscriber_ = Domain::createSubscriber(participant_, command_subscriber_attrs, (SubscriberListener*)&this->command_sub_listener_); PublisherAttributes command_publisher_attrs; command_publisher_attrs.topic.historyQos.kind = KEEP_ALL_HISTORY_QOS; command_publisher_attrs.topic.topicDataType = "ThroughputCommand"; command_publisher_attrs.topic.topicKind = NO_KEY; std::ostringstream pub_command_topic; pub_command_topic << "ThroughputTest_Command_"; if (hostname) { pub_command_topic << asio::ip::host_name() << "_"; } pub_command_topic << pid << "_PUB2SUB"; command_publisher_attrs.topic.topicName = pub_command_topic.str(); command_publisher_attrs.qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS; command_publisher_attrs.qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS; command_publisher_attrs.qos.m_publishMode.kind = SYNCHRONOUS_PUBLISH_MODE; command_publisher_attrs.properties = property_policy; command_publisher_ = Domain::createPublisher(participant_, command_publisher_attrs, 
(PublisherListener*)&this->command_pub_listener_); // Calculate overhead t_start_ = std::chrono::steady_clock::now(); for (int i = 0; i < 1000; ++i) { t_end_ = std::chrono::steady_clock::now(); } t_overhead_ = std::chrono::duration<double, std::micro>(t_end_ - t_start_) / 1001; std::cout << "Publisher's clock access overhead: " << t_overhead_.count() << " us" << std::endl; if (command_subscriber_ == nullptr || command_publisher_ == nullptr) { ready_ = false; } } ThroughputPublisher::~ThroughputPublisher() { Domain::removeParticipant(participant_); } bool ThroughputPublisher::ready() { return ready_; } void ThroughputPublisher::run( uint32_t test_time, uint32_t recovery_time_ms, int demand, int msg_size) { if (!ready_) { return; } if (demand == 0 || msg_size == 0) { if (!this->load_demands_payload()) { return; } } else { payload_ = msg_size; demand_payload_[msg_size - 8].push_back(demand); } /* Populate the recovery times vector */ if (recoveries_file_ != "") { if (!load_recoveries()) { return; } } else { recovery_times_.push_back(recovery_time_ms); } std::cout << "Recovery times: "; for (uint16_t i = 0; i < recovery_times_.size(); i++) { std::cout << recovery_times_[i] << ", "; } std::cout << std::endl; /* Create the export_csv_ file and add the header */ if (export_csv_ != "") { std::ofstream data_file; data_file.open(export_csv_); data_file << "Payload [Bytes],Demand [sample/burst],Recovery time [ms],Sent [samples],Publication time [us]," << "Publication sample rate [Sample/s],Publication throughput [Mb/s],Received [samples]," << "Lost [samples],Subscription time [us],Subscription sample rate [Sample/s]," << "Subscription throughput [Mb/s]" << std::endl; data_file.flush(); data_file.close(); } std::cout << "Pub Waiting for command discovery" << std::endl; { std::unique_lock<std::mutex> disc_lock(command_mutex_); command_discovery_cv_.wait(disc_lock, [&]() { return command_discovery_count_ == 2; }); } std::cout << "Pub Discovery command complete" << std::endl; ThroughputCommandType command; SampleInfo_t info; for (auto sit = demand_payload_.begin(); sit != demand_payload_.end(); ++sit) { for (auto dit = sit->second.begin(); dit != sit->second.end(); ++dit) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); command.m_size = sit->first; command.m_demand = *dit; // Check history resources depending on the history kind and demand if (pub_attrs_.topic.historyQos.kind == KEEP_LAST_HISTORY_QOS) { // Ensure that the history depth is at least the demand if (pub_attrs_.topic.historyQos.depth < 0 || static_cast<uint32_t>(pub_attrs_.topic.historyQos.depth) < command.m_demand) { logWarning(THROUGHPUTPUBLISHER, "Setting history depth to " << command.m_demand); pub_attrs_.topic.resourceLimitsQos.max_samples = command.m_demand; pub_attrs_.topic.historyQos.depth = command.m_demand; } } // KEEP_ALL case else { // Ensure that the max samples is at least the demand if (pub_attrs_.topic.resourceLimitsQos.max_samples < 0 || static_cast<uint32_t>(pub_attrs_.topic.resourceLimitsQos.max_samples) < command.m_demand) { logWarning(THROUGHPUTPUBLISHER, "Setting resource limit max samples to " << command.m_demand); pub_attrs_.topic.resourceLimitsQos.max_samples = command.m_demand; } } // Set the allocated samples to the max_samples. 
This is because allocated_sample must be <= max_samples pub_attrs_.topic.resourceLimitsQos.allocated_samples = pub_attrs_.topic.resourceLimitsQos.max_samples; for (uint16_t i = 0; i < recovery_times_.size(); i++) { command.m_command = READY_TO_START; command.m_size = sit->first; command.m_demand = *dit; command_publisher_->write((void*)&command); command_subscriber_->wait_for_unread_samples({20, 0}); command_subscriber_->takeNextData((void*)&command, &info); if (command.m_command == BEGIN) { if (!test(test_time, recovery_times_[i], *dit, sit->first)) { command.m_command = ALL_STOPS; command_publisher_->write((void*)&command); return; } } } } } command.m_command = ALL_STOPS; command_publisher_->write((void*)&command); bool all_acked = command_publisher_->wait_for_all_acked(eprosima::fastrtps::Time_t(20, 0)); print_results(results_); if (!all_acked) { std::cout << "ALL_STOPS Not acked! in 20(s)" << std::endl; } else { // Wait for the subscriber unmatch. std::unique_lock<std::mutex> disc_lock(command_mutex_); command_discovery_cv_.wait(disc_lock, [&]() { return command_discovery_count_ == 0; }); } } bool ThroughputPublisher::test( uint32_t test_time, uint32_t recovery_time_ms, uint32_t demand, uint32_t msg_size) { if (dynamic_data_) { // Create basic builders DynamicTypeBuilder_ptr struct_type_builder(DynamicTypeBuilderFactory::get_instance()->create_struct_builder()); // Add members to the struct. struct_type_builder->add_member(0, "seqnum", DynamicTypeBuilderFactory::get_instance()->create_uint32_type()); struct_type_builder->add_member(1, "data", DynamicTypeBuilderFactory::get_instance()->create_sequence_builder( DynamicTypeBuilderFactory::get_instance()->create_byte_type(), msg_size)); struct_type_builder->set_name("ThroughputType"); dynamic_type_ = struct_type_builder->build(); dynamic_pub_sub_type_.CleanDynamicType(); dynamic_pub_sub_type_.SetDynamicType(dynamic_type_); Domain::registerType(participant_, &dynamic_pub_sub_type_); dynamic_data_type_ = DynamicDataFactory::get_instance()->create_data(dynamic_type_); MemberId id; DynamicData* dynamic_data = dynamic_data_type_->loan_value(dynamic_data_type_->get_member_id_at_index(1)); for (uint32_t i = 0; i < msg_size; ++i) { dynamic_data->insert_sequence_data(id); dynamic_data->set_byte_value(0, id); } dynamic_data_type_->return_loaned_value(dynamic_data); } else { throughput_data_type_ = new ThroughputDataType(msg_size); Domain::registerType(participant_, throughput_data_type_); throughput_type_ = new ThroughputType((uint16_t)msg_size); } data_publisher_ = Domain::createPublisher(participant_, pub_attrs_, &data_pub_listener_); std::unique_lock<std::mutex> data_disc_lock(data_mutex_); data_discovery_cv_.wait(data_disc_lock, [&]() { return data_discovery_count_ > 0; }); data_disc_lock.unlock(); // Declare test time variables std::chrono::duration<double, std::micro> clock_overhead(0); std::chrono::duration<double, std::nano> test_time_ns = std::chrono::seconds(test_time); std::chrono::duration<double, std::nano> recovery_duration_ns = std::chrono::milliseconds(recovery_time_ms); std::chrono::steady_clock::time_point batch_start; // Send a TEST_STARTS and sleep for a while to give the subscriber time to set up uint32_t samples = 0; size_t aux; ThroughputCommandType command_sample; SampleInfo_t info; command_sample.m_command = TEST_STARTS; command_publisher_->write((void*)&command_sample); // If the subscriber does not acknowledge the TEST_STARTS in time, we consider something went wrong. 
std::chrono::steady_clock::time_point test_start_sent_tp = std::chrono::steady_clock::now(); if (!command_publisher_->wait_for_all_acked(eprosima::fastrtps::Time_t(20, 0))) { std::cout << "Something went wrong: The subscriber has not acknowledged the TEST_STARTS command." << std::endl; return false; } // Calculate how low has it takes for the subscriber to acknowledge TEST_START std::chrono::duration<double, std::micro> test_start_ack_duration = std::chrono::duration<double, std::micro>(std::chrono::steady_clock::now() - test_start_sent_tp); // Send batches until test_time_ns is reached t_start_ = std::chrono::steady_clock::now(); while ((t_end_ - t_start_) < test_time_ns) { // Get start time batch_start = std::chrono::steady_clock::now(); // Send a batch of size demand for (uint32_t sample = 0; sample < demand; sample++) { if (dynamic_data_) { dynamic_data_type_->set_uint32_value(dynamic_data_type_->get_uint32_value(0) + 1, 0); data_publisher_->write((void*)dynamic_data_type_); } else { throughput_type_->seqnum++; data_publisher_->write((void*)throughput_type_); } } // Get end time t_end_ = std::chrono::steady_clock::now(); // Add the number of sent samples samples += demand; /* If the batch took less than the recovery time, sleep for the difference recovery_duration - batch_duration. Else, go ahead with the next batch without time to recover. The previous is achieved with a call to sleep_for(). If the duration specified for sleep_for is negative, all implementations we know about return without setting the thread to sleep. */ std::this_thread::sleep_for(recovery_duration_ns - (t_end_ - batch_start)); clock_overhead += t_overhead_ * 2; // We access the clock twice per batch. } command_sample.m_command = TEST_ENDS; command_publisher_->write((void*)&command_sample); data_publisher_->removeAllChange(); // If the subscriber does not acknowledge the TEST_ENDS in time, we consider something went wrong. if (!command_publisher_->wait_for_all_acked(eprosima::fastrtps::Time_t(20, 0))) { std::cout << "Something went wrong: The subscriber has not acknowledged the TEST_ENDS command." 
<< std::endl; return false; } if (dynamic_data_) { DynamicTypeBuilderFactory::delete_instance(); DynamicDataFactory::get_instance()->delete_data(dynamic_data_type_); } else { delete(throughput_type_); } pub_attrs_ = data_publisher_->getAttributes(); Domain::removePublisher(data_publisher_); data_publisher_ = nullptr; Domain::unregisterType(participant_, "ThroughputType"); if (!dynamic_data_) { delete throughput_data_type_; } command_subscriber_->wait_for_unread_samples({20, 0}); if (command_subscriber_->takeNextData((void*)&command_sample, &info)) { if (command_sample.m_command == TEST_RESULTS) { TroughputResults result; result.payload_size = msg_size + 4 + 4; result.demand = demand; result.recovery_time_ms = recovery_time_ms; result.publisher.send_samples = samples; result.publisher.totaltime_us = std::chrono::duration<double, std::micro>(t_end_ - t_start_) - clock_overhead; result.subscriber.recv_samples = command_sample.m_lastrecsample - command_sample.m_lostsamples; result.subscriber.lost_samples = command_sample.m_lostsamples; result.subscriber.totaltime_us = std::chrono::microseconds(command_sample.m_totaltime) - test_start_ack_duration - clock_overhead; result.compute(); results_.push_back(result); /* Log data to CSV file */ if (export_csv_ != "") { std::ofstream data_file; data_file.open(export_csv_, std::fstream::app); data_file << std::fixed << std::setprecision(3) << result.payload_size << "," << result.demand << "," << result.recovery_time_ms << "," << result.publisher.send_samples << "," << result.publisher.totaltime_us.count() << "," << result.publisher.Packssec << "," << result.publisher.MBitssec << "," << result.subscriber.recv_samples << "," << result.subscriber.lost_samples << "," << result.subscriber.totaltime_us.count() << "," << result.subscriber.Packssec << "," << result.subscriber.MBitssec << std::endl; data_file.flush(); data_file.close(); } command_publisher_->removeAllChange(&aux); return true; } else { std::cout << "The test expected results, stopping" << std::endl; } } else { std::cout << "PROBLEM READING RESULTS;" << std::endl; } return false; } bool ThroughputPublisher::load_demands_payload() { std::ifstream fi(demands_file_); std::cout << "Reading demands file: " << demands_file_ << std::endl; std::string DELIM = ";"; if (!fi.is_open()) { std::cout << "Could not open demands file: " << demands_file_ << " , closing." 
<< std::endl; return false; } std::string line; size_t start; size_t end; bool first = true; bool more = true; while (std::getline(fi, line)) { start = 0; end = line.find(DELIM); first = true; uint32_t demand; more = true; while (more) { std::istringstream iss(line.substr(start, end - start)); if (first) { iss >> payload_; if (payload_ < 8) { std::cout << "Minimum payload is 16 bytes" << std::endl; return false; } payload_ -= 8; first = false; } else { iss >> demand; demand_payload_[payload_].push_back(demand); } start = end + DELIM.length(); end = line.find(DELIM, start); if (end == std::string::npos) { more = false; std::istringstream n_iss(line.substr(start, end - start)); if (n_iss >> demand) { demand_payload_[payload_].push_back(demand); } } } } fi.close(); payload_ += 8; std::cout << "Performing test with this payloads/demands:" << std::endl; for (auto sit = demand_payload_.begin(); sit != demand_payload_.end(); ++sit) { printf("Payload: %6d; Demands: ", sit->first + 8); for (auto dit = sit->second.begin(); dit != sit->second.end(); ++dit) { printf("%6d, ", *dit); } printf("\n"); } return true; } bool ThroughputPublisher::load_recoveries() { std::ifstream fi(recoveries_file_); std::cout << "Reading recoveries file: " << recoveries_file_ << std::endl; std::string DELIM = ";"; if (!fi.is_open()) { std::cout << "Could not open recoveries file: " << recoveries_file_ << " , closing." << std::endl; return false; } std::string line; size_t start; size_t end; uint32_t recovery; int32_t input_recovery; bool more = true; while (std::getline(fi, line)) { start = 0; end = line.find(DELIM); more = true; while (more) { std::istringstream iss(line.substr(start, end - start)); iss >> input_recovery; if (input_recovery < 0) { std::cout << "Recovery times must be positive. " << input_recovery << " found" << std::endl; return false; } recovery = static_cast<uint32_t>(input_recovery); // Only add if it was not there already if (std::find(recovery_times_.begin(), recovery_times_.end(), recovery) == recovery_times_.end()) { recovery_times_.push_back(recovery); } start = end + DELIM.length(); end = line.find(DELIM, start); if (end == std::string::npos) { more = false; std::istringstream n_iss(line.substr(start, end - start)); if (n_iss >> recovery) { // Only add if it was not there already if (std::find(recovery_times_.begin(), recovery_times_.end(), recovery) == recovery_times_.end()) { recovery_times_.push_back(recovery); } } } } } fi.close(); return true; }
1
17,843
I think that if `data_discovery_count_ > static_cast<int>(throughput_publisher_.subscribers_)`, then we should not proceed, since we have discovered some unexpected subscriber that can affect the test results. I'd change the comparison to `==` and add an `else if` covering the `>` case.
eProsima-Fast-DDS
cpp
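A self-contained sketch of the comparison change proposed above: report completion only when the count equals the expected number of subscribers, and flag an over-count instead of treating it as success. The names mirror the listener members, but this is a standalone illustration, not the ThroughputPublisher code itself.

#include <cstdint>
#include <iostream>

// Standalone illustration of the comparison suggested in the review, not the ThroughputPublisher
// listener itself: report completion only when exactly the expected number of subscribers has
// matched, and flag an over-count instead of treating it as success.
void report_data_discovery(int data_discovery_count, std::uint32_t subscribers)
{
    if (data_discovery_count == static_cast<int>(subscribers))
    {
        std::cout << "Pub: DATA Pub matched " << data_discovery_count << "/" << subscribers << std::endl;
    }
    else if (data_discovery_count > static_cast<int>(subscribers))
    {
        std::cout << "Pub: unexpected extra DATA subscriber (" << data_discovery_count << "/"
                  << subscribers << "); results may be unreliable" << std::endl;
    }
}

int main()
{
    report_data_discovery(2, 3); // still waiting for one more subscriber, nothing reported
    report_data_discovery(3, 3); // expected count reached
    report_data_discovery(4, 3); // over-count: an unexpected subscriber joined
    return 0;
}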
@@ -51,6 +51,7 @@ #include <iomanip> #include <stdexcept> #include <impl/Kokkos_Error.hpp> +#include <Cuda/Kokkos_Cuda_Error.hpp> //---------------------------------------------------------------------------- //----------------------------------------------------------------------------
1
/* //@HEADER // ************************************************************************ // // Kokkos v. 3.0 // Copyright (2020) National Technology & Engineering // Solutions of Sandia, LLC (NTESS). // // Under the terms of Contract DE-NA0003525 with NTESS, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact Christian R. 
Trott ([email protected]) // // ************************************************************************ //@HEADER */ #include <cstdio> #include <cstring> #include <cstdlib> #include <ostream> #include <sstream> #include <iomanip> #include <stdexcept> #include <impl/Kokkos_Error.hpp> //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- namespace Kokkos { namespace Impl { void host_abort(const char *const message) { fwrite(message, 1, strlen(message), stderr); fflush(stderr); ::abort(); } void throw_runtime_exception(const std::string &msg) { std::ostringstream o; o << msg; traceback_callstack(o); throw std::runtime_error(o.str()); } std::string human_memory_size(size_t arg_bytes) { double bytes = arg_bytes; const double K = 1024; const double M = K * 1024; const double G = M * 1024; std::ostringstream out; if (bytes < K) { out << std::setprecision(4) << bytes << " B"; } else if (bytes < M) { bytes /= K; out << std::setprecision(4) << bytes << " K"; } else if (bytes < G) { bytes /= M; out << std::setprecision(4) << bytes << " M"; } else { bytes /= G; out << std::setprecision(4) << bytes << " G"; } return out.str(); } } // namespace Impl void Experimental::RawMemoryAllocationFailure::print_error_message( std::ostream &o) const { o << "Allocation of size " << Impl::human_memory_size(m_attempted_size); o << " failed"; switch (m_failure_mode) { case FailureMode::OutOfMemoryError: o << ", likely due to insufficient memory."; break; case FailureMode::AllocationNotAligned: o << " because the allocation was improperly aligned."; break; case FailureMode::InvalidAllocationSize: o << " because the requested allocation size is not a valid size for the" " requested allocation mechanism (it's probably too large)."; break; // TODO move this to the subclass for Cuda-related things case FailureMode::MaximumCudaUVMAllocationsExceeded: o << " because the maximum Cuda UVM allocations was exceeded."; break; case FailureMode::Unknown: o << " because of an unknown error."; break; } o << " (The allocation mechanism was "; switch (m_mechanism) { case AllocationMechanism::StdMalloc: o << "standard malloc()."; break; case AllocationMechanism::PosixMemAlign: o << "posix_memalign()."; break; case AllocationMechanism::PosixMMap: o << "POSIX mmap()."; break; case AllocationMechanism::IntelMMAlloc: o << "the Intel _mm_malloc() intrinsic."; break; case AllocationMechanism::CudaMalloc: o << "cudaMalloc()."; break; case AllocationMechanism::CudaMallocManaged: o << "cudaMallocManaged()."; break; case AllocationMechanism::CudaHostAlloc: o << "cudaHostAlloc()."; break; case AllocationMechanism::HIPMalloc: o << "hipMalloc()."; break; case AllocationMechanism::HIPHostMalloc: o << "hipHostMalloc()."; break; } append_additional_error_information(o); o << ")" << std::endl; } std::string Experimental::RawMemoryAllocationFailure::get_error_message() const { std::ostringstream out; print_error_message(out); return out.str(); } } // namespace Kokkos //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- namespace Kokkos { namespace Impl { void traceback_callstack(std::ostream &msg) { msg << std::endl << "Traceback functionality not available" << std::endl; } } // namespace Impl } // namespace Kokkos
1
23,626
Remove that include
kokkos-kokkos
cpp
@@ -130,6 +130,7 @@ class EasyAdminFormType extends AbstractType return function (Options $options, $value) { return array_replace(array( 'id' => sprintf('%s-%s-form', $options['view'], strtolower($options['entity'])), + 'class' => sprintf('%s-form', $options['view']), ), $value); }; }
1
<?php /* * This file is part of the EasyAdminBundle. * * (c) Javier Eguiluz <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace JavierEguiluz\Bundle\EasyAdminBundle\Form\Type; use JavierEguiluz\Bundle\EasyAdminBundle\Form\Type\Configurator\TypeConfiguratorInterface; use JavierEguiluz\Bundle\EasyAdminBundle\Configuration\Configurator; use Symfony\Component\Form\AbstractType; use Symfony\Component\Form\FormBuilderInterface; use Symfony\Component\OptionsResolver\Options; use Symfony\Component\OptionsResolver\OptionsResolver; use Symfony\Component\OptionsResolver\OptionsResolverInterface; /** * Custom form type that deals with some of the logic used to render the * forms used to create and edit EasyAdmin entities. * * @author Maxime Steinhausser <[email protected]> */ class EasyAdminFormType extends AbstractType { /** @var Configurator */ private $configurator; /** @var array */ private $config; /** @var TypeConfiguratorInterface[] */ private $configurators; /** * @param Configurator $configurator * @param TypeConfiguratorInterface[] $configurators */ public function __construct(Configurator $configurator, array $configurators = array()) { $this->configurator = $configurator; $this->config = $configurator->getBackendConfig(); $this->configurators = $configurators; } /** * {@inheritdoc} */ public function buildForm(FormBuilderInterface $builder, array $options) { $entity = $options['entity']; $view = $options['view']; $entityConfig = $this->configurator->getEntityConfiguration($entity); $entityProperties = $entityConfig[$view]['fields']; foreach ($entityProperties as $name => $metadata) { $formFieldOptions = $metadata['type_options']; // Configure options using the list of registered type configurators: foreach ($this->configurators as $configurator) { if ($configurator->supports($metadata['fieldType'], $formFieldOptions, $metadata)) { $formFieldOptions = $configurator->configure($name, $formFieldOptions, $metadata, $builder); } } $formFieldType = $this->useLegacyFormComponent() ? $metadata['fieldType'] : $this->getFormTypeFqcn($metadata['fieldType']); $builder->add($name, $formFieldType, $formFieldOptions); } } /** * {@inheritdoc} */ public function configureOptions(OptionsResolver $resolver) { $configurator = $this->configurator; $resolver ->setDefaults(array( 'allow_extra_fields' => true, 'data_class' => function (Options $options) use ($configurator) { $entity = $options['entity']; $entityConfig = $configurator->getEntityConfiguration($entity); return $entityConfig['class']; }, )) ->setRequired(array('entity', 'view')); if ($this->useLegacyFormComponent()) { $resolver->setNormalizers(array('attr' => $this->getAttributesNormalizer())); } else { $resolver->setNormalizer('attr', $this->getAttributesNormalizer()); } } // BC for SF < 2.7 public function setDefaultOptions(OptionsResolverInterface $resolver) { $this->configureOptions($resolver); } /** * {@inheritdoc} */ public function getBlockPrefix() { return 'easyadmin'; } /** * {@inheritdoc} */ public function getName() { return $this->getBlockPrefix(); } /** * Returns a closure normalizing the form html attributes. * * @return \Closure */ private function getAttributesNormalizer() { return function (Options $options, $value) { return array_replace(array( 'id' => sprintf('%s-%s-form', $options['view'], strtolower($options['entity'])), ), $value); }; } /** * It returns the FQCN of the given short type name. 
* Example: 'text' -> 'Symfony\Component\Form\Extension\Core\Type\TextType' * * @param string $shortType * * @return string */ private function getFormTypeFqcn($shortType) { $builtinTypes = array( 'birthday', 'button', 'checkbox', 'choice', 'collection', 'country', 'currency', 'datetime', 'date', 'email', 'entity', 'file', 'form', 'hidden', 'integer', 'language', 'locale', 'money', 'number', 'password', 'percent', 'radio', 'range', 'repeated', 'reset', 'search', 'submit', 'textarea', 'text', 'time', 'timezone', 'url', ); if (!in_array($shortType, $builtinTypes)) { return $shortType; } $irregularTypeFqcn = array( 'entity' => 'Symfony\\Bridge\\Doctrine\\Form\\Type\\EntityType', 'datetime' => 'Symfony\\Component\\Form\\Extension\\Core\\Type\\DateTimeType', ); if (array_key_exists($shortType, $irregularTypeFqcn)) { return $irregularTypeFqcn[$shortType]; } return sprintf('Symfony\\Component\\Form\\Extension\\Core\\Type\\%sType', ucfirst($shortType)); } /** * Returns true if the legacy Form component is being used by the application. * * @return bool */ private function useLegacyFormComponent() { return false === class_exists('Symfony\\Component\\Form\\Util\\StringUtil'); } }
1
10,006
IMO this should be defined in the form theme instead. This class should always be there. If the user configured additional CSS classes, they should be appended instead of replacing the `{view}-form` CSS class.
EasyCorp-EasyAdminBundle
php
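A standalone sketch of the appending half of that suggestion (the form-theme half is a templating change and is not shown): keep any user-configured `class` attribute and append the `{view}-form` class rather than letting one replace the other. The function name and the example values are illustrative, not part of the bundle.

<?php
// Standalone illustration of the appending behaviour suggested above, not the bundle's actual
// normalizer: keep any user-configured "class" attribute and append the "{view}-form" class
// instead of letting one replace the other.
function normalizeFormAttributes($view, $entity, array $userAttributes)
{
    $formCssClass = sprintf('%s-form', $view);
    $userCssClass = isset($userAttributes['class']) ? $userAttributes['class'] : '';

    return array_replace(array(
        'id' => sprintf('%s-%s-form', $view, strtolower($entity)),
    ), $userAttributes, array(
        'class' => trim($userCssClass.' '.$formCssClass),
    ));
}

var_dump(normalizeFormAttributes('edit', 'Product', array('class' => 'my-custom-form')));
// id: "edit-product-form", class: "my-custom-form edit-form"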
@@ -36,6 +36,7 @@ import com.salesforce.androidsdk.auth.OAuth2.TokenEndpointResponse; import com.salesforce.androidsdk.rest.RestClient.AuthTokenProvider; import com.salesforce.androidsdk.rest.RestClient.ClientInfo; import com.salesforce.androidsdk.rest.RestRequest.RestMethod; +import com.salesforce.androidsdk.util.JSONObjectHelper; import org.json.JSONArray; import org.json.JSONException;
1
/* * Copyright (c) 2011-present, salesforce.com, inc. * All rights reserved. * Redistribution and use of this software in source and binary forms, with or * without modification, are permitted provided that the following conditions * are met: * - Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither the name of salesforce.com, inc. nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission of salesforce.com, inc. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ package com.salesforce.androidsdk.rest; import android.test.InstrumentationTestCase; import com.salesforce.androidsdk.TestCredentials; import com.salesforce.androidsdk.app.SalesforceSDKManager; import com.salesforce.androidsdk.auth.HttpAccess; import com.salesforce.androidsdk.auth.OAuth2; import com.salesforce.androidsdk.auth.OAuth2.TokenEndpointResponse; import com.salesforce.androidsdk.rest.RestClient.AuthTokenProvider; import com.salesforce.androidsdk.rest.RestClient.ClientInfo; import com.salesforce.androidsdk.rest.RestRequest.RestMethod; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; /** * Tests for RestClient * * Does live calls to a test org * */ public class RestClientTest extends InstrumentationTestCase { private static final String ENTITY_NAME_PREFIX = "RestClientTest"; private static final String SEARCH_ENTITY_NAME = "Acme"; private static final String BAD_TOKEN = "bad-token"; private ClientInfo clientInfo; private HttpAccess httpAccess; private RestClient restClient; private String authToken; private String instanceUrl; private List<String> testOauthKeys; private Map<String, String> testOauthValues; public static final String TEST_FIRST_NAME = "firstName"; public static final String TEST_LAST_NAME = "lastName"; public static final String TEST_DISPLAY_NAME = "displayName"; public static final String TEST_EMAIL = "[email protected]"; public static final String TEST_PHOTO_URL = 
"http://some.photo.url"; public static final String TEST_THUMBNAIL_URL = "http://some.thumbnail.url"; public static final String TEST_CUSTOM_KEY = "test_custom_key"; public static final String TEST_CUSTOM_VALUE = "test_custom_value"; @Override public void setUp() throws Exception { super.setUp(); TestCredentials.init(getInstrumentation().getContext()); httpAccess = new HttpAccess(null, "dummy-agent"); TokenEndpointResponse refreshResponse = OAuth2.refreshAuthToken(httpAccess, new URI(TestCredentials.INSTANCE_URL), TestCredentials.CLIENT_ID, TestCredentials.REFRESH_TOKEN); authToken = refreshResponse.authToken; instanceUrl = refreshResponse.instanceUrl; testOauthKeys = new ArrayList<>(); testOauthKeys.add(TEST_CUSTOM_KEY); testOauthValues = new HashMap<>(); testOauthValues.put(TEST_CUSTOM_KEY, TEST_CUSTOM_VALUE); SalesforceSDKManager.getInstance().setAdditionalOauthKeys(testOauthKeys); clientInfo = new ClientInfo(TestCredentials.CLIENT_ID, new URI(TestCredentials.INSTANCE_URL), new URI(TestCredentials.LOGIN_URL), new URI(TestCredentials.IDENTITY_URL), TestCredentials.ACCOUNT_NAME, TestCredentials.USERNAME, TestCredentials.USER_ID, TestCredentials.ORG_ID, null, null, TEST_FIRST_NAME, TEST_LAST_NAME, TEST_DISPLAY_NAME, TEST_EMAIL, TEST_PHOTO_URL, TEST_THUMBNAIL_URL, testOauthValues); restClient = new RestClient(clientInfo, authToken, httpAccess, null); } @Override public void tearDown() throws Exception { cleanup(); testOauthKeys = null; testOauthValues = null; SalesforceSDKManager.getInstance().setAdditionalOauthKeys(testOauthKeys); } /** * Testing getClientInfo * @throws URISyntaxException */ public void testGetClientInfo() throws URISyntaxException { assertEquals("Wrong client id", TestCredentials.CLIENT_ID, restClient.getClientInfo().clientId); assertEquals("Wrong instance url", new URI(TestCredentials.INSTANCE_URL), restClient.getClientInfo().instanceUrl); assertEquals("Wrong login url", new URI(TestCredentials.LOGIN_URL), restClient.getClientInfo().loginUrl); assertEquals("Wrong account name", TestCredentials.ACCOUNT_NAME, restClient.getClientInfo().accountName); assertEquals("Wrong username", TestCredentials.USERNAME, restClient.getClientInfo().username); assertEquals("Wrong userId", TestCredentials.USER_ID, restClient.getClientInfo().userId); assertEquals("Wrong orgId", TestCredentials.ORG_ID, restClient.getClientInfo().orgId); assertEquals("Wrong firstName", TEST_FIRST_NAME, restClient.getClientInfo().firstName); assertEquals("Wrong lastName", TEST_LAST_NAME, restClient.getClientInfo().lastName); assertEquals("Wrong displayName", TEST_DISPLAY_NAME, restClient.getClientInfo().displayName); assertEquals("Wrong email", TEST_EMAIL, restClient.getClientInfo().email); assertEquals("Wrong photoUrl", TEST_PHOTO_URL, restClient.getClientInfo().photoUrl); assertEquals("Wrong thumbnailUrl", TEST_THUMBNAIL_URL, restClient.getClientInfo().thumbnailUrl); assertEquals("Wrong additional OAuth value", testOauthValues, restClient.getClientInfo().additionalOauthValues); } public void testClientInfoResolveUrl() { assertEquals("Wrong url", TestCredentials.INSTANCE_URL + "/a/b/", clientInfo.resolveUrl("a/b/").toString()); assertEquals("Wrong url", TestCredentials.INSTANCE_URL + "/a/b/", clientInfo.resolveUrl("/a/b/").toString()); } public void testClientInfoResolveUrlForHttpsUrl() { assertEquals("Wrong url", "https://testurl", clientInfo.resolveUrl("https://testurl").toString()); assertEquals("Wrong url", "http://testurl", clientInfo.resolveUrl("http://testurl").toString()); assertEquals("Wrong url", 
"HTTPS://testurl", clientInfo.resolveUrl("HTTPS://testurl").toString()); assertEquals("Wrong url", "HTTP://testurl", clientInfo.resolveUrl("HTTP://testurl").toString()); } public void testClientInfoResolveUrlForCommunityUrl() throws Exception { final ClientInfo info = new ClientInfo(TestCredentials.CLIENT_ID, new URI(TestCredentials.INSTANCE_URL), new URI(TestCredentials.LOGIN_URL), new URI(TestCredentials.IDENTITY_URL), TestCredentials.ACCOUNT_NAME, TestCredentials.USERNAME, TestCredentials.USER_ID, TestCredentials.ORG_ID, null, TestCredentials.COMMUNITY_URL, null, null, null, null, null, null, testOauthValues); assertEquals("Wrong url", TestCredentials.COMMUNITY_URL + "/a/b/", info.resolveUrl("a/b/").toString()); assertEquals("Wrong url", TestCredentials.COMMUNITY_URL + "/a/b/", info.resolveUrl("/a/b/").toString()); } public void testGetInstanceUrlForCommunity() throws Exception { final ClientInfo info = new ClientInfo(TestCredentials.CLIENT_ID, new URI(TestCredentials.INSTANCE_URL), new URI(TestCredentials.LOGIN_URL), new URI(TestCredentials.IDENTITY_URL), TestCredentials.ACCOUNT_NAME, TestCredentials.USERNAME, TestCredentials.USER_ID, TestCredentials.ORG_ID, null, TestCredentials.COMMUNITY_URL, null, null, null, null, null, null, testOauthValues); assertEquals("Wrong url", TestCredentials.COMMUNITY_URL, info.getInstanceUrlAsString()); } public void testGetInstanceUrl() { assertEquals("Wrong url", TestCredentials.INSTANCE_URL, clientInfo.getInstanceUrlAsString()); } /** * Testing getAuthToken */ public void testGetAuthToken() { assertEquals("Wrong auth token", authToken, restClient.getAuthToken()); } /** * Testing a call with a bad auth token when restClient has no token provider * Expect a 401. * @throws URISyntaxException * @throws IOException */ public void testCallWithBadAuthToken() throws URISyntaxException, IOException { RestClient.clearOkClientsCache(); RestClient unauthenticatedRestClient = new RestClient(clientInfo, BAD_TOKEN, httpAccess, null); RestResponse response = unauthenticatedRestClient.sendSync(RestRequest.getRequestForResources(TestCredentials.API_VERSION)); assertFalse("Expected error", response.isSuccess()); checkResponse(response, HttpURLConnection.HTTP_UNAUTHORIZED, true); } /** * Testing a call with a bad auth token when restClient has a token provider * Expect token provider to be invoked and new token to be used. 
* @throws URISyntaxException * @throws IOException */ public void testCallWithBadTokenAndTokenProvider() throws URISyntaxException, IOException { RestClient.clearOkClientsCache(); AuthTokenProvider authTokenProvider = new AuthTokenProvider() { @Override public String getNewAuthToken() { return authToken; } @Override public String getRefreshToken() { return null; } @Override public long getLastRefreshTime() { return -1; } @Override public String getInstanceUrl() { return instanceUrl; } }; RestClient unauthenticatedRestClient = new RestClient(clientInfo, BAD_TOKEN, httpAccess, authTokenProvider); assertEquals("RestClient should be using the bad token initially", BAD_TOKEN, unauthenticatedRestClient.getAuthToken()); RestResponse response = unauthenticatedRestClient.sendSync(RestRequest.getRequestForResources(TestCredentials.API_VERSION)); assertEquals("RestClient should now be using the good token", authToken, unauthenticatedRestClient.getAuthToken()); assertTrue("Expected success", response.isSuccess()); checkResponse(response, HttpURLConnection.HTTP_OK, false); } /** * Testing a call with a bad auth token when restClient has a token provider * Expect token provider to be invoked and new token to be used and a new instance url to be returned. * @throws URISyntaxException * @throws IOException */ public void testCallWithBadInstanceUrl() throws URISyntaxException, IOException { RestClient.clearOkClientsCache(); AuthTokenProvider authTokenProvider = new AuthTokenProvider() { @Override public String getNewAuthToken() { return authToken; } @Override public String getRefreshToken() { return null; } @Override public long getLastRefreshTime() { return -1; } @Override public String getInstanceUrl() { return instanceUrl; } }; RestClient unauthenticatedRestClient = new RestClient(clientInfo, BAD_TOKEN, httpAccess, authTokenProvider); assertEquals("RestClient has bad instance url", new URI(TestCredentials.INSTANCE_URL), unauthenticatedRestClient.getClientInfo().instanceUrl); RestResponse response = unauthenticatedRestClient.sendSync(RestRequest.getRequestForResources(TestCredentials.API_VERSION)); assertEquals("RestClient should now have the correct instance url", new URI(instanceUrl), unauthenticatedRestClient.getClientInfo().instanceUrl); assertTrue("Expected success", response.isSuccess()); checkResponse(response, HttpURLConnection.HTTP_OK, false); } /** * Testing a get versions call to the server - check response * @throws Exception */ public void testGetVersions() throws Exception { // We don't need to be authenticated RestClient unauthenticatedRestClient = new RestClient(clientInfo, BAD_TOKEN, httpAccess, null); RestResponse response = unauthenticatedRestClient.sendSync(RestRequest.getRequestForVersions()); checkResponse(response, HttpURLConnection.HTTP_OK, true); checkKeys(response.asJSONArray().getJSONObject(0), "label", "url", "version"); } /** * Testing a get resources call to the server - check response * @throws Exception */ public void testGetResources() throws Exception { RestResponse response = restClient.sendSync(RestRequest.getRequestForResources(TestCredentials.API_VERSION)); checkResponse(response, HttpURLConnection.HTTP_OK, false); checkKeys(response.asJSONObject(), "sobjects", "search", "recent"); } /** * Testing a get resources async call to the server - check response * @throws Exception */ public void testGetResourcesAsync() throws Exception { RestResponse response = sendAsync(restClient, RestRequest.getRequestForResources(TestCredentials.API_VERSION)); checkResponse(response, 
HttpURLConnection.HTTP_OK, false); checkKeys(response.asJSONObject(), "sobjects", "search", "recent"); } /** * Testing a describe global call to the server - check response * @throws Exception */ public void testDescribeGlobal() throws Exception { RestResponse response = restClient.sendSync(RestRequest.getRequestForDescribeGlobal(TestCredentials.API_VERSION)); checkResponse(response, HttpURLConnection.HTTP_OK, false); JSONObject jsonResponse = response.asJSONObject(); checkKeys(jsonResponse, "encoding", "maxBatchSize", "sobjects"); checkKeys(jsonResponse.getJSONArray("sobjects").getJSONObject(0), "name", "label", "custom", "keyPrefix"); } /** * Testing a describe global async call to the server - check response * @throws Exception */ public void testDescribeGlobalAsync() throws Exception { RestResponse response = sendAsync(restClient, RestRequest.getRequestForDescribeGlobal(TestCredentials.API_VERSION)); checkResponse(response, HttpURLConnection.HTTP_OK, false); JSONObject jsonResponse = response.asJSONObject(); checkKeys(jsonResponse, "encoding", "maxBatchSize", "sobjects"); checkKeys(jsonResponse.getJSONArray("sobjects").getJSONObject(0), "name", "label", "custom", "keyPrefix"); } /** * Testing a metadata call to the server - check response * @throws Exception */ public void testMetadata() throws Exception { RestResponse response = restClient.sendSync(RestRequest.getRequestForMetadata(TestCredentials.API_VERSION, "account")); checkResponse(response, HttpURLConnection.HTTP_OK, false); JSONObject jsonResponse = response.asJSONObject(); checkKeys(jsonResponse, "objectDescribe", "recentItems"); checkKeys(jsonResponse.getJSONObject("objectDescribe"), "name", "label", "keyPrefix"); assertEquals("Wrong object name", "Account", jsonResponse.getJSONObject("objectDescribe").getString("name")); } /** * Testing a describe call to the server - check response * @throws Exception */ public void testDescribe() throws Exception { RestResponse response = restClient.sendSync(RestRequest.getRequestForDescribe(TestCredentials.API_VERSION, "account")); checkResponse(response, HttpURLConnection.HTTP_OK, false); JSONObject jsonResponse = response.asJSONObject(); checkKeys(jsonResponse, "name", "fields", "urls", "label"); assertEquals("Wrong object name", "Account", jsonResponse.getString("name")); } /** * Testing a create call to the server - check response * @throws Exception */ public void testCreate() throws Exception { Map<String, Object> fields = new HashMap<String, Object>(); String newAccountName = ENTITY_NAME_PREFIX + System.nanoTime(); fields.put("name", newAccountName); RestResponse response = restClient.sendSync(RestRequest.getRequestForCreate(TestCredentials.API_VERSION, "account", fields)); JSONObject jsonResponse = response.asJSONObject(); checkKeys(jsonResponse, "id", "errors", "success"); assertTrue("Create failed", jsonResponse.getBoolean("success")); } /** * Testing a retrieve call to the server. * Create new account then retrieve it. 
* @throws Exception */ public void testRetrieve() throws Exception { List<String> fields = Arrays.asList(new String[] {"name", "ownerId"}); IdName newAccountIdName = createAccount(); RestResponse response = restClient.sendSync(RestRequest.getRequestForRetrieve(TestCredentials.API_VERSION, "account", newAccountIdName.id, fields)); checkResponse(response, HttpURLConnection.HTTP_OK, false); JSONObject jsonResponse = response.asJSONObject(); checkKeys(jsonResponse, "attributes", "Name", "OwnerId", "Id"); assertEquals("Wrong row returned", newAccountIdName.name, jsonResponse.getString("Name")); } /** * Testing an update call to the server. * Create new account then update it then get it back * @throws Exception */ public void testUpdate() throws Exception { // Create IdName newAccountIdName = createAccount(); // Update Map<String, Object> fields = new HashMap<String, Object>(); String updatedAccountName = ENTITY_NAME_PREFIX + "-" + System.nanoTime(); fields.put("name", updatedAccountName); RestResponse updateResponse = restClient.sendSync(RestRequest.getRequestForUpdate(TestCredentials.API_VERSION, "account", newAccountIdName.id, fields)); assertTrue("Update failed", updateResponse.isSuccess()); // Retrieve - expect updated name RestResponse response = restClient.sendSync(RestRequest.getRequestForRetrieve(TestCredentials.API_VERSION, "account", newAccountIdName.id, Arrays.asList(new String[]{"name"}))); assertEquals("Wrong row returned", updatedAccountName, response.asJSONObject().getString("Name")); } /** * Testing a delete call to the server. * Create new account then delete it then try to retrieve it again (expect 404). * @throws Exception */ public void testDelete() throws Exception { // Create IdName newAccountIdName = createAccount(); // Delete RestResponse deleteResponse = restClient.sendSync(RestRequest.getRequestForDelete(TestCredentials.API_VERSION, "account", newAccountIdName.id)); assertTrue("Delete failed", deleteResponse.isSuccess()); // Retrieve - expect 404 List<String> fields = Arrays.asList(new String[] {"name"}); RestResponse response = restClient.sendSync(RestRequest.getRequestForRetrieve(TestCredentials.API_VERSION, "account", newAccountIdName.id, fields)); assertEquals("404 was expected", HttpURLConnection.HTTP_NOT_FOUND, response.getStatusCode()); } /** * Testing a query call to the server. * Create new account then look for it using soql. * @throws Exception */ public void testQuery() throws Exception { IdName newAccountIdName = createAccount(); RestResponse response = restClient.sendSync(RestRequest.getRequestForQuery(TestCredentials.API_VERSION, "select name from account where id = '" + newAccountIdName.id + "'")); checkResponse(response, HttpURLConnection.HTTP_OK, false); JSONObject jsonResponse = response.asJSONObject(); checkKeys(jsonResponse, "done", "totalSize", "records"); assertEquals("Expected one row", 1, jsonResponse.getInt("totalSize")); assertEquals("Wrong row returned", newAccountIdName.name, jsonResponse.getJSONArray("records").getJSONObject(0).get("Name")); } /** * Testing a search call to the server. * Create new account then look for it using sosl. 
* @throws Exception */ public void testSearch() throws Exception { //TODO: add a test base class to supply helpers for test record creation and delete RestResponse response = restClient.sendSync(RestRequest.getRequestForSearch(TestCredentials.API_VERSION, "find {" + SEARCH_ENTITY_NAME + "}")); checkResponse(response, HttpURLConnection.HTTP_OK, true); JSONArray matchingRows = response.asJSONArray(); assertTrue("Expected at least one row returned", matchingRows.length()>0); JSONObject matchingRow = matchingRows.getJSONObject(0); checkKeys(matchingRow, "attributes", "Id"); } /** * Testing that calling consume more than once on a RestResponse doesn't throw an exception * @throws Exception */ public void testDoubleConsume() throws Exception { RestResponse response = restClient.sendSync(RestRequest.getRequestForMetadata(TestCredentials.API_VERSION, "account")); checkResponse(response, HttpURLConnection.HTTP_OK, false); try { response.consume(); response.consume(); } catch (IllegalStateException e) { fail("Calling consume should not have thrown an exception"); } } /** * Testing doing a sync request against a non-Salesforce public API with a RestClient that uses an UnauthenticatedClientInfo * @return * @throws Exception */ public void testRestClientUnauthenticatedlientInfo() throws Exception { RestClient unauthenticatedRestClient = new RestClient(new RestClient.UnauthenticatedClientInfo(), null, HttpAccess.DEFAULT, null); RestRequest request = new RestRequest(RestMethod.GET, "https://api.spotify.com/v1/search?q=James%20Brown&type=artist", null); RestResponse response = unauthenticatedRestClient.sendSync(request); checkResponse(response, HttpURLConnection.HTTP_OK, false); JSONObject jsonResponse = response.asJSONObject(); checkKeys(jsonResponse, "artists"); checkKeys(jsonResponse.getJSONObject("artists"), "href", "items", "limit", "next", "offset", "previous", "total"); } /** * Testing doing an async request against a non-Salesforce public API with a RestClient that uses an UnauthenticatedClientInfo * @return * @throws Exception */ public void testRestClientUnauthenticatedlientInfoAsync() throws Exception { RestClient unauthenticatedRestClient = new RestClient(new RestClient.UnauthenticatedClientInfo(), null, HttpAccess.DEFAULT, null); RestRequest request = new RestRequest(RestMethod.GET, "https://api.spotify.com/v1/search?q=James%20Brown&type=artist", null); RestResponse response = sendAsync(unauthenticatedRestClient, request); checkResponse(response, HttpURLConnection.HTTP_OK, false); JSONObject jsonResponse = response.asJSONObject(); checkKeys(jsonResponse, "artists"); checkKeys(jsonResponse.getJSONObject("artists"), "href", "items", "limit", "next", "offset", "previous", "total"); } /** * Tests if a stream from {@link RestResponse#asInputStream()} is readable. * * @throws Exception */ public void testResponseStreamIsReadable() throws Exception { final RestResponse response = getStreamTestResponse(); try { InputStream in = response.asInputStream(); assertStreamTestResponseStreamIsValid(in); } catch (IOException e) { fail("The InputStream should be readable and an IOException should not have been thrown"); } catch (JSONException e) { fail("Valid JSON data should have been returned"); } finally { response.consumeQuietly(); } } /** * Tests if a stream from {@link RestResponse#asInputStream()} is consumed (according to the REST client) by fully reading the stream. 
* * @throws Exception */ public void testResponseStreamConsumedByReadingStream() throws Exception { final RestResponse response = getStreamTestResponse(); try { InputStream in = response.asInputStream(); inputStreamToString(in); } catch (IOException e) { fail("The InputStream should be readable and an IOException should not have been thrown"); } // We read the entire stream but forgot to call consume() or consumeQuietly() - can another REST call be made? final RestResponse anotherResponse = getStreamTestResponse(); assertNotNull(anotherResponse); } /** * Tests that a stream from {@link RestResponse#asInputStream()} cannot be read from twice. * * @throws Exception */ public void testResponseStreamCannotBeReadTwice() throws Exception { final RestResponse response = getStreamTestResponse(); try { final InputStream in = response.asInputStream(); inputStreamToString(in); } catch (IOException e) { fail("The InputStream should be readable and an IOException should not have been thrown"); } try { response.asInputStream(); fail("An IOException should have been thrown while trying to read the InputStream a second time"); } catch (IOException e) { // Expected } finally { response.consumeQuietly(); } } /** * Tests that {@link RestResponse}'s accessor methods (like {@link RestResponse#asBytes()} do not return valid data if the response is streamed first. * * @throws Exception */ public void testOtherAccessorsNotAvailableAfterResponseStreaming() throws Exception { final RestResponse response = getStreamTestResponse(); final Runnable testAccessorsNotAccessible = new Runnable() { @Override public void run() { try { // The other accessors should not return valid data as soon as the stream is opened assertNotNull(response.asBytes()); assertEquals("asBytes() array should be empty", 0, response.asBytes().length); assertEquals("asString() should return the empty string", "", response.asString()); try { assertNull(response.asJSONObject()); fail("asJSONObject() should fail"); } catch (JSONException e) { // Expected } try { assertNull(response.asJSONArray()); fail("asJSONArray() should fail"); } catch (JSONException e) { // Expected } } catch (IOException e) { fail("IOException not expected"); } } }; try { response.asInputStream(); testAccessorsNotAccessible.run(); } catch (IOException e) { fail("The InputStream should be readable and an IOException should not have been thrown"); } finally { response.consumeQuietly(); } // Ensure that consuming the stream doesn't make the accessors accessible again testAccessorsNotAccessible.run(); } /** * Tests that any call to {@link RestResponse}'s accessor methods prevent the response data from being streamed via {@link RestResponse#asInputStream()}. * * @throws Exception */ public void testAccessorMethodsPreventResponseStreaming() throws Exception { final RestResponse response = getStreamTestResponse(); response.asBytes(); try { response.asInputStream(); fail("The InputStream should not be readable after an accessor method is called"); } catch (IOException e) { // Expected } finally { response.consumeQuietly(); } } // // Helper methods // /** * @return a {@link RestResponse} for testing streaming. It should contain some JSON data. 
* @throws IOException if the response could not be made */ private RestResponse getStreamTestResponse() throws IOException { final RestResponse response = restClient.sendSync(RestRequest.getRequestForResources(TestCredentials.API_VERSION)); assertEquals("Response code should be HTTP OK", response.getStatusCode(), HttpURLConnection.HTTP_OK); return response; } /** * Assert that the {@link RestResponse} returned from {@link #getStreamTestResponse()} is valid. * @param in the {@link InputStream} of response data * @throws IOException if the stream could not be read * @throws JSONException if the response could not be decoded to a valid JSON object */ private void assertStreamTestResponseStreamIsValid(InputStream in) throws IOException, JSONException { final String responseData = inputStreamToString(in); assertNotNull("The response should contain data", responseData); final JSONObject responseJson = new JSONObject(responseData); checkKeys(responseJson, "sobjects", "search", "recent"); } private String inputStreamToString(InputStream inputStream) throws IOException { StringBuilder builder = new StringBuilder(); BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8)); String line; while ((line = reader.readLine()) != null) { builder.append(line); } return builder.toString(); } /** * Send request using sendAsync method * @param client * @param request * @return * @throws InterruptedException */ private RestResponse sendAsync(RestClient client, RestRequest request) throws InterruptedException { final BlockingQueue<RestResponse> responseBlockingQueue = new ArrayBlockingQueue<>(1); client.sendAsync(request, new RestClient.AsyncRequestCallback() { @Override public void onSuccess(RestRequest request, RestResponse response) { responseBlockingQueue.add(response); } @Override public void onError(Exception exception) { responseBlockingQueue.add(null); } }); return responseBlockingQueue.poll(30, TimeUnit.SECONDS); } /** * Helper method to create an account with a unique name and return its name and id */ private IdName createAccount() throws Exception { Map<String, Object> fields = new HashMap<String, Object>(); String newAccountName = ENTITY_NAME_PREFIX + "-" + System.nanoTime(); fields.put("name", newAccountName); RestResponse response = restClient.sendSync(RestRequest.getRequestForCreate(TestCredentials.API_VERSION, "account", fields)); String newAccountId = response.asJSONObject().getString("id"); return new IdName(newAccountId, newAccountName); } /** * Helper method to delete any entities created by one of the tests */ private void cleanup() { try { RestResponse searchResponse = restClient.sendSync(RestRequest.getRequestForSearch(TestCredentials.API_VERSION, "find {" + ENTITY_NAME_PREFIX + "}")); JSONArray matchingRows = searchResponse.asJSONArray(); for (int i = 0; i < matchingRows.length(); i++) { JSONObject matchingRow = matchingRows.getJSONObject(i); String matchingRowType = matchingRow.getJSONObject("attributes").getString("type"); String matchingRowId = matchingRow.getString("Id"); restClient.sendSync(RestRequest.getRequestForDelete(TestCredentials.API_VERSION, matchingRowType, matchingRowId)); } } catch(Exception e) { // We tried our best :-( } } /** * Helper method to validate responses * @param response * @param expectedStatusCode */ private void checkResponse(RestResponse response, int expectedStatusCode, boolean isJsonArray) { // Check status code assertEquals(expectedStatusCode + " response expected", expectedStatusCode, 
response.getStatusCode()); // Try to parse as json try { if (isJsonArray) { response.asJSONArray(); } else { response.asJSONObject(); } } catch (Exception e) { fail("Failed to parse response body"); e.printStackTrace(); } } /** * Helper method to check if a jsonObject has all the expected keys * @param jsonObject * @param expectedKeys */ private void checkKeys(JSONObject jsonObject, String... expectedKeys) { for (String expectedKey : expectedKeys) { assertTrue("Object should have key: " + expectedKey, jsonObject.has(expectedKey)); } } /** * Helper class to hold name and id */ private static class IdName { public final String id; public final String name; public IdName(String id, String name) { this.id = id; this.name = name; } } }
1
15,880
These tests actually go to the server.
forcedotcom-SalesforceMobileSDK-Android
java
@@ -147,6 +147,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests { var onStartingCalled = false; var onCompletedCalled = false; + var onCompleted = Task.Run(() => onCompletedCalled = true); var hostBuilder = TransportSelector.GetWebHostBuilder() .UseKestrel()
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; using System.Net; using System.Net.Http; using System.Net.Security; using System.Net.Sockets; using System.Security.Authentication; using System.Security.Cryptography.X509Certificates; using System.Text; using System.Threading; using System.Threading.Tasks; using Microsoft.AspNetCore.Builder; using Microsoft.AspNetCore.Hosting; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Http.Features; using Microsoft.AspNetCore.Server.Kestrel.Core; using Microsoft.AspNetCore.Server.Kestrel.Core.Adapter.Internal; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure; using Microsoft.AspNetCore.Server.Kestrel.Https; using Microsoft.AspNetCore.Server.Kestrel.Https.Internal; using Microsoft.AspNetCore.Testing; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Logging.Testing; using Microsoft.Extensions.Primitives; using Moq; using Xunit; using Xunit.Abstractions; namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests { public class ResponseTests : LoggedTest { public static TheoryData<ListenOptions> ConnectionAdapterData => new TheoryData<ListenOptions> { new ListenOptions(new IPEndPoint(IPAddress.Loopback, 0)), new ListenOptions(new IPEndPoint(IPAddress.Loopback, 0)) { ConnectionAdapters = { new PassThroughConnectionAdapter() } } }; public ResponseTests(ITestOutputHelper outputHelper) : base(outputHelper) { } [Fact] public async Task LargeDownload() { var hostBuilder = TransportSelector.GetWebHostBuilder() .UseKestrel() .UseUrls("http://127.0.0.1:0/") .Configure(app => { app.Run(async context => { var bytes = new byte[1024]; for (int i = 0; i < bytes.Length; i++) { bytes[i] = (byte)i; } context.Response.ContentLength = bytes.Length * 1024; for (int i = 0; i < 1024; i++) { await context.Response.Body.WriteAsync(bytes, 0, bytes.Length); } }); }); using (var host = hostBuilder.Build()) { host.Start(); using (var client = new HttpClient()) { var response = await client.GetAsync($"http://127.0.0.1:{host.GetPort()}/"); response.EnsureSuccessStatusCode(); var responseBody = await response.Content.ReadAsStreamAsync(); // Read the full response body var total = 0; var bytes = new byte[1024]; var count = await responseBody.ReadAsync(bytes, 0, bytes.Length); while (count > 0) { for (int i = 0; i < count; i++) { Assert.Equal(total % 256, bytes[i]); total++; } count = await responseBody.ReadAsync(bytes, 0, bytes.Length); } } } } [Theory, MemberData(nameof(NullHeaderData))] public async Task IgnoreNullHeaderValues(string headerName, StringValues headerValue, string expectedValue) { var hostBuilder = TransportSelector.GetWebHostBuilder() .UseKestrel() .UseUrls("http://127.0.0.1:0/") .Configure(app => { app.Run(async context => { context.Response.Headers.Add(headerName, headerValue); await context.Response.WriteAsync(""); }); }); using (var host = hostBuilder.Build()) { host.Start(); using (var client = new HttpClient()) { var response = await client.GetAsync($"http://127.0.0.1:{host.GetPort()}/"); response.EnsureSuccessStatusCode(); var headers = response.Headers; if (expectedValue == null) { Assert.False(headers.Contains(headerName)); 
} else { Assert.True(headers.Contains(headerName)); Assert.Equal(headers.GetValues(headerName).Single(), expectedValue); } } } } [Fact] public async Task OnCompleteCalledEvenWhenOnStartingNotCalled() { var onStartingCalled = false; var onCompletedCalled = false; var hostBuilder = TransportSelector.GetWebHostBuilder() .UseKestrel() .UseUrls("http://127.0.0.1:0/") .Configure(app => { app.Run(context => { context.Response.OnStarting(() => Task.Run(() => onStartingCalled = true)); context.Response.OnCompleted(() => Task.Run(() => onCompletedCalled = true)); // Prevent OnStarting call (see HttpProtocol.ProcessRequestsAsync()). throw new Exception(); }); }); using (var host = hostBuilder.Build()) { host.Start(); using (var client = new HttpClient()) { var response = await client.GetAsync($"http://127.0.0.1:{host.GetPort()}/"); Assert.Equal(HttpStatusCode.InternalServerError, response.StatusCode); Assert.False(onStartingCalled); Assert.True(onCompletedCalled); } } } [Fact] public async Task OnStartingThrowsWhenSetAfterResponseHasAlreadyStarted() { InvalidOperationException ex = null; var hostBuilder = TransportSelector.GetWebHostBuilder() .UseKestrel() .UseUrls("http://127.0.0.1:0/") .Configure(app => { app.Run(async context => { await context.Response.WriteAsync("hello, world"); await context.Response.Body.FlushAsync(); ex = Assert.Throws<InvalidOperationException>(() => context.Response.OnStarting(_ => Task.CompletedTask, null)); }); }); using (var host = hostBuilder.Build()) { host.Start(); using (var client = new HttpClient()) { var response = await client.GetAsync($"http://127.0.0.1:{host.GetPort()}/"); // Despite the error, the response had already started Assert.Equal(HttpStatusCode.OK, response.StatusCode); Assert.NotNull(ex); } } } [Fact] public Task ResponseStatusCodeSetBeforeHttpContextDisposeAppException() { return ResponseStatusCodeSetBeforeHttpContextDispose( context => { throw new Exception(); }, expectedClientStatusCode: HttpStatusCode.InternalServerError, expectedServerStatusCode: HttpStatusCode.InternalServerError); } [Fact] public Task ResponseStatusCodeSetBeforeHttpContextDisposeRequestAborted() { return ResponseStatusCodeSetBeforeHttpContextDispose( context => { context.Abort(); return Task.CompletedTask; }, expectedClientStatusCode: null, expectedServerStatusCode: 0); } [Fact] public Task ResponseStatusCodeSetBeforeHttpContextDisposeRequestAbortedAppException() { return ResponseStatusCodeSetBeforeHttpContextDispose( context => { context.Abort(); throw new Exception(); }, expectedClientStatusCode: null, expectedServerStatusCode: 0); } [Fact] public Task ResponseStatusCodeSetBeforeHttpContextDisposedRequestMalformed() { return ResponseStatusCodeSetBeforeHttpContextDispose( context => { return Task.CompletedTask; }, expectedClientStatusCode: HttpStatusCode.OK, expectedServerStatusCode: HttpStatusCode.OK, sendMalformedRequest: true); } [Fact] public Task ResponseStatusCodeSetBeforeHttpContextDisposedRequestMalformedRead() { return ResponseStatusCodeSetBeforeHttpContextDispose( async context => { await context.Request.Body.ReadAsync(new byte[1], 0, 1); }, expectedClientStatusCode: null, expectedServerStatusCode: HttpStatusCode.BadRequest, sendMalformedRequest: true); } [Fact] public Task ResponseStatusCodeSetBeforeHttpContextDisposedRequestMalformedReadIgnored() { return ResponseStatusCodeSetBeforeHttpContextDispose( async context => { try { await context.Request.Body.ReadAsync(new byte[1], 0, 1); } catch (BadHttpRequestException) { } }, expectedClientStatusCode: 
HttpStatusCode.OK, expectedServerStatusCode: HttpStatusCode.OK, sendMalformedRequest: true); } private static async Task ResponseStatusCodeSetBeforeHttpContextDispose( RequestDelegate handler, HttpStatusCode? expectedClientStatusCode, HttpStatusCode expectedServerStatusCode, bool sendMalformedRequest = false) { var mockHttpContextFactory = new Mock<IHttpContextFactory>(); mockHttpContextFactory.Setup(f => f.Create(It.IsAny<IFeatureCollection>())) .Returns<IFeatureCollection>(fc => new DefaultHttpContext(fc)); var disposedTcs = new TaskCompletionSource<int>(); mockHttpContextFactory.Setup(f => f.Dispose(It.IsAny<HttpContext>())) .Callback<HttpContext>(c => { disposedTcs.TrySetResult(c.Response.StatusCode); }); var sink = new TestSink(); var logger = new TestLogger("TestLogger", sink, enabled: true); using (var server = new TestServer(handler, new TestServiceContext() { Log = new KestrelTrace(logger) }, new ListenOptions(new IPEndPoint(IPAddress.Loopback, 0)), services => services.AddSingleton(mockHttpContextFactory.Object))) { if (!sendMalformedRequest) { using (var client = new HttpClient()) { try { var response = await client.GetAsync($"http://127.0.0.1:{server.Port}/"); Assert.Equal(expectedClientStatusCode, response.StatusCode); } catch { if (expectedClientStatusCode != null) { throw; } } } } else { using (var connection = new TestConnection(server.Port)) { await connection.Send( "POST / HTTP/1.1", "Host:", "Transfer-Encoding: chunked", "", "gg"); if (expectedClientStatusCode == HttpStatusCode.OK) { await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", ""); } else { await connection.ReceiveForcedEnd( "HTTP/1.1 400 Bad Request", "Connection: close", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", ""); } } } var disposedStatusCode = await disposedTcs.Task.TimeoutAfter(TestConstants.DefaultTimeout); Assert.Equal(expectedServerStatusCode, (HttpStatusCode)disposedStatusCode); } if (sendMalformedRequest) { Assert.Contains(sink.Writes, w => w.EventId.Id == 17 && w.LogLevel == LogLevel.Information && w.Exception is BadHttpRequestException && ((BadHttpRequestException)w.Exception).StatusCode == StatusCodes.Status400BadRequest); } else { Assert.DoesNotContain(sink.Writes, w => w.EventId.Id == 17 && w.LogLevel == LogLevel.Information && w.Exception is BadHttpRequestException && ((BadHttpRequestException)w.Exception).StatusCode == StatusCodes.Status400BadRequest); } } // https://github.com/aspnet/KestrelHttpServer/pull/1111/files#r80584475 explains the reason for this test. 
[Fact] public async Task NoErrorResponseSentWhenAppSwallowsBadRequestException() { BadHttpRequestException readException = null; var sink = new TestSink(); var logger = new TestLogger("TestLogger", sink, enabled: true); using (var server = new TestServer(async httpContext => { readException = await Assert.ThrowsAsync<BadHttpRequestException>( async () => await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1)); }, new TestServiceContext() { Log = new KestrelTrace(logger) })) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.1", "Host:", "Transfer-Encoding: chunked", "", "gg"); await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", ""); } } Assert.NotNull(readException); Assert.Contains(sink.Writes, w => w.EventId.Id == 17 && w.LogLevel == LogLevel.Information && w.Exception is BadHttpRequestException && ((BadHttpRequestException)w.Exception).StatusCode == StatusCodes.Status400BadRequest); } [Fact] public async Task TransferEncodingChunkedSetOnUnknownLengthHttp11Response() { using (var server = new TestServer(async httpContext => { await httpContext.Response.WriteAsync("hello, "); await httpContext.Response.WriteAsync("world"); })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Transfer-Encoding: chunked", "", "7", "hello, ", "5", "world", "0", "", ""); } } } [Theory] [InlineData(StatusCodes.Status204NoContent)] [InlineData(StatusCodes.Status205ResetContent)] [InlineData(StatusCodes.Status304NotModified)] public async Task TransferEncodingChunkedNotSetOnNonBodyResponse(int statusCode) { using (var server = new TestServer(httpContext => { httpContext.Response.StatusCode = statusCode; return Task.CompletedTask; })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( $"HTTP/1.1 {Encoding.ASCII.GetString(ReasonPhrases.ToStatusBytes(statusCode))}", $"Date: {server.Context.DateHeaderValue}", "", ""); } } } [Fact] public async Task TransferEncodingNotSetOnHeadResponse() { using (var server = new TestServer(httpContext => { return Task.CompletedTask; })) { using (var connection = server.CreateConnection()) { await connection.Send( "HEAD / HTTP/1.1", "Host:", "", ""); await connection.Receive( $"HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "", ""); } } } [Fact] public async Task ResponseBodyNotWrittenOnHeadResponseAndLoggedOnlyOnce() { const string response = "hello, world"; var logTcs = new TaskCompletionSource<object>(); var mockKestrelTrace = new Mock<IKestrelTrace>(); mockKestrelTrace .Setup(trace => trace.ConnectionHeadResponseBodyWrite(It.IsAny<string>(), response.Length)) .Callback<string, long>((connectionId, count) => logTcs.SetResult(null)); using (var server = new TestServer(async httpContext => { await httpContext.Response.WriteAsync(response); await httpContext.Response.Body.FlushAsync(); }, new TestServiceContext { Log = mockKestrelTrace.Object })) { using (var connection = server.CreateConnection()) { await connection.Send( "HEAD / HTTP/1.1", "Host:", "", ""); await connection.Receive( $"HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "", ""); // Wait for message to be logged before disposing the socket. 
// Disposing the socket will abort the connection and HttpProtocol._requestAborted // might be 1 by the time ProduceEnd() gets called and the message is logged. await logTcs.Task.TimeoutAfter(TestConstants.DefaultTimeout); } } mockKestrelTrace.Verify(kestrelTrace => kestrelTrace.ConnectionHeadResponseBodyWrite(It.IsAny<string>(), response.Length), Times.Once); } [Fact] public async Task ThrowsAndClosesConnectionWhenAppWritesMoreThanContentLengthWrite() { var testLogger = new TestApplicationErrorLogger(); var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger), ServerOptions = { AllowSynchronousIO = true } }; using (var server = new TestServer(httpContext => { httpContext.Response.ContentLength = 11; httpContext.Response.Body.Write(Encoding.ASCII.GetBytes("hello,"), 0, 6); httpContext.Response.Body.Write(Encoding.ASCII.GetBytes(" world"), 0, 6); return Task.CompletedTask; }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( $"HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 11", "", "hello,"); await connection.WaitForConnectionClose().TimeoutAfter(TestConstants.DefaultTimeout); } } var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error); Assert.Equal( $"Response Content-Length mismatch: too many bytes written (12 of 11).", logMessage.Exception.Message); } [Fact] public async Task ThrowsAndClosesConnectionWhenAppWritesMoreThanContentLengthWriteAsync() { var testLogger = new TestApplicationErrorLogger(); var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) }; using (var server = new TestServer(async httpContext => { httpContext.Response.ContentLength = 11; await httpContext.Response.WriteAsync("hello,"); await httpContext.Response.WriteAsync(" world"); }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveForcedEnd( $"HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 11", "", "hello,"); } } var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error); Assert.Equal( $"Response Content-Length mismatch: too many bytes written (12 of 11).", logMessage.Exception.Message); } [Fact] public async Task InternalServerErrorAndConnectionClosedOnWriteWithMoreThanContentLengthAndResponseNotStarted() { var testLogger = new TestApplicationErrorLogger(); var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger), ServerOptions = { AllowSynchronousIO = true } }; using (var server = new TestServer(httpContext => { var response = Encoding.ASCII.GetBytes("hello, world"); httpContext.Response.ContentLength = 5; httpContext.Response.Body.Write(response, 0, response.Length); return Task.CompletedTask; }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveForcedEnd( $"HTTP/1.1 500 Internal Server Error", "Connection: close", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", ""); } } var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error); Assert.Equal( $"Response Content-Length mismatch: too many bytes written (12 of 5).", logMessage.Exception.Message); } [Fact] public async Task 
InternalServerErrorAndConnectionClosedOnWriteAsyncWithMoreThanContentLengthAndResponseNotStarted() { var testLogger = new TestApplicationErrorLogger(); var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) }; using (var server = new TestServer(httpContext => { var response = Encoding.ASCII.GetBytes("hello, world"); httpContext.Response.ContentLength = 5; return httpContext.Response.Body.WriteAsync(response, 0, response.Length); }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveForcedEnd( $"HTTP/1.1 500 Internal Server Error", "Connection: close", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", ""); } } var logMessage = Assert.Single(testLogger.Messages, message => message.LogLevel == LogLevel.Error); Assert.Equal( $"Response Content-Length mismatch: too many bytes written (12 of 5).", logMessage.Exception.Message); } [Fact] public async Task WhenAppWritesLessThanContentLengthErrorLogged() { var logTcs = new TaskCompletionSource<object>(); var mockTrace = new Mock<IKestrelTrace>(); mockTrace .Setup(trace => trace.ApplicationError(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<InvalidOperationException>())) .Callback<string, string, Exception>((connectionId, requestId, ex) => { logTcs.SetResult(null); }); using (var server = new TestServer(async httpContext => { httpContext.Response.ContentLength = 13; await httpContext.Response.WriteAsync("hello, world"); }, new TestServiceContext { Log = mockTrace.Object })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); // Don't use ReceiveEnd here, otherwise the FIN might // abort the request before the server checks the // response content length, in which case the check // will be skipped. await connection.Receive( $"HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 13", "", "hello, world"); // Wait for error message to be logged. await logTcs.Task.TimeoutAfter(TestConstants.DefaultTimeout); // The server should close the connection in this situation. await connection.WaitForConnectionClose().TimeoutAfter(TestConstants.DefaultTimeout); } } mockTrace.Verify(trace => trace.ApplicationError( It.IsAny<string>(), It.IsAny<string>(), It.Is<InvalidOperationException>(ex => ex.Message.Equals($"Response Content-Length mismatch: too few bytes written (12 of 13).", StringComparison.Ordinal)))); } [Fact] public async Task WhenAppWritesLessThanContentLengthButRequestIsAbortedErrorNotLogged() { var requestAborted = new SemaphoreSlim(0); var mockTrace = new Mock<IKestrelTrace>(); using (var server = new TestServer(async httpContext => { httpContext.RequestAborted.Register(() => { requestAborted.Release(2); }); httpContext.Response.ContentLength = 12; await httpContext.Response.WriteAsync("hello,"); // Wait until the request is aborted so we know HttpProtocol will skip the response content length check. Assert.True(await requestAborted.WaitAsync(TestConstants.DefaultTimeout)); }, new TestServiceContext { Log = mockTrace.Object })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( $"HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 12", "", "hello,"); } // Verify the request was really aborted. 
A timeout in // the app would cause a server error and skip the content length // check altogether, making the test pass for the wrong reason. // Await before disposing the server to prevent races between the // abort triggered by the connection RST and the abort called when // disposing the server. Assert.True(await requestAborted.WaitAsync(TestConstants.DefaultTimeout)); } // With the server disposed we know all connections were drained and all messages were logged. mockTrace.Verify(trace => trace.ApplicationError(It.IsAny<string>(), It.IsAny<string>(), It.IsAny<InvalidOperationException>()), Times.Never); } [Fact] public async Task WhenAppSetsContentLengthButDoesNotWriteBody500ResponseSentAndConnectionDoesNotClose() { var testLogger = new TestApplicationErrorLogger(); var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) }; using (var server = new TestServer(httpContext => { httpContext.Response.ContentLength = 5; return Task.CompletedTask; }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 500 Internal Server Error", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", "HTTP/1.1 500 Internal Server Error", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", ""); } } var error = testLogger.Messages.Where(message => message.LogLevel == LogLevel.Error); Assert.Equal(2, error.Count()); Assert.All(error, message => message.Equals("Response Content-Length mismatch: too few bytes written (0 of 5).")); } [Theory] [InlineData(false)] [InlineData(true)] public async Task WhenAppSetsContentLengthToZeroAndDoesNotWriteNoErrorIsThrown(bool flushResponse) { var testLogger = new TestApplicationErrorLogger(); var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) }; using (var server = new TestServer(async httpContext => { httpContext.Response.ContentLength = 0; if (flushResponse) { await httpContext.Response.Body.FlushAsync(); } }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( $"HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", ""); } } Assert.Equal(0, testLogger.ApplicationErrorsLogged); } // https://tools.ietf.org/html/rfc7230#section-3.3.3 // If a message is received with both a Transfer-Encoding and a // Content-Length header field, the Transfer-Encoding overrides the // Content-Length. 
[Fact] public async Task WhenAppSetsTransferEncodingAndContentLengthWritingLessIsNotAnError() { var testLogger = new TestApplicationErrorLogger(); var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) }; using (var server = new TestServer(async httpContext => { httpContext.Response.Headers["Transfer-Encoding"] = "chunked"; httpContext.Response.ContentLength = 13; await httpContext.Response.WriteAsync("hello, world"); }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( $"HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 13", "Transfer-Encoding: chunked", "", "hello, world"); } } Assert.Equal(0, testLogger.ApplicationErrorsLogged); } // https://tools.ietf.org/html/rfc7230#section-3.3.3 // If a message is received with both a Transfer-Encoding and a // Content-Length header field, the Transfer-Encoding overrides the // Content-Length. [Fact] public async Task WhenAppSetsTransferEncodingAndContentLengthWritingMoreIsNotAnError() { var testLogger = new TestApplicationErrorLogger(); var serviceContext = new TestServiceContext { Log = new TestKestrelTrace(testLogger) }; using (var server = new TestServer(async httpContext => { httpContext.Response.Headers["Transfer-Encoding"] = "chunked"; httpContext.Response.ContentLength = 11; await httpContext.Response.WriteAsync("hello, world"); }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( $"HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 11", "Transfer-Encoding: chunked", "", "hello, world"); } } Assert.Equal(0, testLogger.ApplicationErrorsLogged); } [Fact] public async Task HeadResponseCanContainContentLengthHeader() { using (var server = new TestServer(httpContext => { httpContext.Response.ContentLength = 42; return Task.CompletedTask; })) { using (var connection = server.CreateConnection()) { await connection.Send( "HEAD / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 42", "", ""); } } } [Fact] public async Task HeadResponseBodyNotWrittenWithAsyncWrite() { var flushed = new SemaphoreSlim(0, 1); using (var server = new TestServer(async httpContext => { httpContext.Response.ContentLength = 12; await httpContext.Response.WriteAsync("hello, world"); await flushed.WaitAsync(); })) { using (var connection = server.CreateConnection()) { await connection.Send( "HEAD / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 12", "", ""); flushed.Release(); } } } [Fact] public async Task HeadResponseBodyNotWrittenWithSyncWrite() { var flushed = new SemaphoreSlim(0, 1); var serviceContext = new TestServiceContext { ServerOptions = { AllowSynchronousIO = true } }; using (var server = new TestServer(httpContext => { httpContext.Response.ContentLength = 12; httpContext.Response.Body.Write(Encoding.ASCII.GetBytes("hello, world"), 0, 12); flushed.Wait(); return Task.CompletedTask; }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "HEAD / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 12", "", ""); flushed.Release(); } } } [Fact] public async Task 
ZeroLengthWritesFlushHeaders() { var flushed = new SemaphoreSlim(0, 1); using (var server = new TestServer(async httpContext => { httpContext.Response.ContentLength = 12; await httpContext.Response.WriteAsync(""); flushed.Wait(); await httpContext.Response.WriteAsync("hello, world"); })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 12", "", ""); flushed.Release(); await connection.ReceiveEnd("hello, world"); } } } [Fact] public async Task WriteAfterConnectionCloseNoops() { var connectionClosed = new ManualResetEventSlim(); var requestStarted = new ManualResetEventSlim(); var tcs = new TaskCompletionSource<object>(); using (var server = new TestServer(async httpContext => { try { requestStarted.Set(); connectionClosed.Wait(); httpContext.Response.ContentLength = 12; await httpContext.Response.WriteAsync("hello, world"); tcs.TrySetResult(null); } catch (Exception ex) { tcs.TrySetException(ex); } })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); requestStarted.Wait(); connection.Shutdown(SocketShutdown.Send); await connection.WaitForConnectionClose().TimeoutAfter(TestConstants.DefaultTimeout); } connectionClosed.Set(); await tcs.Task.TimeoutAfter(TestConstants.DefaultTimeout); } } [Fact] public async Task AppCanWriteOwnBadRequestResponse() { var expectedResponse = string.Empty; var responseWritten = new SemaphoreSlim(0); using (var server = new TestServer(async httpContext => { try { await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1); } catch (BadHttpRequestException ex) { expectedResponse = ex.Message; httpContext.Response.StatusCode = StatusCodes.Status400BadRequest; httpContext.Response.ContentLength = ex.Message.Length; await httpContext.Response.WriteAsync(ex.Message); responseWritten.Release(); } })) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.1", "Host:", "Transfer-Encoding: chunked", "", "gg"); await responseWritten.WaitAsync().TimeoutAfter(TestConstants.DefaultTimeout); await connection.ReceiveEnd( "HTTP/1.1 400 Bad Request", $"Date: {server.Context.DateHeaderValue}", $"Content-Length: {expectedResponse.Length}", "", expectedResponse); } } } [Theory] [InlineData("gzip")] [InlineData("chunked, gzip")] public async Task ConnectionClosedWhenChunkedIsNotFinalTransferCoding(string responseTransferEncoding) { using (var server = new TestServer(async httpContext => { httpContext.Response.Headers["Transfer-Encoding"] = responseTransferEncoding; await httpContext.Response.WriteAsync("hello, world"); })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", "Connection: close", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: {responseTransferEncoding}", "", "hello, world"); } using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.0", "Connection: keep-alive", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", "Connection: close", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: {responseTransferEncoding}", "", "hello, world"); } } } [Theory] [InlineData("gzip")] [InlineData("chunked, gzip")] public async Task ConnectionClosedWhenChunkedIsNotFinalTransferCodingEvenIfConnectionKeepAliveSetInResponse(string 
responseTransferEncoding) { using (var server = new TestServer(async httpContext => { httpContext.Response.Headers["Connection"] = "keep-alive"; httpContext.Response.Headers["Transfer-Encoding"] = responseTransferEncoding; await httpContext.Response.WriteAsync("hello, world"); })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", "Connection: keep-alive", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: {responseTransferEncoding}", "", "hello, world"); } using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.0", "Connection: keep-alive", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", "Connection: keep-alive", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: {responseTransferEncoding}", "", "hello, world"); } } } [Theory] [InlineData("chunked")] [InlineData("gzip, chunked")] public async Task ConnectionKeptAliveWhenChunkedIsFinalTransferCoding(string responseTransferEncoding) { using (var server = new TestServer(async httpContext => { httpContext.Response.Headers["Transfer-Encoding"] = responseTransferEncoding; // App would have to chunk manually, but here we don't care await httpContext.Response.WriteAsync("hello, world"); })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: {responseTransferEncoding}", "", "hello, world"); // Make sure connection was kept open await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveEnd( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: {responseTransferEncoding}", "", "hello, world"); } } } [Fact] public async Task FirstWriteVerifiedAfterOnStarting() { var serviceContext = new TestServiceContext { ServerOptions = { AllowSynchronousIO = true } }; using (var server = new TestServer(httpContext => { httpContext.Response.OnStarting(() => { // Change response to chunked httpContext.Response.ContentLength = null; return Task.CompletedTask; }); var response = Encoding.ASCII.GetBytes("hello, world"); httpContext.Response.ContentLength = response.Length - 1; // If OnStarting is not run before verifying writes, an error response will be sent. httpContext.Response.Body.Write(response, 0, response.Length); return Task.CompletedTask; }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: chunked", "", "c", "hello, world", "0", "", ""); } } } [Fact] public async Task SubsequentWriteVerifiedAfterOnStarting() { var serviceContext = new TestServiceContext { ServerOptions = { AllowSynchronousIO = true } }; using (var server = new TestServer(httpContext => { httpContext.Response.OnStarting(() => { // Change response to chunked httpContext.Response.ContentLength = null; return Task.CompletedTask; }); var response = Encoding.ASCII.GetBytes("hello, world"); httpContext.Response.ContentLength = response.Length - 1; // If OnStarting is not run before verifying writes, an error response will be sent. 
httpContext.Response.Body.Write(response, 0, response.Length / 2); httpContext.Response.Body.Write(response, response.Length / 2, response.Length - response.Length / 2); return Task.CompletedTask; }, serviceContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: chunked", "", "6", "hello,", "6", " world", "0", "", ""); } } } [Fact] public async Task FirstWriteAsyncVerifiedAfterOnStarting() { using (var server = new TestServer(httpContext => { httpContext.Response.OnStarting(() => { // Change response to chunked httpContext.Response.ContentLength = null; return Task.CompletedTask; }); var response = Encoding.ASCII.GetBytes("hello, world"); httpContext.Response.ContentLength = response.Length - 1; // If OnStarting is not run before verifying writes, an error response will be sent. return httpContext.Response.Body.WriteAsync(response, 0, response.Length); })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: chunked", "", "c", "hello, world", "0", "", ""); } } } [Fact] public async Task SubsequentWriteAsyncVerifiedAfterOnStarting() { using (var server = new TestServer(async httpContext => { httpContext.Response.OnStarting(() => { // Change response to chunked httpContext.Response.ContentLength = null; return Task.CompletedTask; }); var response = Encoding.ASCII.GetBytes("hello, world"); httpContext.Response.ContentLength = response.Length - 1; // If OnStarting is not run before verifying writes, an error response will be sent. await httpContext.Response.Body.WriteAsync(response, 0, response.Length / 2); await httpContext.Response.Body.WriteAsync(response, response.Length / 2, response.Length - response.Length / 2); })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: chunked", "", "6", "hello,", "6", " world", "0", "", ""); } } } [Fact] public async Task WhenResponseAlreadyStartedResponseEndedBeforeConsumingRequestBody() { using (var server = new TestServer(async httpContext => { await httpContext.Response.WriteAsync("hello, world"); })) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.1", "Host:", "Content-Length: 1", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: chunked", "", "c", "hello, world", ""); // If the expected behavior is regressed, this will hang because the // server will try to consume the request body before flushing the chunked // terminator. 
await connection.Receive( "0", "", ""); } } } [Fact] public async Task WhenResponseNotStartedResponseEndedBeforeConsumingRequestBody() { var sink = new TestSink(); var logger = new TestLogger("TestLogger", sink, enabled: true); using (var server = new TestServer(httpContext => Task.CompletedTask, new TestServiceContext() { Log = new KestrelTrace(logger) })) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.1", "Host:", "Transfer-Encoding: chunked", "", "gg"); // This will receive a success response because the server flushed the response // before reading the malformed chunk header in the request, but then it will close // the connection. await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", ""); } } Assert.Contains(sink.Writes, w => w.EventId.Id == 17 && w.LogLevel == LogLevel.Information && w.Exception is BadHttpRequestException && ((BadHttpRequestException)w.Exception).StatusCode == StatusCodes.Status400BadRequest); } [Fact] public async Task Sending100ContinueDoesNotStartResponse() { var sink = new TestSink(); var logger = new TestLogger("TestLogger", sink, enabled: true); using (var server = new TestServer(httpContext => { return httpContext.Request.Body.ReadAsync(new byte[1], 0, 1); }, new TestServiceContext() { Log = new KestrelTrace(logger) })) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.1", "Host:", "Transfer-Encoding: chunked", "Expect: 100-continue", "", ""); await connection.Receive( "HTTP/1.1 100 Continue", "", ""); // Let the app finish await connection.Send( "1", "a", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 0", "", ""); // This will be consumed by Http1Connection when it attempts to // consume the request body and will cause an error. await connection.Send( "gg"); // If 100 Continue sets HttpProtocol.HasResponseStarted to true, // a success response will be produced before the server sees the // bad chunk header above, making this test fail. await connection.ReceiveEnd(); } } Assert.Contains(sink.Writes, w => w.EventId.Id == 17 && w.LogLevel == LogLevel.Information && w.Exception is BadHttpRequestException && ((BadHttpRequestException)w.Exception).StatusCode == StatusCodes.Status400BadRequest); } [Fact] public async Task Sending100ContinueAndResponseSendsChunkTerminatorBeforeConsumingRequestBody() { using (var server = new TestServer(async httpContext => { await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1); await httpContext.Response.WriteAsync("hello, world"); })) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.1", "Host:", "Content-Length: 2", "Expect: 100-continue", "", ""); await connection.Receive( "HTTP/1.1 100 Continue", "", ""); await connection.Send( "a"); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", $"Transfer-Encoding: chunked", "", "c", "hello, world", ""); // If the expected behavior is regressed, this will hang because the // server will try to consume the request body before flushing the chunked // terminator. 
await connection.Receive( "0", "", ""); } } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task Http11ResponseSentToHttp10Request(ListenOptions listenOptions) { var serviceContext = new TestServiceContext(); using (var server = new TestServer(TestApp.EchoApp, serviceContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.0", "Content-Length: 11", "", "Hello World"); await connection.ReceiveEnd( "HTTP/1.1 200 OK", "Connection: close", $"Date: {serviceContext.DateHeaderValue}", "", "Hello World"); } } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ZeroContentLengthSetAutomaticallyAfterNoWrites(ListenOptions listenOptions) { var testContext = new TestServiceContext(); using (var server = new TestServer(TestApp.EmptyApp, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", "GET / HTTP/1.0", "Connection: keep-alive", "", ""); await connection.ReceiveEnd( "HTTP/1.1 200 OK", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", "HTTP/1.1 200 OK", "Connection: keep-alive", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", ""); } } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ZeroContentLengthSetAutomaticallyForNonKeepAliveRequests(ListenOptions listenOptions) { var testContext = new TestServiceContext(); using (var server = new TestServer(async httpContext => { Assert.Equal(0, await httpContext.Request.Body.ReadAsync(new byte[1], 0, 1).TimeoutAfter(TestConstants.DefaultTimeout)); }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "Connection: close", "", ""); await connection.ReceiveEnd( "HTTP/1.1 200 OK", "Connection: close", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", ""); } using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.0", "", ""); await connection.ReceiveEnd( "HTTP/1.1 200 OK", "Connection: close", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", ""); } } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ZeroContentLengthNotSetAutomaticallyForHeadRequests(ListenOptions listenOptions) { var testContext = new TestServiceContext(); using (var server = new TestServer(TestApp.EmptyApp, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "HEAD / HTTP/1.1", "Host:", "", ""); await connection.ReceiveEnd( "HTTP/1.1 200 OK", $"Date: {testContext.DateHeaderValue}", "", ""); } } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ZeroContentLengthNotSetAutomaticallyForCertainStatusCodes(ListenOptions listenOptions) { var testContext = new TestServiceContext(); using (var server = new TestServer(async httpContext => { var request = httpContext.Request; var response = httpContext.Response; using (var reader = new StreamReader(request.Body, Encoding.ASCII)) { var statusString = await reader.ReadLineAsync(); response.StatusCode = int.Parse(statusString); } }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.1", "Host:", "Content-Length: 3", "", "204POST / HTTP/1.1", "Host:", "Content-Length: 3", "", "205POST / HTTP/1.1", "Host:", "Content-Length: 3", "", "304POST / HTTP/1.1", "Host:", "Content-Length: 3", "", "200"); await 
connection.ReceiveEnd( "HTTP/1.1 204 No Content", $"Date: {testContext.DateHeaderValue}", "", "HTTP/1.1 205 Reset Content", $"Date: {testContext.DateHeaderValue}", "", "HTTP/1.1 304 Not Modified", $"Date: {testContext.DateHeaderValue}", "", "HTTP/1.1 200 OK", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", ""); } } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ConnectionClosedAfter101Response(ListenOptions listenOptions) { var testContext = new TestServiceContext(); using (var server = new TestServer(async httpContext => { var request = httpContext.Request; var stream = await httpContext.Features.Get<IHttpUpgradeFeature>().UpgradeAsync(); var response = Encoding.ASCII.GetBytes("hello, world"); await stream.WriteAsync(response, 0, response.Length); }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "Connection: Upgrade", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 101 Switching Protocols", "Connection: Upgrade", $"Date: {testContext.DateHeaderValue}", "", "hello, world"); } using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.0", "Connection: keep-alive, Upgrade", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 101 Switching Protocols", "Connection: Upgrade", $"Date: {testContext.DateHeaderValue}", "", "hello, world"); } } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ThrowingResultsIn500Response(ListenOptions listenOptions) { var testContext = new TestServiceContext(); bool onStartingCalled = false; var testLogger = new TestApplicationErrorLogger(); testContext.Log = new KestrelTrace(testLogger); using (var server = new TestServer(httpContext => { var response = httpContext.Response; response.OnStarting(_ => { onStartingCalled = true; return Task.CompletedTask; }, null); // Anything added to the ResponseHeaders dictionary is ignored response.Headers["Content-Length"] = "11"; throw new Exception(); }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", "GET / HTTP/1.1", "Host:", "Connection: close", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 500 Internal Server Error", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", "HTTP/1.1 500 Internal Server Error", "Connection: close", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", ""); } } Assert.False(onStartingCalled); Assert.Equal(2, testLogger.ApplicationErrorsLogged); } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ThrowingInOnStartingResultsInFailedWritesAnd500Response(ListenOptions listenOptions) { var callback1Called = false; var callback2CallCount = 0; var testContext = new TestServiceContext(); var testLogger = new TestApplicationErrorLogger(); testContext.Log = new KestrelTrace(testLogger); using (var server = new TestServer(async httpContext => { var onStartingException = new Exception(); var response = httpContext.Response; response.OnStarting(_ => { callback1Called = true; throw onStartingException; }, null); response.OnStarting(_ => { callback2CallCount++; throw onStartingException; }, null); var writeException = await Assert.ThrowsAsync<ObjectDisposedException>(async () => await response.Body.FlushAsync()); Assert.Same(onStartingException, writeException.InnerException); }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await 
connection.Send( "GET / HTTP/1.1", "Host:", "", "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveEnd( "HTTP/1.1 500 Internal Server Error", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", "HTTP/1.1 500 Internal Server Error", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", ""); } } // The first registered OnStarting callback should have been called, // since they are called LIFO order and the other one failed. Assert.False(callback1Called); Assert.Equal(2, callback2CallCount); Assert.Equal(2, testLogger.ApplicationErrorsLogged); } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ThrowingInOnCompletedIsLoggedAndClosesConnection(ListenOptions listenOptions) { var testContext = new TestServiceContext(); var onCompletedCalled1 = false; var onCompletedCalled2 = false; var testLogger = new TestApplicationErrorLogger(); testContext.Log = new KestrelTrace(testLogger); using (var server = new TestServer(async httpContext => { var response = httpContext.Response; response.OnCompleted(_ => { onCompletedCalled1 = true; throw new Exception(); }, null); response.OnCompleted(_ => { onCompletedCalled2 = true; throw new Exception(); }, null); response.Headers["Content-Length"] = new[] { "11" }; await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11); }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", $"Date: {testContext.DateHeaderValue}", "Content-Length: 11", "", "Hello World"); } } // All OnCompleted callbacks should be called even if they throw. Assert.Equal(2, testLogger.ApplicationErrorsLogged); Assert.True(onCompletedCalled1); Assert.True(onCompletedCalled2); } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ThrowingAfterWritingKillsConnection(ListenOptions listenOptions) { var testContext = new TestServiceContext(); bool onStartingCalled = false; var testLogger = new TestApplicationErrorLogger(); testContext.Log = new KestrelTrace(testLogger); using (var server = new TestServer(async httpContext => { var response = httpContext.Response; response.OnStarting(_ => { onStartingCalled = true; return Task.FromResult<object>(null); }, null); response.Headers["Content-Length"] = new[] { "11" }; await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11); throw new Exception(); }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", $"Date: {testContext.DateHeaderValue}", "Content-Length: 11", "", "Hello World"); } } Assert.True(onStartingCalled); Assert.Equal(1, testLogger.ApplicationErrorsLogged); } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ThrowingAfterPartialWriteKillsConnection(ListenOptions listenOptions) { var testContext = new TestServiceContext(); bool onStartingCalled = false; var testLogger = new TestApplicationErrorLogger(); testContext.Log = new KestrelTrace(testLogger); using (var server = new TestServer(async httpContext => { var response = httpContext.Response; response.OnStarting(_ => { onStartingCalled = true; return Task.FromResult<object>(null); }, null); response.Headers["Content-Length"] = new[] { "11" }; await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello"), 0, 5); throw new Exception(); }, testContext, listenOptions)) { using (var 
connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", $"Date: {testContext.DateHeaderValue}", "Content-Length: 11", "", "Hello"); } } Assert.True(onStartingCalled); Assert.Equal(1, testLogger.ApplicationErrorsLogged); } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ThrowsOnWriteWithRequestAbortedTokenAfterRequestIsAborted(ListenOptions listenOptions) { // This should match _maxBytesPreCompleted in SocketOutput var maxBytesPreCompleted = 65536; // Ensure string is long enough to disable write-behind buffering var largeString = new string('a', maxBytesPreCompleted + 1); var writeTcs = new TaskCompletionSource<object>(); var requestAbortedWh = new ManualResetEventSlim(); var requestStartWh = new ManualResetEventSlim(); using (var server = new TestServer(async httpContext => { requestStartWh.Set(); var response = httpContext.Response; var request = httpContext.Request; var lifetime = httpContext.Features.Get<IHttpRequestLifetimeFeature>(); lifetime.RequestAborted.Register(() => requestAbortedWh.Set()); Assert.True(requestAbortedWh.Wait(TestConstants.DefaultTimeout)); try { await response.WriteAsync(largeString, lifetime.RequestAborted); } catch (Exception ex) { writeTcs.SetException(ex); throw; } writeTcs.SetException(new Exception("This shouldn't be reached.")); }, new TestServiceContext(), listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.1", "Host:", "Content-Length: 0", "", ""); Assert.True(requestStartWh.Wait(TestConstants.DefaultTimeout)); } // Write failed - can throw TaskCanceledException or OperationCanceledException, // dependending on how far the canceled write goes. 
await Assert.ThrowsAnyAsync<OperationCanceledException>(async () => await writeTcs.Task).TimeoutAfter(TestConstants.DefaultTimeout); // RequestAborted tripped Assert.True(requestAbortedWh.Wait(TestConstants.DefaultTimeout)); } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task NoErrorsLoggedWhenServerEndsConnectionBeforeClient(ListenOptions listenOptions) { var testContext = new TestServiceContext(); var testLogger = new TestApplicationErrorLogger(); testContext.Log = new KestrelTrace(testLogger); using (var server = new TestServer(async httpContext => { var response = httpContext.Response; response.Headers["Content-Length"] = new[] { "11" }; await response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello World"), 0, 11); }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.0", "", ""); await connection.ReceiveForcedEnd( "HTTP/1.1 200 OK", "Connection: close", $"Date: {testContext.DateHeaderValue}", "Content-Length: 11", "", "Hello World"); } } Assert.Equal(0, testLogger.TotalErrorsLogged); } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task NoResponseSentWhenConnectionIsClosedByServerBeforeClientFinishesSendingRequest(ListenOptions listenOptions) { var testContext = new TestServiceContext(); using (var server = new TestServer(httpContext => { httpContext.Abort(); return Task.CompletedTask; }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "POST / HTTP/1.0", "Content-Length: 1", "", ""); await connection.ReceiveForcedEnd(); } } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task ResponseHeadersAreResetOnEachRequest(ListenOptions listenOptions) { var testContext = new TestServiceContext(); IHeaderDictionary originalResponseHeaders = null; var firstRequest = true; using (var server = new TestServer(httpContext => { var responseFeature = httpContext.Features.Get<IHttpResponseFeature>(); if (firstRequest) { originalResponseHeaders = responseFeature.Headers; responseFeature.Headers = new HttpResponseHeaders(); firstRequest = false; } else { Assert.Same(originalResponseHeaders, responseFeature.Headers); } return Task.CompletedTask; }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveEnd( "HTTP/1.1 200 OK", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", "HTTP/1.1 200 OK", $"Date: {testContext.DateHeaderValue}", "Content-Length: 0", "", ""); } } } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task OnStartingCallbacksAreCalledInLastInFirstOutOrder(ListenOptions listenOptions) { const string response = "hello, world"; var testContext = new TestServiceContext(); var callOrder = new Stack<int>(); var onStartingTcs = new TaskCompletionSource<object>(); using (var server = new TestServer(async context => { context.Response.OnStarting(_ => { callOrder.Push(1); onStartingTcs.SetResult(null); return Task.CompletedTask; }, null); context.Response.OnStarting(_ => { callOrder.Push(2); return Task.CompletedTask; }, null); context.Response.ContentLength = response.Length; await context.Response.WriteAsync(response); }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveEnd( "HTTP/1.1 200 OK", $"Date: 
{testContext.DateHeaderValue}", $"Content-Length: {response.Length}", "", "hello, world"); // Wait for all callbacks to be called. await onStartingTcs.Task.TimeoutAfter(TestConstants.DefaultTimeout); } } Assert.Equal(1, callOrder.Pop()); Assert.Equal(2, callOrder.Pop()); } [Theory] [MemberData(nameof(ConnectionAdapterData))] public async Task OnCompletedCallbacksAreCalledInLastInFirstOutOrder(ListenOptions listenOptions) { const string response = "hello, world"; var testContext = new TestServiceContext(); var callOrder = new Stack<int>(); var onCompletedTcs = new TaskCompletionSource<object>(); using (var server = new TestServer(async context => { context.Response.OnCompleted(_ => { callOrder.Push(1); onCompletedTcs.SetResult(null); return Task.CompletedTask; }, null); context.Response.OnCompleted(_ => { callOrder.Push(2); return Task.CompletedTask; }, null); context.Response.ContentLength = response.Length; await context.Response.WriteAsync(response); }, testContext, listenOptions)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.ReceiveEnd( "HTTP/1.1 200 OK", $"Date: {testContext.DateHeaderValue}", $"Content-Length: {response.Length}", "", "hello, world"); // Wait for all callbacks to be called. await onCompletedTcs.Task.TimeoutAfter(TestConstants.DefaultTimeout); } } Assert.Equal(1, callOrder.Pop()); Assert.Equal(2, callOrder.Pop()); } [Fact] public async Task SynchronousWritesAllowedByDefault() { var firstRequest = true; using (var server = new TestServer(async context => { var bodyControlFeature = context.Features.Get<IHttpBodyControlFeature>(); Assert.True(bodyControlFeature.AllowSynchronousIO); context.Response.ContentLength = 6; if (firstRequest) { context.Response.Body.Write(Encoding.ASCII.GetBytes("Hello1"), 0, 6); firstRequest = false; } else { bodyControlFeature.AllowSynchronousIO = false; // Synchronous writes now throw. var ioEx = Assert.Throws<InvalidOperationException>(() => context.Response.Body.Write(Encoding.ASCII.GetBytes("What!?"), 0, 6)); Assert.Equal(CoreStrings.SynchronousWritesDisallowed, ioEx.Message); await context.Response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello2"), 0, 6); } })) { using (var connection = server.CreateConnection()) { await connection.SendEmptyGet(); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 6", "", "Hello1"); await connection.SendEmptyGet(); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 6", "", "Hello2"); } } } [Fact] public async Task SynchronousWritesCanBeDisallowedGlobally() { var testContext = new TestServiceContext { ServerOptions = { AllowSynchronousIO = false } }; using (var server = new TestServer(context => { var bodyControlFeature = context.Features.Get<IHttpBodyControlFeature>(); Assert.False(bodyControlFeature.AllowSynchronousIO); context.Response.ContentLength = 6; // Synchronous writes now throw. 
var ioEx = Assert.Throws<InvalidOperationException>(() => context.Response.Body.Write(Encoding.ASCII.GetBytes("What!?"), 0, 6)); Assert.Equal(CoreStrings.SynchronousWritesDisallowed, ioEx.Message); return context.Response.Body.WriteAsync(Encoding.ASCII.GetBytes("Hello!"), 0, 6); }, testContext)) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 200 OK", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 6", "", "Hello!"); } } } [Fact] public void ConnectionClosedWhenResponseDoesNotSatisfyMinimumDataRate() { using (StartLog(out var loggerFactory, "ConnClosedWhenRespDoesNotSatisfyMin")) { var logger = loggerFactory.CreateLogger($"{ typeof(ResponseTests).FullName}.{ nameof(ConnectionClosedWhenResponseDoesNotSatisfyMinimumDataRate)}"); var chunkSize = 64 * 1024; var chunks = 128; var responseSize = chunks * chunkSize; var requestAborted = new ManualResetEventSlim(); var messageLogged = new ManualResetEventSlim(); var mockKestrelTrace = new Mock<KestrelTrace>(loggerFactory.CreateLogger("Microsoft.AspNetCore.Server.Kestrel")) { CallBase = true }; mockKestrelTrace .Setup(trace => trace.ResponseMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>())) .Callback(() => messageLogged.Set()); var testContext = new TestServiceContext { LoggerFactory = loggerFactory, Log = mockKestrelTrace.Object, SystemClock = new SystemClock(), ServerOptions = { Limits = { MinResponseDataRate = new MinDataRate(bytesPerSecond: double.MaxValue, gracePeriod: TimeSpan.FromSeconds(2)) } } }; var listenOptions = new ListenOptions(new IPEndPoint(IPAddress.Loopback, 0)); listenOptions.ConnectionAdapters.Add(new LoggingConnectionAdapter(loggerFactory.CreateLogger<LoggingConnectionAdapter>())); var appLogger = loggerFactory.CreateLogger("App"); async Task App(HttpContext context) { appLogger.LogInformation("Request received"); context.RequestAborted.Register(() => requestAborted.Set()); context.Response.ContentLength = responseSize; for (var i = 0; i < chunks; i++) { await context.Response.WriteAsync(new string('a', chunkSize), context.RequestAborted); appLogger.LogInformation("Wrote chunk of {chunkSize} bytes", chunkSize); } } using (var server = new TestServer(App, testContext, listenOptions)) { using (var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp)) { socket.ReceiveBufferSize = 1; socket.Connect(new IPEndPoint(IPAddress.Loopback, server.Port)); logger.LogInformation("Sending request"); socket.Send(Encoding.ASCII.GetBytes("GET / HTTP/1.1\r\nHost: \r\n\r\n")); logger.LogInformation("Sent request"); var sw = Stopwatch.StartNew(); logger.LogInformation("Waiting for connection to abort."); Assert.True(messageLogged.Wait(TimeSpan.FromSeconds(120)), "The expected message was not logged within the timeout period."); Assert.True(requestAborted.Wait(TimeSpan.FromSeconds(120)), "The request was not aborted within the timeout period."); sw.Stop(); logger.LogInformation("Connection was aborted after {totalMilliseconds}ms.", sw.ElapsedMilliseconds); var totalReceived = 0; var received = 0; try { var buffer = new byte[chunkSize]; do { received = socket.Receive(buffer); totalReceived += received; } while (received > 0 && totalReceived < responseSize); } catch (SocketException) { } catch (IOException) { // Socket.Receive could throw, and that is fine } // Since we expect writes to be cut off by the rate control, we should never see the entire response logger.LogInformation("Received 
{totalReceived} bytes", totalReceived); Assert.NotEqual(responseSize, totalReceived); } } } } [Fact] public async Task HttpsConnectionClosedWhenResponseDoesNotSatisfyMinimumDataRate() { const int chunkSize = 64 * 1024; const int chunks = 128; var certificate = new X509Certificate2(TestResources.TestCertificatePath, "testPassword"); var messageLogged = new ManualResetEventSlim(); var aborted = new ManualResetEventSlim(); var mockKestrelTrace = new Mock<IKestrelTrace>(); mockKestrelTrace .Setup(trace => trace.ResponseMininumDataRateNotSatisfied(It.IsAny<string>(), It.IsAny<string>())) .Callback(() => messageLogged.Set()); var testContext = new TestServiceContext { Log = mockKestrelTrace.Object, SystemClock = new SystemClock(), ServerOptions = { Limits = { MinResponseDataRate = new MinDataRate(bytesPerSecond: double.MaxValue, gracePeriod: TimeSpan.FromSeconds(2)) } } }; var listenOptions = new ListenOptions(new IPEndPoint(IPAddress.Loopback, 0)) { ConnectionAdapters = { new HttpsConnectionAdapter(new HttpsConnectionAdapterOptions { ServerCertificate = certificate }) } }; using (var server = new TestServer(async context => { context.RequestAborted.Register(() => { aborted.Set(); }); context.Response.ContentLength = chunks * chunkSize; for (var i = 0; i < chunks; i++) { await context.Response.WriteAsync(new string('a', chunkSize), context.RequestAborted); } }, testContext, listenOptions)) { using (var client = new TcpClient()) { await client.ConnectAsync(IPAddress.Loopback, server.Port); using (var sslStream = new SslStream(client.GetStream(), false, (sender, cert, chain, errors) => true, null)) { await sslStream.AuthenticateAsClientAsync("localhost", new X509CertificateCollection(), SslProtocols.Tls12 | SslProtocols.Tls11, false); var request = Encoding.ASCII.GetBytes("GET / HTTP/1.1\r\nHost:\r\n\r\n"); await sslStream.WriteAsync(request, 0, request.Length); Assert.True(aborted.Wait(TimeSpan.FromSeconds(60))); using (var reader = new StreamReader(sslStream, encoding: Encoding.ASCII, detectEncodingFromByteOrderMarks: false, bufferSize: 1024, leaveOpen: false)) { await reader.ReadToEndAsync().TimeoutAfter(TestConstants.DefaultTimeout); } Assert.True(messageLogged.Wait(TestConstants.DefaultTimeout)); } } } } [Fact] public async Task NonZeroContentLengthFor304StatusCodeIsAllowed() { using (var server = new TestServer(httpContext => { var response = httpContext.Response; response.StatusCode = StatusCodes.Status304NotModified; response.ContentLength = 42; return Task.CompletedTask; })) { using (var connection = server.CreateConnection()) { await connection.Send( "GET / HTTP/1.1", "Host:", "", ""); await connection.Receive( "HTTP/1.1 304 Not Modified", $"Date: {server.Context.DateHeaderValue}", "Content-Length: 42", "", ""); } } } public static TheoryData<string, StringValues, string> NullHeaderData { get { var dataset = new TheoryData<string, StringValues, string>(); // Unknown headers dataset.Add("NullString", (string)null, null); dataset.Add("EmptyString", "", ""); dataset.Add("NullStringArray", new string[] { null }, null); dataset.Add("EmptyStringArray", new string[] { "" }, ""); dataset.Add("MixedStringArray", new string[] { null, "" }, ""); // Known headers dataset.Add("Location", (string)null, null); dataset.Add("Location", "", ""); dataset.Add("Location", new string[] { null }, null); dataset.Add("Location", new string[] { "" }, ""); dataset.Add("Location", new string[] { null, "" }, ""); return dataset; } } } }
idx: 1
id: 14,820
msg: This test modification isn't correct. This task could run immediately and you wouldn't be able to tell if OnCompleted was called or not.
proj: aspnet-KestrelHttpServer
lang: .cs
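The review comment above (id 14,820) argues that awaiting a task which may already be complete cannot prove that an OnCompleted callback actually ran. The sketch below illustrates the safer pattern the existing tests in this file use: the completion signal is produced from inside the callback itself and is awaited with a timeout. It is written in Python/asyncio purely as an illustration, not as the repository's C# xUnit code, and the names (demo, on_completed) are invented for the example.

import asyncio

async def demo():
    loop = asyncio.get_running_loop()
    on_completed_called = loop.create_future()

    def on_completed():
        # The signal is set inside the callback itself, so awaiting it really
        # does prove the callback was invoked.
        if not on_completed_called.done():
            on_completed_called.set_result(None)

    # Hypothetical stand-in for the server invoking the registered callback
    # after the response finishes; in the real test this happens inside Kestrel.
    loop.call_soon(on_completed)

    # Fails with a timeout if the callback never fired, instead of passing
    # silently because some unrelated task happened to be finished already.
    await asyncio.wait_for(on_completed_called, timeout=5)
    print("on_completed was called")

asyncio.run(demo())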
@@ -20,11 +20,13 @@ export function setComponentProps(component, props, opts, context, mountAll) {
 	if ((component.__ref = props.ref)) delete props.ref;
 	if ((component.__key = props.key)) delete props.key;
 
-	if (!component.base || mountAll) {
-		if (component.componentWillMount) component.componentWillMount();
-	}
-	else if (component.componentWillReceiveProps) {
-		component.componentWillReceiveProps(props, context);
+	if (typeof component.constructor.getDerivedStateFromProps === 'undefined') {
+		if (!component.base || mountAll) {
+			if (component.componentWillMount) component.componentWillMount();
+		}
+		else if (component.componentWillReceiveProps) {
+			component.componentWillReceiveProps(props, context);
+		}
 	}
 
 	if (context && context!==component.context) {
y: 1
import { SYNC_RENDER, NO_RENDER, FORCE_RENDER, ASYNC_RENDER, ATTR_KEY } from '../constants'; import options from '../options'; import { extend } from '../util'; import { enqueueRender } from '../render-queue'; import { getNodeProps } from './index'; import { diff, mounts, diffLevel, flushMounts, recollectNodeTree, removeChildren } from './diff'; import { createComponent, collectComponent } from './component-recycler'; import { removeNode } from '../dom/index'; /** Set a component's `props` (generally derived from JSX attributes). * @param {Object} props * @param {Object} [opts] * @param {boolean} [opts.renderSync=false] If `true` and {@link options.syncComponentUpdates} is `true`, triggers synchronous rendering. * @param {boolean} [opts.render=true] If `false`, no render will be triggered. */ export function setComponentProps(component, props, opts, context, mountAll) { if (component._disable) return; component._disable = true; if ((component.__ref = props.ref)) delete props.ref; if ((component.__key = props.key)) delete props.key; if (!component.base || mountAll) { if (component.componentWillMount) component.componentWillMount(); } else if (component.componentWillReceiveProps) { component.componentWillReceiveProps(props, context); } if (context && context!==component.context) { if (!component.prevContext) component.prevContext = component.context; component.context = context; } if (!component.prevProps) component.prevProps = component.props; component.props = props; component._disable = false; if (opts!==NO_RENDER) { if (opts===SYNC_RENDER || options.syncComponentUpdates!==false || !component.base) { renderComponent(component, SYNC_RENDER, mountAll); } else { enqueueRender(component); } } if (component.__ref) component.__ref(component); } /** Render a Component, triggering necessary lifecycle events and taking High-Order Components into account. * @param {Component} component * @param {Object} [opts] * @param {boolean} [opts.build=false] If `true`, component will build and store a DOM node if not already associated with one. 
* @private */ export function renderComponent(component, opts, mountAll, isChild) { if (component._disable) return; let props = component.props, state = component.state, context = component.context, previousProps = component.prevProps || props, previousState = component.prevState || state, previousContext = component.prevContext || context, isUpdate = component.base, nextBase = component.nextBase, initialBase = isUpdate || nextBase, initialChildComponent = component._component, skip = false, rendered, inst, cbase; // if updating if (isUpdate) { component.props = previousProps; component.state = previousState; component.context = previousContext; if (opts!==FORCE_RENDER && component.shouldComponentUpdate && component.shouldComponentUpdate(props, state, context) === false) { skip = true; } else if (component.componentWillUpdate) { component.componentWillUpdate(props, state, context); } component.props = props; component.state = state; component.context = context; } component.prevProps = component.prevState = component.prevContext = component.nextBase = null; component._dirty = false; if (!skip) { rendered = component.render(props, state, context); // context to pass to the child, can be updated via (grand-)parent component if (component.getChildContext) { context = extend(extend({}, context), component.getChildContext()); } let childComponent = rendered && rendered.nodeName, toUnmount, base; if (typeof childComponent==='function') { // set up high order component link let childProps = getNodeProps(rendered); inst = initialChildComponent; if (inst && inst.constructor===childComponent && childProps.key==inst.__key) { setComponentProps(inst, childProps, SYNC_RENDER, context, false); } else { toUnmount = inst; component._component = inst = createComponent(childComponent, childProps, context); inst.nextBase = inst.nextBase || nextBase; inst._parentComponent = component; setComponentProps(inst, childProps, NO_RENDER, context, false); renderComponent(inst, SYNC_RENDER, mountAll, true); } base = inst.base; } else { cbase = initialBase; // destroy high order component link toUnmount = initialChildComponent; if (toUnmount) { cbase = component._component = null; } if (initialBase || opts===SYNC_RENDER) { if (cbase) cbase._component = null; base = diff(cbase, rendered, context, mountAll || !isUpdate, initialBase && initialBase.parentNode, true); } } if (initialBase && base!==initialBase && inst!==initialChildComponent) { let baseParent = initialBase.parentNode; if (baseParent && base!==baseParent) { baseParent.replaceChild(base, initialBase); if (!toUnmount) { initialBase._component = null; recollectNodeTree(initialBase, false); } } } if (toUnmount) { unmountComponent(toUnmount); } component.base = base; if (base && !isChild) { let componentRef = component, t = component; while ((t=t._parentComponent)) { (componentRef = t).base = base; } base._component = componentRef; base._componentConstructor = componentRef.constructor; } } if (!isUpdate || mountAll) { mounts.unshift(component); } else if (!skip) { // Ensure that pending componentDidMount() hooks of child components // are called before the componentDidUpdate() hook in the parent. 
// Note: disabled as it causes duplicate hooks, see https://github.com/developit/preact/issues/750 // flushMounts(); if (component.componentDidUpdate) { component.componentDidUpdate(previousProps, previousState, previousContext); } if (options.afterUpdate) options.afterUpdate(component); } if (component._renderCallbacks!=null) { while (component._renderCallbacks.length) component._renderCallbacks.pop().call(component); } if (!diffLevel && !isChild) flushMounts(); } /** Apply the Component referenced by a VNode to the DOM. * @param {Element} dom The DOM node to mutate * @param {VNode} vnode A Component-referencing VNode * @returns {Element} dom The created/mutated element * @private */ export function buildComponentFromVNode(dom, vnode, context, mountAll) { let c = dom && dom._component, originalComponent = c, oldDom = dom, isDirectOwner = c && dom._componentConstructor===vnode.nodeName, isOwner = isDirectOwner, props = getNodeProps(vnode); while (c && !isOwner && (c=c._parentComponent)) { isOwner = c.constructor===vnode.nodeName; } if (c && isOwner && (!mountAll || c._component)) { setComponentProps(c, props, ASYNC_RENDER, context, mountAll); dom = c.base; } else { if (originalComponent && !isDirectOwner) { unmountComponent(originalComponent); dom = oldDom = null; } c = createComponent(vnode.nodeName, props, context); if (dom && !c.nextBase) { c.nextBase = dom; // passing dom/oldDom as nextBase will recycle it if unused, so bypass recycling on L229: oldDom = null; } setComponentProps(c, props, SYNC_RENDER, context, mountAll); dom = c.base; if (oldDom && dom!==oldDom) { oldDom._component = null; recollectNodeTree(oldDom, false); } } return dom; } /** Remove a component from the DOM and recycle it. * @param {Component} component The Component instance to unmount * @private */ export function unmountComponent(component) { if (options.beforeUnmount) options.beforeUnmount(component); let base = component.base; component._disable = true; if (component.componentWillUnmount) component.componentWillUnmount(); component.base = null; // recursively tear down & recollect high-order component children: let inner = component._component; if (inner) { unmountComponent(inner); } else if (base) { if (base[ATTR_KEY] && base[ATTR_KEY].ref) base[ATTR_KEY].ref(null); component.nextBase = base; removeNode(base); collectComponent(component); removeChildren(base); } if (component.__ref) component.__ref(null); }
idx: 1
id: 11,921
msg: I'd be open to loosening this check if it can help offset the size.
proj: preactjs-preact
lang: js
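The diff in this row gates the legacy componentWillMount / componentWillReceiveProps hooks on whether the component class defines a static getDerivedStateFromProps, and the reviewer suggests the `typeof ... === 'undefined'` check could be loosened to save bytes. As a rough illustration only, in Python rather than Preact's JavaScript and with invented names, the difference between a strict "is the attribute defined at all" test and a looser truthiness test looks like this:

# Illustration only, not Preact code.
def strict_has_hook(cls) -> bool:
    # Strict: the hook counts as present whenever the attribute is defined,
    # even if it were somehow set to a falsy value.
    return getattr(cls, "get_derived_state_from_props", None) is not None

def loose_has_hook(cls) -> bool:
    # Loose: shorter, but a falsy attribute value would be treated as "no hook".
    return bool(getattr(cls, "get_derived_state_from_props", None))

class WithHook:
    @staticmethod
    def get_derived_state_from_props(props, state):
        return {}

class WithoutHook:
    pass

assert strict_has_hook(WithHook) and not strict_has_hook(WithoutHook)
assert loose_has_hook(WithHook) and not loose_has_hook(WithoutHook)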
@@ -2017,6 +2017,9 @@ defaultdict(<class 'list'>, {'col..., 'col...})]
         Apply a function that takes pandas DataFrame and outputs pandas DataFrame.
         The pandas DataFrame given to the function is of a batch used internally.
 
+        See also `Transform and apply a function
+        <https://koalas.readthedocs.io/en/stable/user_guide/transform_apply.html>`_.
+
         .. note:: the `func` is unable to access to the whole input frame. Koalas internally
             splits the input series into multiple batches and calls `func` with each batch multiple
             times. Therefore, operations such as global aggregations are impossible. See the example
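The documentation touched by this diff warns that the user function only ever sees one internal batch of the frame, so global aggregations inside it are unreliable. The following plain-pandas sketch mimics that batching to show why an element-wise transformation is safe while a per-batch "global" aggregation is not; it is not Koalas code, and apply_in_batches / batch_size are names made up for the illustration.

import pandas as pd

def apply_in_batches(pdf: pd.DataFrame, func, batch_size: int = 2) -> pd.DataFrame:
    # Call func once per batch, the way the engine described above might,
    # so func never observes the whole frame at once.
    pieces = [func(pdf.iloc[i:i + batch_size]) for i in range(0, len(pdf), batch_size)]
    return pd.concat(pieces, ignore_index=True)

df = pd.DataFrame({"a": [1, 2, 3, 4]})

# Safe: an element-wise transformation is unaffected by how the frame is split.
print(apply_in_batches(df, lambda batch: batch + 1))

# Unreliable: the "mean" is computed per batch, so the output depends on the
# batch boundaries (rows of 1.5 and 3.5 here, not a single overall mean of 2.5).
print(apply_in_batches(df, lambda batch: batch.mean().to_frame().T))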
y: 1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark DataFrame to behave similar to pandas DataFrame. """ from collections import OrderedDict, defaultdict from distutils.version import LooseVersion import re import warnings import inspect import json from functools import partial, reduce import sys from itertools import zip_longest from typing import Any, Optional, List, Tuple, Union, Generic, TypeVar, Iterable, Dict, Callable import numpy as np import pandas as pd from pandas.api.types import is_list_like, is_dict_like, is_scalar if LooseVersion(pd.__version__) >= LooseVersion("0.24"): from pandas.core.dtypes.common import infer_dtype_from_object else: from pandas.core.dtypes.common import _get_dtype_from_object as infer_dtype_from_object from pandas.core.accessor import CachedAccessor from pandas.core.dtypes.inference import is_sequence import pyspark from pyspark import StorageLevel from pyspark import sql as spark from pyspark.sql import functions as F, Column from pyspark.sql.functions import pandas_udf, PandasUDFType from pyspark.sql.readwriter import OptionUtils from pyspark.sql.types import ( BooleanType, ByteType, DecimalType, DoubleType, FloatType, IntegerType, LongType, NumericType, ShortType, StructType, StructField, ) from pyspark.sql.window import Window from databricks import koalas as ks # For running doctests and reference resolution in PyCharm. from databricks.koalas.config import option_context, get_option from databricks.koalas.utils import ( validate_arguments_and_invoke_function, align_diff_frames, validate_bool_kwarg, column_labels_level, name_like_string, scol_for, validate_axis, verify_temp_column_name, ) from databricks.koalas.generic import _Frame from databricks.koalas.internal import ( _InternalFrame, HIDDEN_COLUMNS, NATURAL_ORDER_COLUMN_NAME, SPARK_INDEX_NAME_FORMAT, SPARK_DEFAULT_INDEX_NAME, ) from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame from databricks.koalas.ml import corr from databricks.koalas.typedef import infer_return_type, as_spark_type from databricks.koalas.plot import KoalasFramePlotMethods # These regular expression patterns are complied and defined here to avoid to compile the same # pattern every time it is used in _repr_ and _repr_html_ in DataFrame. # Two patterns basically seek the footer string from Pandas' REPR_PATTERN = re.compile(r"\n\n\[(?P<rows>[0-9]+) rows x (?P<columns>[0-9]+) columns\]$") REPR_HTML_PATTERN = re.compile( r"\n\<p\>(?P<rows>[0-9]+) rows × (?P<columns>[0-9]+) columns\<\/p\>\n\<\/div\>$" ) _flex_doc_FRAME = """ Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). Equivalent to ``{equiv}``. With reverse version, `{reverse}`. Among flexible wrappers (`add`, `sub`, `mul`, `div`) to arithmetic operators: `+`, `-`, `*`, `/`, `//`. Parameters ---------- other : scalar Any single data Returns ------- DataFrame Result of the arithmetic operation. Examples -------- >>> df = ks.DataFrame({{'angles': [0, 3, 4], ... 
'degrees': [360, 180, 360]}}, ... index=['circle', 'triangle', 'rectangle'], ... columns=['angles', 'degrees']) >>> df angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Add a scalar with operator version which return the same results. Also reverse version. >>> df + 1 angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 >>> df.add(df) angles degrees circle 0 720 triangle 6 360 rectangle 8 720 >>> df.radd(1) angles degrees circle 1 361 triangle 4 181 rectangle 5 361 Divide and true divide by constant with reverse version. >>> df / 10 angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.div(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rdiv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 >>> df.truediv(10) angles degrees circle 0.0 36.0 triangle 0.3 18.0 rectangle 0.4 36.0 >>> df.rtruediv(10) angles degrees circle inf 0.027778 triangle 3.333333 0.055556 rectangle 2.500000 0.027778 Subtract by constant with reverse version. >>> df - 1 angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.sub(1) angles degrees circle -1 359 triangle 2 179 rectangle 3 359 >>> df.rsub(1) angles degrees circle 1 -359 triangle -2 -179 rectangle -3 -359 Multiply by constant with reverse version. >>> df * 1 angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.mul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 >>> df.rmul(1) angles degrees circle 0 360 triangle 3 180 rectangle 4 360 Floor Divide by constant with reverse version. >>> df // 10 angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.floordiv(10) angles degrees circle 0.0 36.0 triangle 0.0 18.0 rectangle 0.0 36.0 >>> df.rfloordiv(10) # doctest: +SKIP angles degrees circle inf 0.0 triangle 3.0 0.0 rectangle 2.0 0.0 Mod by constant with reverse version. >>> df % 2 angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.mod(2) angles degrees circle 0 0 triangle 1 0 rectangle 0 0 >>> df.rmod(2) angles degrees circle NaN 2 triangle 2.0 2 rectangle 2.0 2 Power by constant with reverse version. >>> df ** 2 angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.pow(2) angles degrees circle 0.0 129600.0 triangle 9.0 32400.0 rectangle 16.0 129600.0 >>> df.rpow(2) angles degrees circle 1.0 2.348543e+108 triangle 8.0 1.532496e+54 rectangle 16.0 2.348543e+108 """ T = TypeVar("T") if (3, 5) <= sys.version_info < (3, 7): from typing import GenericMeta # This is a workaround to support variadic generic in DataFrame in Python 3.5+. # See https://github.com/python/typing/issues/193 # We wrap the input params by a tuple to mimic variadic generic. old_getitem = GenericMeta.__getitem__ # type: ignore def new_getitem(self, params): if hasattr(self, "is_dataframe"): return old_getitem(self, Tuple[params]) else: return old_getitem(self, params) GenericMeta.__getitem__ = new_getitem # type: ignore class DataFrame(_Frame, Generic[T]): """ Koalas DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame internally. :ivar _internal: an internal immutable Frame to manage metadata. :type _internal: _InternalFrame Parameters ---------- data : numpy ndarray (structured or homogeneous), dict, Pandas DataFrame, Spark DataFrame \ or Koalas Series Dict can contain Series, arrays, constants, or list-like objects If data is a dict, argument order is maintained for Python 3.6 and later. 
Note that if `data` is a Pandas DataFrame, a Spark DataFrame, and a Koalas Series, other arguments should not be used. index : Index or array-like Index to use for resulting frame. Will default to RangeIndex if no indexing information part of input data and no index provided columns : Index or array-like Column labels to use for resulting frame. Will default to RangeIndex (0, 1, 2, ..., n) if no column labels are provided dtype : dtype, default None Data type to force. Only a single dtype is allowed. If None, infer copy : boolean, default False Copy data from inputs. Only affects DataFrame / 2d ndarray input Examples -------- Constructing DataFrame from a dictionary. >>> d = {'col1': [1, 2], 'col2': [3, 4]} >>> df = ks.DataFrame(data=d, columns=['col1', 'col2']) >>> df col1 col2 0 1 3 1 2 4 Constructing DataFrame from Pandas DataFrame >>> df = ks.DataFrame(pd.DataFrame(data=d, columns=['col1', 'col2'])) >>> df col1 col2 0 1 3 1 2 4 Notice that the inferred dtype is int64. >>> df.dtypes col1 int64 col2 int64 dtype: object To enforce a single dtype: >>> df = ks.DataFrame(data=d, dtype=np.int8) >>> df.dtypes col1 int8 col2 int8 dtype: object Constructing DataFrame from numpy ndarray: >>> df2 = ks.DataFrame(np.random.randint(low=0, high=10, size=(5, 5)), ... columns=['a', 'b', 'c', 'd', 'e']) >>> df2 # doctest: +SKIP a b c d e 0 3 1 4 9 8 1 4 8 4 8 4 2 7 6 5 6 7 3 8 7 9 1 0 4 2 5 4 3 9 """ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): if isinstance(data, _InternalFrame): assert index is None assert columns is None assert dtype is None assert not copy super(DataFrame, self).__init__(data) elif isinstance(data, spark.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy super(DataFrame, self).__init__(_InternalFrame(spark_frame=data, index_map=None)) elif isinstance(data, ks.Series): assert index is None assert columns is None assert dtype is None assert not copy data = data.to_dataframe() super(DataFrame, self).__init__(data._internal) else: if isinstance(data, pd.DataFrame): assert index is None assert columns is None assert dtype is None assert not copy pdf = data else: pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) super(DataFrame, self).__init__(_InternalFrame.from_pandas(pdf)) @property def _sdf(self) -> spark.DataFrame: return self._internal.spark_frame @property def ndim(self): """ Return an int representing the number of array dimensions. return 2 for DataFrame. Examples -------- >>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', None], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 NaN 7 8 >>> df.ndim 2 """ return 2 @property def axes(self): """ Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.axes [Int64Index([0, 1], dtype='int64'), Index(['col1', 'col2'], dtype='object')] """ return [self.index, self.columns] def _reduce_for_stat_function(self, sfun, name, axis=None, numeric_only=True): """ Applies sfun to each column and returns a pd.Series where the number of rows equal the number of columns. Parameters ---------- sfun : either an 1-arg function that takes a Column and returns a Column, or a 2-arg function that takes a Column and its DataType and returns a Column. 
axis: used only for sanity check because series only support index axis. name : original pandas API name. axis : axis to apply. 0 or 1, or 'index' or 'columns. numeric_only : bool, default True Include only float, int, boolean columns. False is not supported. This parameter is mainly for pandas compatibility. Only 'DataFrame.count' uses this parameter currently. """ from inspect import signature from databricks.koalas import Series from databricks.koalas.series import _col if name not in ("count", "min", "max") and not numeric_only: raise ValueError("Disabling 'numeric_only' parameter is not supported.") axis = validate_axis(axis) if axis == 0: exprs = [] new_column_labels = [] num_args = len(signature(sfun).parameters) for label in self._internal.column_labels: col_sdf = self._internal.spark_column_for(label) col_type = self._internal.spark_type_for(label) is_numeric_or_boolean = isinstance(col_type, (NumericType, BooleanType)) min_or_max = sfun.__name__ in ("min", "max") keep_column = not numeric_only or is_numeric_or_boolean or min_or_max if keep_column: if isinstance(col_type, BooleanType) and not min_or_max: # Stat functions cannot be used with boolean values by default # Thus, cast to integer (true to 1 and false to 0) # Exclude the min and max methods though since those work with booleans col_sdf = col_sdf.cast("integer") if num_args == 1: # Only pass in the column if sfun accepts only one arg col_sdf = sfun(col_sdf) else: # must be 2 assert num_args == 2 # Pass in both the column and its data type if sfun accepts two args col_sdf = sfun(col_sdf, col_type) exprs.append(col_sdf.alias(name_like_string(label))) new_column_labels.append(label) sdf = self._sdf.select(*exprs) # The data is expected to be small so it's fine to transpose/use default index. with ks.option_context( "compute.default_index_type", "distributed", "compute.max_rows", None ): kdf = DataFrame(sdf) internal = _InternalFrame( kdf._internal.spark_frame, index_map=kdf._internal.index_map, column_labels=new_column_labels, column_label_names=self._internal.column_label_names, ) return _col(DataFrame(internal).transpose()) elif axis == 1: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() pser = getattr(pdf, name)(axis=axis, numeric_only=numeric_only) if len(pdf) <= limit: return Series(pser) @pandas_udf(returnType=as_spark_type(pser.dtype.type)) def calculate_columns_axis(*cols): return getattr(pd.concat(cols, axis=1), name)(axis=axis, numeric_only=numeric_only) df = self._sdf.select( calculate_columns_axis(*self._internal.data_spark_columns).alias("0") ) return DataFrame(df)["0"] else: raise ValueError("No axis named %s for object type %s." % (axis, type(axis))) def _kser_for(self, label): """ Create Series with a proper column label. The given label must be verified to exist in `_InternalFrame.column_labels`. For example, in some method, self is like: >>> self = ks.range(3) `self._kser_for(label)` can be used with `_InternalFrame.column_labels`: >>> self._kser_for(self._internal.column_labels[0]) 0 0 1 1 2 2 Name: id, dtype: int64 `self._kser_for(label)` must not be used directly with user inputs. 
In that case, `self[label]` should be used instead, which checks the label exists or not: >>> self['id'] 0 0 1 1 2 2 Name: id, dtype: int64 """ from databricks.koalas.series import Series return Series( self._internal.copy( spark_column=self._internal.spark_column_for(label), column_labels=[label] ), anchor=self, ) def _apply_series_op(self, op): applied = [] for label in self._internal.column_labels: applied.append(op(self._kser_for(label))) internal = self._internal.with_new_columns(applied) return DataFrame(internal) # Arithmetic Operators def _map_series_op(self, op, other): from databricks.koalas.base import IndexOpsMixin if not isinstance(other, DataFrame) and ( isinstance(other, IndexOpsMixin) or is_sequence(other) ): raise ValueError( "%s with a sequence is currently not supported; " "however, got %s." % (op, type(other)) ) if isinstance(other, DataFrame) and self is not other: if self._internal.column_labels_level != other._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") # Different DataFrames def apply_op(kdf, this_column_labels, that_column_labels): for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( getattr(kdf._kser_for(this_label), op)(kdf._kser_for(that_label)), this_label, ) return align_diff_frames(apply_op, self, other, fillna=True, how="full") else: # DataFrame and Series if isinstance(other, DataFrame): return self._apply_series_op(lambda kser: getattr(kser, op)(other[kser.name])) else: return self._apply_series_op(lambda kser: getattr(kser, op)(other)) def __add__(self, other): return self._map_series_op("add", other) def __radd__(self, other): return self._map_series_op("radd", other) def __div__(self, other): return self._map_series_op("div", other) def __rdiv__(self, other): return self._map_series_op("rdiv", other) def __truediv__(self, other): return self._map_series_op("truediv", other) def __rtruediv__(self, other): return self._map_series_op("rtruediv", other) def __mul__(self, other): return self._map_series_op("mul", other) def __rmul__(self, other): return self._map_series_op("rmul", other) def __sub__(self, other): return self._map_series_op("sub", other) def __rsub__(self, other): return self._map_series_op("rsub", other) def __pow__(self, other): return self._map_series_op("pow", other) def __rpow__(self, other): return self._map_series_op("rpow", other) def __mod__(self, other): return self._map_series_op("mod", other) def __rmod__(self, other): return self._map_series_op("rmod", other) def __floordiv__(self, other): return self._map_series_op("floordiv", other) def __rfloordiv__(self, other): return self._map_series_op("rfloordiv", other) def add(self, other): return self + other # create accessor for plot plot = CachedAccessor("plot", KoalasFramePlotMethods) def hist(self, bins=10, **kwds): return self.plot.hist(bins, **kwds) hist.__doc__ = KoalasFramePlotMethods.hist.__doc__ def kde(self, bw_method=None, ind=None, **kwds): return self.plot.kde(bw_method, ind, **kwds) kde.__doc__ = KoalasFramePlotMethods.kde.__doc__ add.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="dataframe + other", reverse="radd" ) def radd(self, other): return other + self radd.__doc__ = _flex_doc_FRAME.format( desc="Addition", op_name="+", equiv="other + dataframe", reverse="add" ) def div(self, other): return self / other div.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rdiv" ) divide = div def rdiv(self, other): 
return other / self rdiv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="div" ) def truediv(self, other): return self / other truediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="dataframe / other", reverse="rtruediv" ) def rtruediv(self, other): return other / self rtruediv.__doc__ = _flex_doc_FRAME.format( desc="Floating division", op_name="/", equiv="other / dataframe", reverse="truediv" ) def mul(self, other): return self * other mul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="dataframe * other", reverse="rmul" ) multiply = mul def rmul(self, other): return other * self rmul.__doc__ = _flex_doc_FRAME.format( desc="Multiplication", op_name="*", equiv="other * dataframe", reverse="mul" ) def sub(self, other): return self - other sub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="dataframe - other", reverse="rsub" ) subtract = sub def rsub(self, other): return other - self rsub.__doc__ = _flex_doc_FRAME.format( desc="Subtraction", op_name="-", equiv="other - dataframe", reverse="sub" ) def mod(self, other): return self % other mod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="dataframe % other", reverse="rmod" ) def rmod(self, other): return other % self rmod.__doc__ = _flex_doc_FRAME.format( desc="Modulo", op_name="%", equiv="other % dataframe", reverse="mod" ) def pow(self, other): return self ** other pow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power of series", op_name="**", equiv="dataframe ** other", reverse="rpow" ) def rpow(self, other): return other ** self rpow.__doc__ = _flex_doc_FRAME.format( desc="Exponential power", op_name="**", equiv="other ** dataframe", reverse="pow" ) def floordiv(self, other): return self // other floordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="dataframe // other", reverse="rfloordiv" ) def rfloordiv(self, other): return other // self rfloordiv.__doc__ = _flex_doc_FRAME.format( desc="Integer division", op_name="//", equiv="other // dataframe", reverse="floordiv" ) # Comparison Operators def __eq__(self, other): return self._map_series_op("eq", other) def __ne__(self, other): return self._map_series_op("ne", other) def __lt__(self, other): return self._map_series_op("lt", other) def __le__(self, other): return self._map_series_op("le", other) def __ge__(self, other): return self._map_series_op("ge", other) def __gt__(self, other): return self._map_series_op("gt", other) def eq(self, other): """ Compare if the current value is equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.eq(1) a b a True True b False False c False True d False False """ return self == other equals = eq def gt(self, other): """ Compare if the current value is greater than the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.gt(2) a b a False False b False False c True False d True False """ return self > other def ge(self, other): """ Compare if the current value is greater than or equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... 
index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ge(1) a b a True True b True False c True True d True False """ return self >= other def lt(self, other): """ Compare if the current value is less than the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.lt(1) a b a False False b False False c False False d False False """ return self < other def le(self, other): """ Compare if the current value is less than or equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.le(2) a b a True True b True False c False True d False False """ return self <= other def ne(self, other): """ Compare if the current value is not equal to the other. >>> df = ks.DataFrame({'a': [1, 2, 3, 4], ... 'b': [1, np.nan, 1, np.nan]}, ... index=['a', 'b', 'c', 'd'], columns=['a', 'b']) >>> df.ne(1) a b a False False b True True c True False d True True """ return self != other def applymap(self, func): """ Apply a function to a Dataframe elementwise. This method applies a function that accepts and returns a scalar to every element of a DataFrame. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> np.int32: ... return x ** 2 Koalas uses return type hint and does not try to infer the type. Parameters ---------- func : callable Python function, returns a single value from a single value. Returns ------- DataFrame Transformed DataFrame. Examples -------- >>> df = ks.DataFrame([[1, 2.12], [3.356, 4.567]]) >>> df 0 1 0 1.000 2.120 1 3.356 4.567 >>> def str_len(x) -> int: ... return len(str(x)) >>> df.applymap(str_len) 0 1 0 3 4 1 5 5 >>> def power(x) -> float: ... return x ** 2 >>> df.applymap(power) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 You can omit the type hint and let Koalas infer its type. >>> df.applymap(lambda x: x ** 2) 0 1 0 1.000000 4.494400 1 11.262736 20.857489 """ # TODO: We can implement shortcut theoretically since it creates new DataFrame # anyway and we don't have to worry about operations on different DataFrames. return self._apply_series_op(lambda kser: kser.apply(func)) # TODO: not all arguments are implemented comparing to Pandas' for now. def aggregate(self, func: Union[List[str], Dict[str, List[str]]]): """Aggregate using one or more operations over the specified axis. Parameters ---------- func : dict or a list a dict mapping from column name (string) to aggregate functions (list of strings). If a list is given, the aggregation is performed against all columns. Returns ------- DataFrame Notes ----- `agg` is an alias for `aggregate`. Use the alias. See Also -------- DataFrame.apply : Invoke function on DataFrame. DataFrame.transform : Only perform transforming type operations. DataFrame.groupby : Perform operations over groups. Series.aggregate : The equivalent function for Series. Examples -------- >>> df = ks.DataFrame([[1, 2, 3], ... [4, 5, 6], ... [7, 8, 9], ... [np.nan, np.nan, np.nan]], ... columns=['A', 'B', 'C']) >>> df A B C 0 1.0 2.0 3.0 1 4.0 5.0 6.0 2 7.0 8.0 9.0 3 NaN NaN NaN Aggregate these functions over the rows. >>> df.agg(['sum', 'min'])[['A', 'B', 'C']] A B C min 1.0 2.0 3.0 sum 12.0 15.0 18.0 Different aggregations per column. 
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})[['A', 'B']] A B max NaN 8.0 min 1.0 2.0 sum 12.0 NaN """ from databricks.koalas.groupby import GroupBy if isinstance(func, list): if all((isinstance(f, str) for f in func)): func = dict([(column, func) for column in self.columns]) else: raise ValueError( "If the given function is a list, it " "should only contains function names as strings." ) if not isinstance(func, dict) or not all( isinstance(key, str) and ( isinstance(value, str) or isinstance(value, list) and all(isinstance(v, str) for v in value) ) for key, value in func.items() ): raise ValueError( "aggs must be a dict mapping from column name (string) to aggregate " "functions (list of strings)." ) kdf = DataFrame(GroupBy._spark_groupby(self, func)) # type: DataFrame # The codes below basically converts: # # A B # sum min min max # 0 12.0 1.0 2.0 8.0 # # to: # A B # max NaN 8.0 # min 1.0 2.0 # sum 12.0 NaN # # Aggregated output is usually pretty much small. So it is fine to directly use pandas API. pdf = kdf.to_pandas().stack() pdf.index = pdf.index.droplevel() pdf.columns.names = [None] pdf.index.names = [None] return DataFrame(pdf[list(func.keys())]) agg = aggregate def corr(self, method="pearson"): """ Compute pairwise correlation of columns, excluding NA/null values. Parameters ---------- method : {'pearson', 'spearman'} * pearson : standard correlation coefficient * spearman : Spearman rank correlation Returns ------- y : pandas.DataFrame See Also -------- Series.corr Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.corr('pearson') dogs cats dogs 1.000000 -0.851064 cats -0.851064 1.000000 >>> df.corr('spearman') dogs cats dogs 1.000000 -0.948683 cats -0.948683 1.000000 Notes ----- There are behavior differences between Koalas and pandas. * the `method` argument only accepts 'pearson', 'spearman' * the data should not contain NaNs. Koalas will return an error. * Koalas doesn't support the following argument(s). * `min_periods` argument is not supported """ return ks.from_pandas(corr(self, method)) def iteritems(self) -> Iterable: """ Iterator over (column name, Series) pairs. Iterates over the DataFrame columns, returning a tuple with the column name and the content as a Series. Returns ------- label : object The column names for the DataFrame being iterated over. content : Series The column entries belonging to each label, as a Series. Examples -------- >>> df = ks.DataFrame({'species': ['bear', 'bear', 'marsupial'], ... 'population': [1864, 22000, 80000]}, ... index=['panda', 'polar', 'koala'], ... columns=['species', 'population']) >>> df species population panda bear 1864 polar bear 22000 koala marsupial 80000 >>> for label, content in df.iteritems(): ... print('label:', label) ... print('content:', content.to_string()) ... label: species content: panda bear polar bear koala marsupial label: population content: panda 1864 polar 22000 koala 80000 """ return [ (label if len(label) > 1 else label[0], self._kser_for(label)) for label in self._internal.column_labels ] def iterrows(self): """ Iterate over DataFrame rows as (index, Series) pairs. Yields ------ index : label or tuple of label The index of the row. A tuple for a `MultiIndex`. data : pandas.Series The data of the row as a Series. it : generator A generator that iterates over the rows of the frame. Notes ----- 1. 
Because ``iterrows`` returns a Series for each row, it does **not** preserve dtypes across the rows (dtypes are preserved across columns for DataFrames). For example, >>> df = ks.DataFrame([[1, 1.5]], columns=['int', 'float']) >>> row = next(df.iterrows())[1] >>> row int 1.0 float 1.5 Name: 0, dtype: float64 >>> print(row['int'].dtype) float64 >>> print(df['int'].dtype) int64 To preserve dtypes while iterating over the rows, it is better to use :meth:`itertuples` which returns namedtuples of the values and which is generally faster than ``iterrows``. 2. You should **never modify** something you are iterating over. This is not guaranteed to work in all cases. Depending on the data types, the iterator returns a copy and not a view, and writing to it will have no effect. """ columns = self.columns internal_index_columns = self._internal.index_spark_column_names internal_data_columns = self._internal.data_spark_column_names def extract_kv_from_spark_row(row): k = ( row[internal_index_columns[0]] if len(internal_index_columns) == 1 else tuple(row[c] for c in internal_index_columns) ) v = [row[c] for c in internal_data_columns] return k, v for k, v in map(extract_kv_from_spark_row, self._sdf.toLocalIterator()): s = pd.Series(v, index=columns, name=k) yield k, s def items(self) -> Iterable: """This is an alias of ``iteritems``.""" return self.iteritems() def to_clipboard(self, excel=True, sep=None, **kwargs): """ Copy object to the system clipboard. Write a text representation of object to the system clipboard. This can be pasted into Excel, for example. .. note:: This method should only be used if the resulting DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- excel : bool, default True - True, use the provided separator, writing in a csv format for allowing easy pasting into excel. - False, write a string representation of the object to the clipboard. sep : str, default ``'\\t'`` Field delimiter. **kwargs These parameters will be passed to DataFrame.to_csv. Notes ----- Requirements for your platform. - Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules) - Windows : none - OS X : none See Also -------- read_clipboard : Read text from clipboard. Examples -------- Copy the contents of a DataFrame to the clipboard. >>> df = ks.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C']) # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # ,A,B,C ... # 0,1,2,3 ... # 1,4,5,6 We can omit the the index by passing the keyword `index` and setting it to false. >>> df.to_clipboard(sep=',', index=False) # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # A,B,C ... # 1,2,3 ... # 4,5,6 This function also works for Series: >>> df = ks.Series([1, 2, 3, 4, 5, 6, 7], name='x') # doctest: +SKIP >>> df.to_clipboard(sep=',') # doctest: +SKIP ... # Wrote the following to the system clipboard: ... # 0, 1 ... # 1, 2 ... # 2, 3 ... # 3, 4 ... # 4, 5 ... # 5, 6 ... 
# 6, 7 """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_clipboard, pd.DataFrame.to_clipboard, args ) def to_html( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal=".", bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False, ): """ Render a DataFrame as an HTML table. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. bold_rows : bool, default True Make the row labels bold in the output. classes : str or list or tuple, default None CSS class(es) to apply to the resulting html table. escape : bool, default True Convert the characters <, >, and & to HTML-safe sequences. notebook : {True, False}, default False Whether the generated HTML is for IPython Notebook. border : int A ``border=border`` attribute is included in the opening `<table>` tag. Default ``pd.options.html.border``. table_id : str, optional A css id is included in the opening `<table>` tag if specified. render_links : bool, default False Convert URLs to HTML links (only works with Pandas 0.24+). Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_string : Convert DataFrame to a string. """ # Make sure locals() call is at the top of the function so we don't capture local variables. 
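        # Illustrative usage sketch (not executed here; `df` is a hypothetical frame, as in
        # the docstring examples): like the other conversion methods in this file, to_html()
        # collects data to the driver, so max_rows is the main safety valve for large input.
        #
        #     df = ks.DataFrame({'a': range(100000)})
        #     html = df.to_html(max_rows=20)   # only head(20) is converted to pandas
        #
        # All other keyword arguments are validated and forwarded to pandas.DataFrame.to_html
        # by validate_arguments_and_invoke_function() below.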
args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_html, pd.DataFrame.to_html, args ) def to_string( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal=".", line_width=None, ): """ Render a DataFrame to a console-friendly tabular output. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : StringIO-like, optional Buffer to write to. columns : sequence, optional, default None The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool, optional Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names index : bool, optional, default True Whether to print index (row) labels. na_rep : str, optional, default 'NaN' String representation of NAN to use. formatters : list or dict of one-param. functions, optional Formatter functions to apply to columns' elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : one-parameter function, optional, default None Formatter function to apply to columns' elements if they are floats. The result of this function must be a unicode string. sparsify : bool, optional, default True Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. index_names : bool, optional, default True Prints the names of the indexes. justify : str, default None How to justify the column labels. If None uses the option from the print configuration (controlled by set_option), 'right' out of the box. Valid values are * left * right * center * justify * justify-all * start * end * inherit * match-parent * initial * unset. max_rows : int, optional Maximum number of rows to display in the console. max_cols : int, optional Maximum number of columns to display in the console. show_dimensions : bool, default False Display DataFrame dimensions (number of rows by number of columns). decimal : str, default '.' Character recognized as decimal separator, e.g. ',' in Europe. line_width : int, optional Width to wrap a line in characters. Returns ------- str (or unicode, depending on data and options) String representation of the dataframe. See Also -------- to_html : Convert DataFrame to HTML. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2, 3], 'col2': [4, 5, 6]}, columns=['col1', 'col2']) >>> print(df.to_string()) col1 col2 0 1 4 1 2 5 2 3 6 >>> print(df.to_string(max_rows=2)) col1 col2 0 1 4 1 2 5 """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_string, pd.DataFrame.to_string, args ) def to_dict(self, orient="dict", into=dict): """ Convert the DataFrame to a dictionary. The type of the key-value pairs can be customized with the parameters (see below). .. 
note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- orient : str {'dict', 'list', 'series', 'split', 'records', 'index'} Determines the type of the values of the dictionary. - 'dict' (default) : dict like {column -> {index -> value}} - 'list' : dict like {column -> [values]} - 'series' : dict like {column -> Series(values)} - 'split' : dict like {'index' -> [index], 'columns' -> [columns], 'data' -> [values]} - 'records' : list like [{column -> value}, ... , {column -> value}] - 'index' : dict like {index -> {column -> value}} Abbreviations are allowed. `s` indicates `series` and `sp` indicates `split`. into : class, default dict The collections.abc.Mapping subclass used for all Mappings in the return value. Can be the actual class or an empty instance of the mapping type you want. If you want a collections.defaultdict, you must pass it initialized. Returns ------- dict, list or collections.abc.Mapping Return a collections.abc.Mapping object representing the DataFrame. The resulting transformation depends on the `orient` parameter. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], ... 'col2': [0.5, 0.75]}, ... index=['row1', 'row2'], ... columns=['col1', 'col2']) >>> df col1 col2 row1 1 0.50 row2 2 0.75 >>> df_dict = df.to_dict() >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('col1', [('row1', 1), ('row2', 2)]), ('col2', [('row1', 0.5), ('row2', 0.75)])] You can specify the return orientation. >>> df_dict = df.to_dict('series') >>> sorted(df_dict.items()) [('col1', row1 1 row2 2 Name: col1, dtype: int64), ('col2', row1 0.50 row2 0.75 Name: col2, dtype: float64)] >>> df_dict = df.to_dict('split') >>> sorted(df_dict.items()) # doctest: +ELLIPSIS [('columns', ['col1', 'col2']), ('data', [[1..., 0.75]]), ('index', ['row1', 'row2'])] >>> df_dict = df.to_dict('records') >>> [sorted(values.items()) for values in df_dict] # doctest: +ELLIPSIS [[('col1', 1...), ('col2', 0.5)], [('col1', 2...), ('col2', 0.75)]] >>> df_dict = df.to_dict('index') >>> sorted([(key, sorted(values.items())) for key, values in df_dict.items()]) [('row1', [('col1', 1), ('col2', 0.5)]), ('row2', [('col1', 2), ('col2', 0.75)])] You can also specify the mapping type. >>> from collections import OrderedDict, defaultdict >>> df.to_dict(into=OrderedDict) OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])), \ ('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))]) If you want a `defaultdict`, you need to initialize it: >>> dd = defaultdict(list) >>> df.to_dict('records', into=dd) # doctest: +ELLIPSIS [defaultdict(<class 'list'>, {'col..., 'col...}), \ defaultdict(<class 'list'>, {'col..., 'col...})] """ # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_dict, pd.DataFrame.to_dict, args ) def to_latex( self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep="NaN", formatters=None, float_format=None, sparsify=None, index_names=True, bold_rows=False, column_format=None, longtable=None, escape=None, encoding=None, decimal=".", multicolumn=None, multicolumn_format=None, multirow=None, ): r""" Render an object to a LaTeX tabular environment table. Render an object to a tabular environment table. You can splice this into a LaTeX document. Requires usepackage{booktabs}. .. 
note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, consider alternative formats. Parameters ---------- buf : file descriptor or None Buffer to write to. If None, the output is returned as a string. columns : list of label, optional The subset of columns to write. Writes all columns by default. col_space : int, optional The minimum width of each column. header : bool or list of str, default True Write out the column names. If a list of strings is given, it is assumed to be aliases for the column names. index : bool, default True Write row names (index). na_rep : str, default ‘NaN’ Missing data representation. formatters : list of functions or dict of {str: function}, optional Formatter functions to apply to columns’ elements by position or name. The result of each function must be a unicode string. List must be of length equal to the number of columns. float_format : str, optional Format string for floating point numbers. sparsify : bool, optional Set to False for a DataFrame with a hierarchical index to print every multiindex key at each row. By default, the value will be read from the config module. index_names : bool, default True Prints the names of the indexes. bold_rows : bool, default False Make the row labels bold in the output. column_format : str, optional The columns format as specified in LaTeX table format e.g. ‘rcl’ for 3 columns. By default, ‘l’ will be used for all columns except columns of numbers, which default to ‘r’. longtable : bool, optional By default, the value will be read from the pandas config module. Use a longtable environment instead of tabular. Requires adding a usepackage{longtable} to your LaTeX preamble. escape : bool, optional By default, the value will be read from the pandas config module. When set to False prevents from escaping latex special characters in column names. encoding : str, optional A string representing the encoding to use in the output file, defaults to ‘ascii’ on Python 2 and ‘utf-8’ on Python 3. decimal : str, default ‘.’ Character recognized as decimal separator, e.g. ‘,’ in Europe. multicolumn : bool, default True Use multicolumn to enhance MultiIndex columns. The default will be read from the config module. multicolumn_format : str, default ‘l’ The alignment for multicolumns, similar to column_format The default will be read from the config module. multirow : bool, default False Use multirow to enhance MultiIndex rows. Requires adding a usepackage{multirow} to your LaTeX preamble. Will print centered labels (instead of top-aligned) across the contained rows, separating groups via clines. The default will be read from the pandas config module. Returns ------- str or None If buf is None, returns the resulting LateX format as a string. Otherwise returns None. See Also -------- DataFrame.to_string : Render a DataFrame to a console-friendly tabular output. DataFrame.to_html : Render a DataFrame as an HTML table. Examples -------- >>> df = ks.DataFrame({'name': ['Raphael', 'Donatello'], ... 'mask': ['red', 'purple'], ... 'weapon': ['sai', 'bo staff']}, ... 
columns=['name', 'mask', 'weapon']) >>> df.to_latex(index=False) # doctest: +NORMALIZE_WHITESPACE '\\begin{tabular}{lll}\n\\toprule\n name & mask & weapon \\\\\n\\midrule\n Raphael & red & sai \\\\\n Donatello & purple & bo staff \\\\\n\\bottomrule\n\\end{tabular}\n' """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_latex, pd.DataFrame.to_latex, args ) def to_markdown(self, buf=None, mode=None, max_rows=None): """ Print DataFrame in Markdown-friendly format. .. note:: This method should only be used if the resulting Pandas object is expected to be small, as all the data is loaded into the driver's memory. If the input is large, set max_rows parameter. Parameters ---------- buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. mode : str, optional Mode in which file is opened. **kwargs These parameters will be passed to `tabulate`. Returns ------- str DataFrame in Markdown-friendly format. Examples -------- >>> df = ks.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(df.to_markdown()) # doctest: +SKIP | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal | """ # `to_markdown` is supported in pandas >= 1.0.0 since it's newly added in pandas 1.0.0. if LooseVersion(pd.__version__) < LooseVersion("1.0.0"): raise NotImplementedError( "`to_markdown()` only supported in Kaoals with pandas >= 1.0.0" ) # Make sure locals() call is at the top of the function so we don't capture local variables. args = locals() if max_rows is not None: kdf = self.head(max_rows) else: kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_markdown, pd.DataFrame.to_markdown, args ) # TODO: enable doctests once we drop Spark 2.3.x (due to type coercion logic # when creating arrays) def transpose(self): """ Transpose index and columns. Reflect the DataFrame over its main diagonal by writing rows as columns and vice-versa. The property :attr:`.T` is an accessor to the method :meth:`transpose`. .. note:: This method is based on an expensive operation due to the nature of big data. Internally it needs to generate each row for each value, and then group twice - it is a huge operation. To prevent misusage, this method has the 'compute.max_rows' default limit of input length, and raises a ValueError. >>> from databricks.koalas.config import option_context >>> with option_context('compute.max_rows', 1000): # doctest: +NORMALIZE_WHITESPACE ... ks.DataFrame({'a': range(1001)}).transpose() Traceback (most recent call last): ... ValueError: Current DataFrame has more then the given limit 1000 rows. Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' to retrieve to retrieve more than 1000 rows. Note that, before changing the 'compute.max_rows', this operation is considerably expensive. Returns ------- DataFrame The transposed DataFrame. Notes ----- Transposing a DataFrame with mixed dtypes will result in a homogeneous DataFrame with the coerced dtype. For instance, if int and float have to be placed in same column, it becomes float. If type coercion is not possible, it fails. Also, note that the values in index should be unique because they become unique column names. In addition, if Spark 2.3 is used, the types should always be exactly same. 
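        For example (an illustrative sketch only; the value 2000 is arbitrary), the
        'compute.max_rows' limit used above can be raised temporarily when the frame is
        known to be small enough to transpose:

        >>> from databricks.koalas.config import option_context  # doctest: +SKIP
        >>> with option_context('compute.max_rows', 2000):  # doctest: +SKIP
        ...     ks.DataFrame({'a': range(1001)}).transpose()  # doctest: +SKIP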
Examples -------- **Square DataFrame with homogeneous dtype** >>> d1 = {'col1': [1, 2], 'col2': [3, 4]} >>> df1 = ks.DataFrame(data=d1, columns=['col1', 'col2']) >>> df1 col1 col2 0 1 3 1 2 4 >>> df1_transposed = df1.T.sort_index() # doctest: +SKIP >>> df1_transposed # doctest: +SKIP 0 1 col1 1 2 col2 3 4 When the dtype is homogeneous in the original DataFrame, we get a transposed DataFrame with the same dtype: >>> df1.dtypes col1 int64 col2 int64 dtype: object >>> df1_transposed.dtypes # doctest: +SKIP 0 int64 1 int64 dtype: object **Non-square DataFrame with mixed dtypes** >>> d2 = {'score': [9.5, 8], ... 'kids': [0, 0], ... 'age': [12, 22]} >>> df2 = ks.DataFrame(data=d2, columns=['score', 'kids', 'age']) >>> df2 score kids age 0 9.5 0 12 1 8.0 0 22 >>> df2_transposed = df2.T.sort_index() # doctest: +SKIP >>> df2_transposed # doctest: +SKIP 0 1 age 12.0 22.0 kids 0.0 0.0 score 9.5 8.0 When the DataFrame has mixed dtypes, we get a transposed DataFrame with the coerced dtype: >>> df2.dtypes score float64 kids int64 age int64 dtype: object >>> df2_transposed.dtypes # doctest: +SKIP 0 float64 1 float64 dtype: object """ max_compute_count = get_option("compute.max_rows") if max_compute_count is not None: pdf = self.head(max_compute_count + 1)._to_internal_pandas() if len(pdf) > max_compute_count: raise ValueError( "Current DataFrame has more then the given limit {0} rows. " "Please set 'compute.max_rows' by using 'databricks.koalas.config.set_option' " "to retrieve to retrieve more than {0} rows. Note that, before changing the " "'compute.max_rows', this operation is considerably expensive.".format( max_compute_count ) ) return DataFrame(pdf.transpose()) # Explode the data to be pairs. # # For instance, if the current input DataFrame is as below: # # +------+------+------+------+------+ # |index1|index2|(a,x1)|(a,x2)|(b,x3)| # +------+------+------+------+------+ # | y1| z1| 1| 0| 0| # | y2| z2| 0| 50| 0| # | y3| z3| 3| 2| 1| # +------+------+------+------+------+ # # Output of `exploded_df` becomes as below: # # +-----------------+-----------------+-----------------+-----+ # | index|__index_level_0__|__index_level_1__|value| # +-----------------+-----------------+-----------------+-----+ # |{"a":["y1","z1"]}| a| x1| 1| # |{"a":["y1","z1"]}| a| x2| 0| # |{"a":["y1","z1"]}| b| x3| 0| # |{"a":["y2","z2"]}| a| x1| 0| # |{"a":["y2","z2"]}| a| x2| 50| # |{"a":["y2","z2"]}| b| x3| 0| # |{"a":["y3","z3"]}| a| x1| 3| # |{"a":["y3","z3"]}| a| x2| 2| # |{"a":["y3","z3"]}| b| x3| 1| # +-----------------+-----------------+-----------------+-----+ pairs = F.explode( F.array( *[ F.struct( [ F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label) ] + [self._internal.spark_column_for(label).alias("value")] ) for label in self._internal.column_labels ] ) ) exploded_df = self._sdf.withColumn("pairs", pairs).select( [ F.to_json( F.struct( F.array( [scol.cast("string") for scol in self._internal.index_spark_columns] ).alias("a") ) ).alias("index"), F.col("pairs.*"), ] ) # After that, executes pivot with key and its index column. # Note that index column should contain unique values since column names # should be unique. 
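        # Sketch, for orientation (not executed): the pivot below turns each JSON-encoded
        # original row index into one output column. With the data pictured above, the step
        # is roughly equivalent to
        #
        #     exploded_df.groupBy("__index_level_0__").pivot("index") \
        #         .agg(F.first(F.col("value")))
        #
        # after which the JSON strings (e.g. '{"a":["y1","z1"]}') are decoded with
        # json.loads to rebuild the transposed column labels.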
internal_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] pivoted_df = exploded_df.groupBy(internal_index_columns).pivot("index") transposed_df = pivoted_df.agg(F.first(F.col("value"))) new_data_columns = list( filter(lambda x: x not in internal_index_columns, transposed_df.columns) ) internal = self._internal.copy( spark_frame=transposed_df, index_map=OrderedDict((col, None) for col in internal_index_columns), column_labels=[tuple(json.loads(col)["a"]) for col in new_data_columns], data_spark_columns=[scol_for(transposed_df, col) for col in new_data_columns], column_label_names=None, ) return DataFrame(internal) T = property(transpose) def apply_batch(self, func): """ Apply a function that takes pandas DataFrame and outputs pandas DataFrame. The pandas DataFrame given to the function is of a batch used internally. .. note:: the `func` is unable to access to the whole input frame. Koalas internally splits the input series into multiple batches and calls `func` with each batch multiple times. Therefore, operations such as global aggregations are impossible. See the example below. >>> # This case does not return the length of whole frame but of the batch internally ... # used. ... def length(pdf) -> ks.DataFrame[int]: ... return pd.DataFrame([len(pdf)]) ... >>> df = ks.DataFrame({'A': range(1000)}) >>> df.apply_batch(length) # doctest: +SKIP c0 0 83 1 83 2 83 ... 10 83 11 83 .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def plus_one(x) -> ks.DataFrame[float, float]: ... return x + 1 If the return type is specified, the output column names become `c0, c1, c2 ... cn`. These names are positionally mapped to the returned DataFrame in ``func``. See examples below. Parameters ---------- func : function Function to apply to each pandas frame. Returns ------- DataFrame See Also -------- DataFrame.apply: For row/columnwise operations. DataFrame.applymap: For elementwise operations. DataFrame.aggregate: Only perform aggregating type operations. DataFrame.transform: Only perform transforming type operations. Series.transform_batch: transform the search as each pandas chunks. Examples -------- >>> df = ks.DataFrame([(1, 2), (3, 4), (5, 6)], columns=['A', 'B']) >>> df A B 0 1 2 1 3 4 2 5 6 >>> def query_func(pdf) -> ks.DataFrame[int, int]: ... return pdf.query('A == 1') >>> df.apply_batch(query_func) c0 c1 0 1 2 You can also omit the type hints so Koalas infers the return schema as below: >>> df.apply_batch(lambda pdf: pdf.query('A == 1')) A B 0 1 2 """ # TODO: codes here partially duplicate `DataFrame.apply`. Can we deduplicate? from databricks.koalas.groupby import GroupBy if isinstance(func, np.ufunc): f = func func = lambda *args, **kwargs: f(*args, **kwargs) assert callable(func), "the first argument should be a callable function." spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None should_use_map_in_pandas = LooseVersion(pyspark.__version__) >= "3.0" if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. 
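        # Illustrative usage of the two paths handled below (a sketch, not executed here;
        # `df` is a hypothetical Koalas DataFrame, as in the docstring examples):
        #
        #     df.apply_batch(lambda pdf: pdf + 1)        # no type hint: the return schema
        #                                                # is inferred from a head() sample
        #
        #     def plus_one(pdf) -> ks.DataFrame[int, int]:
        #         return pdf + 1
        #     df.apply_batch(plus_one)                   # hinted: no sampling pass, output
        #                                                # columns are named c0, c1, ...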
limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() applied = func(pdf) if not isinstance(applied, pd.DataFrame): raise ValueError( "The given function should return a frame; however, " "the return type was %s." % type(applied) ) kdf = ks.DataFrame(applied) if len(pdf) <= limit: return kdf return_schema = kdf._internal.to_internal_spark_frame.schema if should_use_map_in_pandas: output_func = GroupBy._make_pandas_df_builder_func( self, func, return_schema, retain_index=True ) sdf = self._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) else: sdf = GroupBy._spark_group_map_apply( self, func, (F.spark_partition_id(),), return_schema, retain_index=True ) # If schema is inferred, we can restore indexes too. internal = kdf._internal.with_new_sdf(sdf) else: return_schema = infer_return_type(func).tpe is_return_dataframe = getattr(return_sig, "__origin__", None) == ks.DataFrame if not is_return_dataframe: raise TypeError( "The given function should specify a frame as its type " "hints; however, the return type was %s." % return_sig ) if should_use_map_in_pandas: output_func = GroupBy._make_pandas_df_builder_func( self, func, return_schema, retain_index=False ) sdf = self._internal.to_internal_spark_frame.mapInPandas( lambda iterator: map(output_func, iterator), schema=return_schema ) else: sdf = GroupBy._spark_group_map_apply( self, func, (F.spark_partition_id(),), return_schema, retain_index=False ) # Otherwise, it loses index. internal = _InternalFrame(spark_frame=sdf, index_map=None) return DataFrame(internal) def map_in_pandas(self, func): warnings.warn( "map_in_pandas is deprecated as of DataFrame.apply_batch. " "Please use the API instead.", DeprecationWarning, ) return self.apply_batch(func) map_in_pandas.__doc__ = apply_batch.__doc__ def apply(self, func, axis=0): """ Apply a function along an axis of the DataFrame. Objects passed to the function are Series objects whose index is either the DataFrame's index (``axis=0``) or the DataFrame's columns (``axis=1``). .. note:: when `axis` is 0 or 'index', the `func` is unable to access to the whole input series. Koalas internally splits the input series into multiple batches and calls `func` with each batch multiple times. Therefore, operations such as global aggregations are impossible. See the example below. >>> # This case does not return the length of whole series but of the batch internally ... # used. ... def length(s) -> int: ... return len(s) ... >>> df = ks.DataFrame({'A': range(1000)}) >>> df.apply(length, axis=0) # doctest: +SKIP 0 83 1 83 2 83 ... 10 83 11 83 Name: 0, dtype: int32 .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify the return type as `Series` or scalar value in ``func``, for instance, as below: >>> def square(s) -> ks.Series[np.int32]: ... return s ** 2 Koalas uses return type hint and does not try to infer the type. In case when axis is 1, it requires to specify `DataFrame` or scalar value with type hints as below: >>> def plus_one(x) -> ks.DataFrame[float, float]: ... return x + 1 If the return type is specified as `DataFrame`, the output column names become `c0, c1, c2 ... cn`. These names are positionally mapped to the returned DataFrame in ``func``. See examples below. 
However, this way switches the index type to default index type in the output because the type hint cannot express the index type at this moment. Use `reset_index()` to keep index as a workaround. Parameters ---------- func : function Function to apply to each column or row. axis : {0 or 'index', 1 or 'columns'}, default 0 Axis along which the function is applied: * 0 or 'index': apply function to each column. * 1 or 'columns': apply function to each row. Returns ------- Series or DataFrame Result of applying ``func`` along the given axis of the DataFrame. See Also -------- DataFrame.applymap : For elementwise operations. DataFrame.aggregate : Only perform aggregating type operations. DataFrame.transform : Only perform transforming type operations. Series.apply : The equivalent function for Series. Examples -------- >>> df = ks.DataFrame([[4, 9]] * 3, columns=['A', 'B']) >>> df A B 0 4 9 1 4 9 2 4 9 Using a numpy universal function (in this case the same as ``np.sqrt(df)``): >>> def sqrt(x) -> ks.Series[float]: ... return np.sqrt(x) ... >>> df.apply(sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 You can omit the type hint and let Koalas infer its type. >>> df.apply(np.sqrt, axis=0) A B 0 2.0 3.0 1 2.0 3.0 2 2.0 3.0 When `axis` is 1 or 'columns', it applies the function for each row. >>> def summation(x) -> np.int64: ... return np.sum(x) ... >>> df.apply(summation, axis=1) 0 13 1 13 2 13 Name: 0, dtype: int64 Likewise, you can omit the type hint and let Koalas infer its type. >>> df.apply(np.sum, axis=1) 0 13 1 13 2 13 Name: 0, dtype: int64 Returning a list-like will result in a Series >>> df.apply(lambda x: [1, 2], axis=1) 0 [1, 2] 1 [1, 2] 2 [1, 2] Name: 0, dtype: object In order to specify the types when `axis` is '1', it should use DataFrame[...] annotation. In this case, the column names are automatically generated. >>> def identify(x) -> ks.DataFrame[np.int64, np.int64]: ... return x ... >>> df.apply(identify, axis=1) c0 c1 0 4 9 1 4 9 2 4 9 """ from databricks.koalas.groupby import GroupBy from databricks.koalas.series import _col if isinstance(func, np.ufunc): f = func func = lambda *args, **kwargs: f(*args, **kwargs) assert callable(func), "the first argument should be a callable function." axis = validate_axis(axis) should_return_series = False spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None def apply_func(pdf): pdf_or_pser = pdf.apply(func, axis=axis) if isinstance(pdf_or_pser, pd.Series): return pdf_or_pser.to_frame() else: return pdf_or_pser if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() applied = pdf.apply(func, axis=axis) kser_or_kdf = ks.from_pandas(applied) if len(pdf) <= limit: return kser_or_kdf kdf = kser_or_kdf if isinstance(kser_or_kdf, ks.Series): should_return_series = True kdf = kser_or_kdf.to_frame() return_schema = kdf._internal._sdf.drop(*HIDDEN_COLUMNS).schema sdf = GroupBy._spark_group_map_apply( self, apply_func, (F.spark_partition_id(),), return_schema, retain_index=True ) # If schema is inferred, we can restore indexes too. 
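        # In this inferred-schema branch, the pandas result computed from head(limit + 1)
        # above supplies both the Spark return schema and the index metadata, so
        # with_new_sdf() can attach the full distributed result while keeping the restored
        # index. For example, df.apply(np.sqrt, axis=0) from the docstring takes this path
        # because np.sqrt carries no Koalas return-type hint.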
internal = kdf._internal.with_new_sdf(sdf) else: return_schema = infer_return_type(func).tpe require_index_axis = getattr(return_sig, "__origin__", None) == ks.Series require_column_axis = getattr(return_sig, "__origin__", None) == ks.DataFrame if require_index_axis: if axis != 0: raise TypeError( "The given function should specify a scalar or a series as its type " "hints when axis is 0 or 'index'; however, the return type " "was %s" % return_sig ) fields_types = zip(self.columns, [return_schema] * len(self.columns)) return_schema = StructType([StructField(c, t) for c, t in fields_types]) elif require_column_axis: if axis != 1: raise TypeError( "The given function should specify a scalar or a frame as its type " "hints when axis is 1 or 'column'; however, the return type " "was %s" % return_sig ) else: # any axis is fine. should_return_series = True return_schema = StructType([StructField("0", return_schema)]) sdf = GroupBy._spark_group_map_apply( self, apply_func, (F.spark_partition_id(),), return_schema, retain_index=False ) # Otherwise, it loses index. internal = _InternalFrame(spark_frame=sdf, index_map=None) result = DataFrame(internal) if should_return_series: return _col(result) else: return result def transform(self, func): """ Call ``func`` on self producing a Series with transformed values and that has the same length as its input. .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def square(x) -> ks.Series[np.int32]: ... return x ** 2 Koalas uses return type hint and does not try to infer the type. .. note:: the series within ``func`` is actually multiple pandas series as the segments of the whole Koalas series; therefore, the length of each series is not guaranteed. As an example, an aggregation against each series does work as a global aggregation but an aggregation of each segment. See below: >>> def func(x) -> ks.Series[np.int32]: ... return x + sum(x) Parameters ---------- func : function Function to use for transforming the data. It must work when pandas Series is passed. Returns ------- DataFrame A DataFrame that must have the same length as self. Raises ------ Exception : If the returned DataFrame has a different length than self. See Also -------- DataFrame.aggregate : Only perform aggregating type operations. DataFrame.apply : Invoke function on DataFrame. Series.transform : The equivalent function for Series. Examples -------- >>> df = ks.DataFrame({'A': range(3), 'B': range(1, 4)}, columns=['A', 'B']) >>> df A B 0 0 1 1 1 2 2 2 3 >>> def square(x) -> ks.Series[np.int32]: ... return x ** 2 >>> df.transform(square) A B 0 0 1 1 1 4 2 4 9 You can omit the type hint and let Koalas infer its type. >>> df.transform(lambda x: x ** 2) A B 0 0 1 1 1 4 2 4 9 For multi-index columns: >>> df.columns = [('X', 'A'), ('X', 'B')] >>> df.transform(square) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 4 2 4 9 >>> df.transform(lambda x: x ** 2) # doctest: +NORMALIZE_WHITESPACE X A B 0 0 1 1 1 4 2 4 9 """ assert callable(func), "the first argument should be a callable function." spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. 
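        # Illustrative usage of the two paths below (a sketch, not executed here; `df` is a
        # hypothetical Koalas DataFrame, as in the docstring examples):
        #
        #     df.transform(lambda x: x ** 2)        # no hint: dtypes inferred from a head()
        #                                           # sample, then each column is rewritten
        #                                           # with a SCALAR pandas_udf
        #
        #     def square(x) -> ks.Series[np.int32]:
        #         return x ** 2
        #     df.transform(square)                  # hinted: delegated per column to
        #                                           # Series.transform_batch, no sampling pass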
limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() transformed = pdf.transform(func) kdf = DataFrame(transformed) if len(pdf) <= limit: return kdf applied = [] for input_label, output_label in zip( self._internal.column_labels, kdf._internal.column_labels ): pudf = pandas_udf( func, returnType=kdf._internal.spark_type_for(output_label), functionType=PandasUDFType.SCALAR, ) kser = self._kser_for(input_label) applied.append( kser._with_new_scol(scol=pudf(kser.spark_column)).rename(input_label) ) internal = self._internal.with_new_columns(applied) return DataFrame(internal) else: return self._apply_series_op(lambda kser: kser.transform_batch(func)) def transform_batch(self, func): """ Transform chunks with a function that takes pandas DataFrame and outputs pandas DataFrame. The pandas DataFrame given to the function is of a batch used internally. The length of each input and output should be the same. .. note:: the `func` is unable to access to the whole input frame. Koalas internally splits the input series into multiple batches and calls `func` with each batch multiple times. Therefore, operations such as global aggregations are impossible. See the example below. >>> # This case does not return the length of whole frame but of the batch internally ... # used. ... def length(pdf) -> ks.DataFrame[int]: ... return pd.DataFrame([len(pdf)] * len(pdf)) ... >>> df = ks.DataFrame({'A': range(1000)}) >>> df.transform_batch(length) # doctest: +SKIP c0 0 83 1 83 2 83 ... .. note:: this API executes the function once to infer the type which is potentially expensive, for instance, when the dataset is created after aggregations or sorting. To avoid this, specify return type in ``func``, for instance, as below: >>> def plus_one(x) -> ks.DataFrame[float, float]: ... return x + 1 If the return type is specified, the output column names become `c0, c1, c2 ... cn`. These names are positionally mapped to the returned DataFrame in ``func``. See examples below. Parameters ---------- func : function Function to transform each pandas frame. Returns ------- DataFrame See Also -------- DataFrame.apply_batch: For row/columnwise operations. Series.transform_batch: transform the search as each pandas chunks. Examples -------- >>> df = ks.DataFrame([(1, 2), (3, 4), (5, 6)], columns=['A', 'B']) >>> df A B 0 1 2 1 3 4 2 5 6 >>> def plus_one_func(pdf) -> ks.DataFrame[int, int]: ... return pdf + 1 >>> df.transform_batch(plus_one_func) c0 c1 0 2 3 1 4 5 2 6 7 >>> def plus_one_func(pdf) -> ks.Series[int]: ... return pdf.B + 1 >>> df.transform_batch(plus_one_func) 0 3 1 5 2 7 Name: 0, dtype: int32 You can also omit the type hints so Koalas infers the return schema as below: >>> df.transform_batch(lambda pdf: pdf + 1) A B 0 2 3 1 4 5 2 6 7 Note that you should not transform the index. The index information will not change. >>> df.transform_batch(lambda pdf: pdf.B + 1) 0 3 1 5 2 7 Name: B, dtype: int64 """ from databricks.koalas.groupby import GroupBy from databricks.koalas import Series assert callable(func), "the first argument should be a callable function." spec = inspect.getfullargspec(func) return_sig = spec.annotations.get("return", None) should_infer_schema = return_sig is None names = self._internal.to_internal_spark_frame.schema.names should_by_pass = LooseVersion(pyspark.__version__) >= "3.0" def pandas_concat(series): # The input can only be a DataFrame for struct from Spark 3.0. # This works around to make the input as a frame. 
See SPARK-27240 pdf = pd.concat(series, axis=1) pdf = pdf.rename(columns=dict(zip(pdf.columns, names))) return pdf def pandas_extract(pdf, name): # This is for output to work around a DataFrame for struct # from Spark 3.0. See SPARK-23836 return pdf[name] def pandas_series_func(f): ff = f return lambda *series: ff(pandas_concat(series)) def pandas_frame_func(f): ff = f return lambda *series: pandas_extract(ff(pandas_concat(series)), field.name) if should_infer_schema: # Here we execute with the first 1000 to get the return type. # If the records were less than 1000, it uses pandas API directly for a shortcut. limit = get_option("compute.shortcut_limit") pdf = self.head(limit + 1)._to_internal_pandas() transformed = func(pdf) if not isinstance(transformed, (pd.DataFrame, pd.Series)): raise ValueError( "The given function should return a frame; however, " "the return type was %s." % type(transformed) ) if len(transformed) != len(pdf): raise ValueError("transform_batch cannot produce aggregated results") kdf_or_kser = ks.from_pandas(transformed) if isinstance(kdf_or_kser, ks.Series): kser = kdf_or_kser pudf = pandas_udf( func if should_by_pass else pandas_series_func(func), returnType=kser.spark_type, functionType=PandasUDFType.SCALAR, ) columns = self._internal.spark_columns # TODO: Index will be lost in this case. internal = self._internal.copy( spark_column=pudf(F.struct(*columns)) if should_by_pass else pudf(*columns), column_labels=kser._internal.column_labels, column_label_names=kser._internal.column_label_names, ) return Series(internal, anchor=self) else: kdf = kdf_or_kser if len(pdf) <= limit: # only do the short cut when it returns a frame to avoid # operations on different dataframes in case of series. return kdf return_schema = kdf._internal.to_internal_spark_frame.schema # Force nullability. return_schema = StructType( [StructField(field.name, field.dataType) for field in return_schema.fields] ) output_func = GroupBy._make_pandas_df_builder_func( self, func, return_schema, retain_index=True ) columns = self._internal.spark_columns if should_by_pass: pudf = pandas_udf( output_func, returnType=return_schema, functionType=PandasUDFType.SCALAR ) temp_struct_column = verify_temp_column_name( self._internal.spark_frame, "__temp_struct__" ) applied = pudf(F.struct(*columns)).alias(temp_struct_column) sdf = self._internal.spark_frame.select(applied) sdf = sdf.selectExpr("%s.*" % temp_struct_column) else: applied = [] for field in return_schema.fields: applied.append( pandas_udf( pandas_frame_func(output_func), returnType=field.dataType, functionType=PandasUDFType.SCALAR, )(*columns).alias(field.name) ) sdf = self._internal.spark_frame.select(*applied) return DataFrame(kdf._internal.with_new_sdf(sdf)) else: return_schema = infer_return_type(func).tpe is_return_dataframe = getattr(return_sig, "__origin__", None) == ks.DataFrame is_return_series = getattr(return_sig, "__origin__", None) == ks.Series if not is_return_dataframe and not is_return_series: raise TypeError( "The given function should specify a frame or seires as its type " "hints; however, the return type was %s." 
% return_sig ) if is_return_series: pudf = pandas_udf( func if should_by_pass else pandas_series_func(func), returnType=return_schema, functionType=PandasUDFType.SCALAR, ) columns = self._internal.spark_columns internal = self._internal.copy( spark_column=pudf(F.struct(*columns)) if should_by_pass else pudf(*columns), column_labels=[("0",)], column_label_names=None, ) return Series(internal, anchor=self) else: output_func = GroupBy._make_pandas_df_builder_func( self, func, return_schema, retain_index=False ) columns = self._internal.spark_columns if should_by_pass: pudf = pandas_udf( output_func, returnType=return_schema, functionType=PandasUDFType.SCALAR ) temp_struct_column = verify_temp_column_name( self._internal.spark_frame, "__temp_struct__" ) applied = pudf(F.struct(*columns)).alias(temp_struct_column) sdf = self._internal.spark_frame.select(applied) sdf = sdf.selectExpr("%s.*" % temp_struct_column) else: applied = [] for field in return_schema.fields: applied.append( pandas_udf( pandas_frame_func(output_func), returnType=field.dataType, functionType=PandasUDFType.SCALAR, )(*columns).alias(field.name) ) sdf = self._internal.spark_frame.select(*applied) return DataFrame(sdf) def pop(self, item): """ Return item and drop from frame. Raise KeyError if not found. Parameters ---------- item : str Label of column to be popped. Returns ------- Series Examples -------- >>> df = ks.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> df name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('class') 0 bird 1 bird 2 mammal 3 mammal Name: class, dtype: object >>> df name max_speed 0 falcon 389.0 1 parrot 24.0 2 lion 80.5 3 monkey NaN Also support for MultiIndex >>> df = ks.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey','mammal', np.nan)], ... columns=('name', 'class', 'max_speed')) >>> columns = [('a', 'name'), ('a', 'class'), ('b', 'max_speed')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df a b name class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN >>> df.pop('a') name class 0 falcon bird 1 parrot bird 2 lion mammal 3 monkey mammal >>> df b max_speed 0 389.0 1 24.0 2 80.5 3 NaN """ result = self[item] self._internal = self.drop(item)._internal return result # TODO: add axis parameter can work when '1' or 'columns' def xs(self, key, axis=0, level=None): """ Return cross-section from the DataFrame. This method takes a `key` argument to select data at a particular level of a MultiIndex. Parameters ---------- key : label or tuple of label Label contained in the index, or partially in a MultiIndex. axis : 0 or 'index', default 0 Axis to retrieve cross-section on. currently only support 0 or 'index' level : object, defaults to first n levels (n=1 or len(key)) In case of a key partially contained in a MultiIndex, indicate which levels are used. Levels can be referred by label or position. Returns ------- DataFrame Cross-section from the original DataFrame corresponding to the selected index levels. See Also -------- DataFrame.loc : Access a group of rows and columns by label(s) or a boolean array. DataFrame.iloc : Purely integer-location based indexing for selection by position. Examples -------- >>> d = {'num_legs': [4, 4, 2, 2], ... 'num_wings': [0, 0, 2, 2], ... 
'class': ['mammal', 'mammal', 'mammal', 'bird'], ... 'animal': ['cat', 'dog', 'bat', 'penguin'], ... 'locomotion': ['walks', 'walks', 'flies', 'walks']} >>> df = ks.DataFrame(data=d) >>> df = df.set_index(['class', 'animal', 'locomotion']) >>> df # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class animal locomotion mammal cat walks 4 0 dog walks 4 0 bat flies 2 2 bird penguin walks 2 2 Get values at specified index >>> df.xs('mammal') # doctest: +NORMALIZE_WHITESPACE num_legs num_wings animal locomotion cat walks 4 0 dog walks 4 0 bat flies 2 2 Get values at several indexes >>> df.xs(('mammal', 'dog')) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings locomotion walks 4 0 Get values at specified index and level >>> df.xs('cat', level=1) # doctest: +NORMALIZE_WHITESPACE num_legs num_wings class locomotion mammal walks 4 0 """ from databricks.koalas.series import _col if not isinstance(key, (str, tuple)): raise ValueError("'key' should be string or tuple that contains strings") if not all(isinstance(index, str) for index in key): raise ValueError( "'key' should have index names as only strings " "or a tuple that contain index names as only strings" ) axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if isinstance(key, str): key = (key,) if len(key) > len(self._internal.index_spark_columns): raise KeyError( "Key length ({}) exceeds index depth ({})".format( len(key), len(self._internal.index_spark_columns) ) ) if level is None: level = 0 scols = ( self._internal.spark_columns[:level] + self._internal.spark_columns[level + len(key) :] ) rows = [self._internal.spark_columns[lvl] == index for lvl, index in enumerate(key, level)] sdf = ( self._sdf.select(scols + list(HIDDEN_COLUMNS)) .drop(NATURAL_ORDER_COLUMN_NAME) .filter(reduce(lambda x, y: x & y, rows)) ) if len(key) == len(self._internal.index_spark_columns): result = _col(DataFrame(_InternalFrame(spark_frame=sdf, index_map=None)).T) result.name = key else: new_index_map = OrderedDict( list(self._internal.index_map.items())[:level] + list(self._internal.index_map.items())[level + len(key) :] ) internal = self._internal.copy(spark_frame=sdf, index_map=new_index_map,) result = DataFrame(internal) return result def where(self, cond, other=np.nan): """ Replace values where the condition is False. Parameters ---------- cond : boolean DataFrame Where cond is True, keep the original value. Where False, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is False are replaced with corresponding value from other. 
Returns ------- DataFrame Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.where(df1 > 0).sort_index() A B 0 NaN 100.0 1 1.0 200.0 2 2.0 300.0 3 3.0 400.0 4 4.0 500.0 >>> df1.where(df1 > 1, 10).sort_index() A B 0 10 100 1 10 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df1 + 100).sort_index() A B 0 100 100 1 101 200 2 2 300 3 3 400 4 4 500 >>> df1.where(df1 > 1, df2).sort_index() A B 0 0 100 1 -1 200 2 2 300 3 3 400 4 4 500 When the column name of cond is different from self, it treats all values are False >>> cond = ks.DataFrame({'C': [0, -1, -2, -3, -4], 'D':[4, 3, 2, 1, 0]}) % 3 == 0 >>> cond C D 0 True False 1 False True 2 False False 3 True False 4 False True >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN When the type of cond is Series, it just check boolean regardless of column name >>> cond = ks.Series([1, 2]) > 1 >>> cond 0 False 1 True Name: 0, dtype: bool >>> df1.where(cond).sort_index() A B 0 NaN NaN 1 1.0 200.0 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> reset_option("compute.ops_on_diff_frames") """ from databricks.koalas.series import Series tmp_cond_col_name = "__tmp_cond_col_{}__".format tmp_other_col_name = "__tmp_other_col_{}__".format kdf = self.copy() tmp_cond_col_names = [ tmp_cond_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(cond, DataFrame): cond = cond[ [ ( cond._internal.spark_column_for(label) if label in cond._internal.column_labels else F.lit(False) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_cond_col_names) ] ] kdf[tmp_cond_col_names] = cond elif isinstance(cond, Series): cond = cond.to_frame() cond = cond[ [cond._internal.data_spark_columns[0].alias(name) for name in tmp_cond_col_names] ] kdf[tmp_cond_col_names] = cond else: raise ValueError("type of cond must be a DataFrame or Series") tmp_other_col_names = [ tmp_other_col_name(name_like_string(label)) for label in self._internal.column_labels ] if isinstance(other, DataFrame): other = other[ [ ( other._internal.spark_column_for(label) if label in other._internal.column_labels else F.lit(np.nan) ).alias(name) for label, name in zip(self._internal.column_labels, tmp_other_col_names) ] ] kdf[tmp_other_col_names] = other elif isinstance(other, Series): other = other.to_frame() other = other[ [other._internal.data_spark_columns[0].alias(name) for name in tmp_other_col_names] ] kdf[tmp_other_col_names] = other else: for label in self._internal.column_labels: kdf[tmp_other_col_name(name_like_string(label))] = other # above logic make spark dataframe looks like below: # +-----------------+---+---+------------------+-------------------+------------------+--... # |__index_level_0__| A| B|__tmp_cond_col_A__|__tmp_other_col_A__|__tmp_cond_col_B__|__... # +-----------------+---+---+------------------+-------------------+------------------+--... # | 0| 0|100| true| 0| false| ... # | 1| 1|200| false| -1| false| ... # | 3| 3|400| true| -3| false| ... # | 2| 2|300| false| -2| true| ... # | 4| 4|500| false| -4| false| ... # +-----------------+---+---+------------------+-------------------+------------------+--... 
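        # Sketch, for orientation (not executed): given the widened frame shown above, each
        # output column is rebuilt with a conditional expression roughly like
        #
        #     F.when(F.col("__tmp_cond_col_A__"), F.col("A")) \
        #         .otherwise(F.col("__tmp_other_col_A__")).alias("A")
        #
        # and the loop below emits one such expression per original column label.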
data_spark_columns = [] for label in self._internal.column_labels: data_spark_columns.append( F.when( kdf[tmp_cond_col_name(name_like_string(label))].spark_column, kdf._internal.spark_column_for(label), ) .otherwise(kdf[tmp_other_col_name(name_like_string(label))].spark_column) .alias(kdf._internal.spark_column_name_for(label)) ) return DataFrame( kdf._internal.with_new_columns( data_spark_columns, column_labels=self._internal.column_labels ) ) def mask(self, cond, other=np.nan): """ Replace values where the condition is True. Parameters ---------- cond : boolean DataFrame Where cond is False, keep the original value. Where True, replace with corresponding value from other. other : scalar, DataFrame Entries where cond is True are replaced with corresponding value from other. Returns ------- DataFrame Examples -------- >>> from databricks.koalas.config import set_option, reset_option >>> set_option("compute.ops_on_diff_frames", True) >>> df1 = ks.DataFrame({'A': [0, 1, 2, 3, 4], 'B':[100, 200, 300, 400, 500]}) >>> df2 = ks.DataFrame({'A': [0, -1, -2, -3, -4], 'B':[-100, -200, -300, -400, -500]}) >>> df1 A B 0 0 100 1 1 200 2 2 300 3 3 400 4 4 500 >>> df2 A B 0 0 -100 1 -1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> df1.mask(df1 > 0).sort_index() A B 0 0.0 NaN 1 NaN NaN 2 NaN NaN 3 NaN NaN 4 NaN NaN >>> df1.mask(df1 > 1, 10).sort_index() A B 0 0 10 1 1 10 2 10 10 3 10 10 4 10 10 >>> df1.mask(df1 > 1, df1 + 100).sort_index() A B 0 0 200 1 1 300 2 102 400 3 103 500 4 104 600 >>> df1.mask(df1 > 1, df2).sort_index() A B 0 0 -100 1 1 -200 2 -2 -300 3 -3 -400 4 -4 -500 >>> reset_option("compute.ops_on_diff_frames") """ from databricks.koalas.series import Series if not isinstance(cond, (DataFrame, Series)): raise ValueError("type of cond must be a DataFrame or Series") cond_inversed = cond._apply_series_op(lambda kser: ~kser) return self.where(cond_inversed, other) @property def index(self): """The index (row labels) Column of the DataFrame. Currently not supported when the DataFrame has no index. See Also -------- Index """ from databricks.koalas.indexes import Index, MultiIndex if len(self._internal.index_map) == 1: return Index(self) else: return MultiIndex(self) @property def empty(self): """ Returns true if the current DataFrame is empty. Otherwise, returns false. Examples -------- >>> ks.range(10).empty False >>> ks.range(0).empty True >>> ks.DataFrame({}, index=list('abc')).empty True """ return len(self._internal.column_labels) == 0 or self._sdf.rdd.isEmpty() @property def style(self): """ Property returning a Styler object containing methods for building a styled HTML representation fo the DataFrame. .. note:: currently it collects top 1000 rows and return its pandas `pandas.io.formats.style.Styler` instance. Examples -------- >>> ks.range(1001).style # doctest: +ELLIPSIS <pandas.io.formats.style.Styler object at ...> """ max_results = get_option("compute.max_rows") pdf = self.head(max_results + 1).to_pandas() if len(pdf) > max_results: warnings.warn("'style' property will only use top %s rows." % max_results, UserWarning) return pdf.head(max_results).style def set_index(self, keys, drop=True, append=False, inplace=False): """Set the DataFrame index (row labels) using one or more existing columns. Set the DataFrame index (row labels) using one or more existing columns or arrays (of the correct length). The index can replace the existing index or expand on it. 
Parameters ---------- keys : label or array-like or list of labels/arrays This parameter can be either a single column key, a single array of the same length as the calling DataFrame, or a list containing an arbitrary combination of column keys and arrays. Here, "array" encompasses :class:`Series`, :class:`Index` and ``np.ndarray``. drop : bool, default True Delete columns to be used as the new index. append : bool, default False Whether to append columns to existing index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). Returns ------- DataFrame Changed row labels. See Also -------- DataFrame.reset_index : Opposite of set_index. Examples -------- >>> df = ks.DataFrame({'month': [1, 4, 7, 10], ... 'year': [2012, 2014, 2013, 2014], ... 'sale': [55, 40, 84, 31]}, ... columns=['month', 'year', 'sale']) >>> df month year sale 0 1 2012 55 1 4 2014 40 2 7 2013 84 3 10 2014 31 Set the index to become the 'month' column: >>> df.set_index('month') # doctest: +NORMALIZE_WHITESPACE year sale month 1 2012 55 4 2014 40 7 2013 84 10 2014 31 Create a MultiIndex using columns 'year' and 'month': >>> df.set_index(['year', 'month']) # doctest: +NORMALIZE_WHITESPACE sale year month 2012 1 55 2014 4 40 2013 7 84 2014 10 31 """ inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(keys, (str, tuple)): keys = [keys] else: keys = list(keys) columns = set(self.columns) for key in keys: if key not in columns: raise KeyError(key) keys = [key if isinstance(key, tuple) else (key,) for key in keys] if drop: column_labels = [label for label in self._internal.column_labels if label not in keys] else: column_labels = self._internal.column_labels if append: index_map = OrderedDict( list(self._internal.index_map.items()) + [(self._internal.spark_column_name_for(label), label) for label in keys] ) else: index_map = OrderedDict( (self._internal.spark_column_name_for(label), label) for label in keys ) internal = self._internal.copy( index_map=index_map, column_labels=column_labels, data_spark_columns=[self._internal.spark_column_for(label) for label in column_labels], ) if inplace: self._internal = internal else: return DataFrame(internal) def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=""): """Reset the index, or a level of it. For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. Parameters ---------- level : int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default. drop : bool, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. inplace : bool, default False Modify the DataFrame in place (do not create a new object). col_level : int or str, default 0 If the columns have multiple levels, determines which level the labels are inserted into. By default it is inserted into the first level. col_fill : object, default '' If the columns have multiple levels, determines how the other levels are named. If None then the index name is repeated. Returns ------- DataFrame DataFrame with the new index. See Also -------- DataFrame.set_index : Opposite of reset_index. Examples -------- >>> df = ks.DataFrame([('bird', 389.0), ... ('bird', 24.0), ... 
('mammal', 80.5), ... ('mammal', np.nan)], ... index=['falcon', 'parrot', 'lion', 'monkey'], ... columns=('class', 'max_speed')) >>> df class max_speed falcon bird 389.0 parrot bird 24.0 lion mammal 80.5 monkey mammal NaN When we reset the index, the old index is added as a column. Unlike pandas, Koalas does not automatically add a sequential index. The following 0, 1, 2, 3 are only there when we display the DataFrame. >>> df.reset_index() index class max_speed 0 falcon bird 389.0 1 parrot bird 24.0 2 lion mammal 80.5 3 monkey mammal NaN We can use the `drop` parameter to avoid the old index being added as a column: >>> df.reset_index(drop=True) class max_speed 0 bird 389.0 1 bird 24.0 2 mammal 80.5 3 mammal NaN You can also use `reset_index` with `MultiIndex`. >>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'), ... ('bird', 'parrot'), ... ('mammal', 'lion'), ... ('mammal', 'monkey')], ... names=['class', 'name']) >>> columns = pd.MultiIndex.from_tuples([('speed', 'max'), ... ('species', 'type')]) >>> df = ks.DataFrame([(389.0, 'fly'), ... ( 24.0, 'fly'), ... ( 80.5, 'run'), ... (np.nan, 'jump')], ... index=index, ... columns=columns) >>> df # doctest: +NORMALIZE_WHITESPACE speed species max type class name bird falcon 389.0 fly parrot 24.0 fly mammal lion 80.5 run monkey NaN jump If the index has multiple levels, we can reset a subset of them: >>> df.reset_index(level='class') # doctest: +NORMALIZE_WHITESPACE class speed species max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we are not dropping the index, by default, it is placed in the top level. We can place it in another level: >>> df.reset_index(level='class', col_level=1) # doctest: +NORMALIZE_WHITESPACE speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump When the index is inserted under another level, we can specify under which one with the parameter `col_fill`: >>> df.reset_index(level='class', col_level=1, ... col_fill='species') # doctest: +NORMALIZE_WHITESPACE species speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump If we specify a nonexistent level for `col_fill`, it is created: >>> df.reset_index(level='class', col_level=1, ... 
col_fill='genus') # doctest: +NORMALIZE_WHITESPACE genus speed species class max type name falcon bird 389.0 fly parrot bird 24.0 fly lion mammal 80.5 run monkey mammal NaN jump """ inplace = validate_bool_kwarg(inplace, "inplace") multi_index = len(self._internal.index_map) > 1 def rename(index): if multi_index: return ("level_{}".format(index),) else: if ("index",) not in self._internal.column_labels: return ("index",) else: return ("level_{}".format(index),) if level is None: new_index_map = [ (column, name if name is not None else rename(i)) for i, (column, name) in enumerate(self._internal.index_map.items()) ] index_map = [] else: if isinstance(level, (int, str)): level = [level] level = list(level) if all(isinstance(l, int) for l in level): for lev in level: if lev >= len(self._internal.index_map): raise IndexError( "Too many levels: Index has only {} level, not {}".format( len(self._internal.index_map), lev + 1 ) ) idx = level elif all(isinstance(lev, str) for lev in level): idx = [] for l in level: try: i = self._internal.index_names.index((l,)) idx.append(i) except ValueError: if multi_index: raise KeyError("Level unknown not found") else: raise KeyError( "Level unknown must be same as name ({})".format( name_like_string(self._internal.index_names[0]) ) ) else: raise ValueError("Level should be all int or all string.") idx.sort() new_index_map = [] index_map_items = list(self._internal.index_map.items()) new_index_map_items = index_map_items.copy() for i in idx: info = index_map_items[i] index_column, index_name = info new_index_map.append( (index_column, index_name if index_name is not None else rename(i)) ) new_index_map_items.remove(info) index_map = OrderedDict(new_index_map_items) new_data_scols = [ scol_for(self._sdf, column).alias(name_like_string(name)) for column, name in new_index_map ] if len(index_map) > 0: index_scols = [scol_for(self._sdf, column) for column in index_map] sdf = self._sdf.select( index_scols + new_data_scols + self._internal.data_spark_columns + list(HIDDEN_COLUMNS) ) else: sdf = self._sdf.select( new_data_scols + self._internal.data_spark_columns + list(HIDDEN_COLUMNS) ) # Now, new internal Spark columns are named as same as index name. new_index_map = [(column, name) for column, name in new_index_map] sdf = _InternalFrame.attach_default_index(sdf) index_map = OrderedDict({SPARK_DEFAULT_INDEX_NAME: None}) if drop: new_index_map = [] if self._internal.column_labels_level > 1: column_depth = len(self._internal.column_labels[0]) if col_level >= column_depth: raise IndexError( "Too many levels: Index has only {} levels, not {}".format( column_depth, col_level + 1 ) ) if any(col_level + len(name) > column_depth for _, name in new_index_map): raise ValueError("Item must have length equal to number of levels.") column_labels = [ tuple( ([col_fill] * col_level) + list(name) + ([col_fill] * (column_depth - (len(name) + col_level))) ) for _, name in new_index_map ] + self._internal.column_labels else: column_labels = [name for _, name in new_index_map] + self._internal.column_labels internal = self._internal.copy( spark_frame=sdf, index_map=index_map, column_labels=column_labels, data_spark_columns=( [scol_for(sdf, name_like_string(name)) for _, name in new_index_map] + [scol_for(sdf, col) for col in self._internal.data_spark_column_names] ), ) if inplace: self._internal = internal else: return DataFrame(internal) def isnull(self): """ Detects missing values for items in the current Dataframe. 
Return a boolean same-sized Dataframe indicating if the values are NA. NA values, such as None or numpy.NaN, gets mapped to True values. Everything else gets mapped to False values. See Also -------- Dataframe.notnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.isnull() 0 1 0 False False 1 False True 2 False True 3 False False >>> df = ks.DataFrame([[None, 'bee', None], ['dog', None, 'fly']]) >>> df.isnull() 0 1 2 0 True False True 1 False True False """ return self._apply_series_op(lambda kser: kser.isnull()) isna = isnull def notnull(self): """ Detects non-missing values for items in the current Dataframe. This function takes a dataframe and indicates whether it's values are valid (not missing, which is ``NaN`` in numeric datatypes, ``None`` or ``NaN`` in objects and ``NaT`` in datetimelike). See Also -------- Dataframe.isnull Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, None), (.6, None), (.2, .1)]) >>> df.notnull() 0 1 0 True True 1 True False 2 True False 3 True True >>> df = ks.DataFrame([['ant', 'bee', 'cat'], ['dog', None, 'fly']]) >>> df.notnull() 0 1 2 0 True True True 1 True False True """ return self._apply_series_op(lambda kser: kser.notnull()) notna = notnull # TODO: add frep and axis parameter def shift(self, periods=1, fill_value=None): """ Shift DataFrame by desired number of periods. .. note:: the current implementation of shift uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int Number of periods to shift. Can be positive or negative. fill_value : object, optional The scalar value to use for newly introduced missing values. The default depends on the dtype of self. For numeric data, np.nan is used. Returns ------- Copy of input DataFrame, shifted. Examples -------- >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45], ... 'Col2': [13, 23, 18, 33, 48], ... 'Col3': [17, 27, 22, 37, 52]}, ... columns=['Col1', 'Col2', 'Col3']) >>> df.shift(periods=3) Col1 Col2 Col3 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 10.0 13.0 17.0 4 20.0 23.0 27.0 >>> df.shift(periods=3, fill_value=0) Col1 Col2 Col3 0 0 0 0 1 0 0 0 2 0 0 0 3 10 13 17 4 20 23 27 """ return self._apply_series_op(lambda kser: kser.shift(periods, fill_value)) # TODO: axis should support 1 or 'columns' either at this moment def diff(self, periods: int = 1, axis: Union[int, str] = 0): """ First discrete difference of element. Calculates the difference of a DataFrame element compared with another element in the DataFrame (default is the element in the same column of the previous row). .. note:: the current implementation of diff uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for calculating difference, accepts negative values. axis : int, default 0 or 'index' Can only be set to 0 at the moment. Returns ------- diffed : DataFrame Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6], ... 'b': [1, 1, 2, 3, 5, 8], ... 
'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 2 1 4 2 3 2 9 3 4 3 16 4 5 5 25 5 6 8 36 >>> df.diff() a b c 0 NaN NaN NaN 1 1.0 0.0 3.0 2 1.0 1.0 5.0 3 1.0 1.0 7.0 4 1.0 2.0 9.0 5 1.0 3.0 11.0 Difference with previous column >>> df.diff(periods=3) a b c 0 NaN NaN NaN 1 NaN NaN NaN 2 NaN NaN NaN 3 3.0 2.0 15.0 4 3.0 4.0 21.0 5 3.0 6.0 27.0 Difference with following row >>> df.diff(periods=-1) a b c 0 -1.0 0.0 -3.0 1 -1.0 -1.0 -5.0 2 -1.0 -1.0 -7.0 3 -1.0 -2.0 -9.0 4 -1.0 -3.0 -11.0 5 NaN NaN NaN """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') return self._apply_series_op(lambda kser: kser.diff(periods)) # TODO: axis should support 1 or 'columns' either at this moment def nunique( self, axis: Union[int, str] = 0, dropna: bool = True, approx: bool = False, rsd: float = 0.05, ) -> "ks.Series": """ Return number of unique elements in the object. Excludes NA values by default. Parameters ---------- axis : int, default 0 or 'index' Can only be set to 0 at the moment. dropna : bool, default True Don’t include NaN in the count. approx: bool, default False If False, will use the exact algorithm and return the exact number of unique. If True, it uses the HyperLogLog approximate algorithm, which is significantly faster for large amount of data. Note: This parameter is specific to Koalas and is not found in pandas. rsd: float, default 0.05 Maximum estimation error allowed in the HyperLogLog algorithm. Note: Just like ``approx`` this parameter is specific to Koalas. Returns ------- The number of unique values per column as a Koalas Series. Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [np.nan, 3, np.nan]}) >>> df.nunique() A 3 B 1 Name: 0, dtype: int64 >>> df.nunique(dropna=False) A 3 B 2 Name: 0, dtype: int64 On big data, we recommend using the approximate algorithm to speed up this function. The result will be very close to the exact unique count. >>> df.nunique(approx=True) A 3 B 1 Name: 0, dtype: int64 """ from databricks.koalas.series import _col axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') sdf = self._sdf.select( [ self._kser_for(label)._nunique(dropna, approx, rsd) for label in self._internal.column_labels ] ) # The data is expected to be small so it's fine to transpose/use default index. with ks.option_context( "compute.default_index_type", "distributed", "compute.max_rows", None ): kdf = DataFrame(sdf) # type: ks.DataFrame internal = _InternalFrame( spark_frame=kdf._internal.spark_frame, index_map=kdf._internal.index_map, column_labels=self._internal.column_labels, column_label_names=self._internal.column_label_names, ) return _col(DataFrame(internal).transpose()) def round(self, decimals=0): """ Round a DataFrame to a variable number of decimal places. Parameters ---------- decimals : int, dict, Series Number of decimal places to round each column to. If an int is given, round each column to the same number of places. Otherwise dict and Series round to variable numbers of places. Column names should be in the keys if `decimals` is a dict-like, or in the index if `decimals` is a Series. Any columns not included in `decimals` will be left as is. Elements of `decimals` which are not columns of the input will be ignored. .. note:: If `decimals` is a Series, it is expected to be small, as all the data is loaded into the driver's memory. 
Returns ------- DataFrame See Also -------- Series.round Examples -------- >>> df = ks.DataFrame({'A':[0.028208, 0.038683, 0.877076], ... 'B':[0.992815, 0.645646, 0.149370], ... 'C':[0.173891, 0.577595, 0.491027]}, ... columns=['A', 'B', 'C'], ... index=['first', 'second', 'third']) >>> df A B C first 0.028208 0.992815 0.173891 second 0.038683 0.645646 0.577595 third 0.877076 0.149370 0.491027 >>> df.round(2) A B C first 0.03 0.99 0.17 second 0.04 0.65 0.58 third 0.88 0.15 0.49 >>> df.round({'A': 1, 'C': 2}) A B C first 0.0 0.992815 0.17 second 0.0 0.645646 0.58 third 0.9 0.149370 0.49 >>> decimals = ks.Series([1, 0, 2], index=['A', 'B', 'C']) >>> df.round(decimals) A B C first 0.0 1.0 0.17 second 0.0 1.0 0.58 third 0.9 0.0 0.49 """ if isinstance(decimals, ks.Series): decimals = { k if isinstance(k, tuple) else (k,): v for k, v in decimals._to_internal_pandas().items() } elif isinstance(decimals, dict): decimals = {k if isinstance(k, tuple) else (k,): v for k, v in decimals.items()} elif isinstance(decimals, int): decimals = {k: decimals for k in self._internal.column_labels} else: raise ValueError("decimals must be an integer, a dict-like or a Series") def op(kser): label = kser._internal.column_labels[0] if label in decimals: return F.round(kser.spark_column, decimals[label]).alias( kser._internal.data_spark_column_names[0] ) else: return kser return self._apply_series_op(op) def _mark_duplicates(self, subset=None, keep="first"): if subset is None: subset = self._internal.column_labels else: if isinstance(subset, str): subset = [(subset,)] elif isinstance(subset, tuple): subset = [subset] else: subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset] diff = set(subset).difference(set(self._internal.column_labels)) if len(diff) > 0: raise KeyError(", ".join([str(d) if len(d) > 1 else d[0] for d in diff])) group_cols = [self._internal.spark_column_name_for(label) for label in subset] sdf = self._sdf column = verify_temp_column_name(sdf, "__duplicated__") if keep == "first" or keep == "last": if keep == "first": ord_func = spark.functions.asc else: ord_func = spark.functions.desc window = ( Window.partitionBy(group_cols) .orderBy(ord_func(NATURAL_ORDER_COLUMN_NAME)) .rowsBetween(Window.unboundedPreceding, Window.currentRow) ) sdf = sdf.withColumn(column, F.row_number().over(window) > 1) elif not keep: window = Window.partitionBy(group_cols).rowsBetween( Window.unboundedPreceding, Window.unboundedFollowing ) sdf = sdf.withColumn(column, F.count("*").over(window) > 1) else: raise ValueError("'keep' only supports 'first', 'last' and False") return sdf, column def duplicated(self, subset=None, keep="first"): """ Return boolean Series denoting duplicate rows, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Mark duplicates as ``True`` except for the first occurrence. - ``last`` : Mark duplicates as ``True`` except for the last occurrence. - False : Mark all duplicates as ``True``. Returns ------- duplicated : Series Examples -------- >>> df = ks.DataFrame({'a': [1, 1, 1, 3], 'b': [1, 1, 1, 4], 'c': [1, 1, 1, 5]}, ... columns = ['a', 'b', 'c']) >>> df a b c 0 1 1 1 1 1 1 1 2 1 1 1 3 3 4 5 >>> df.duplicated().sort_index() 0 False 1 True 2 True 3 False Name: 0, dtype: bool Mark duplicates as ``True`` except for the last occurrence. 
        >>> df.duplicated(keep='last').sort_index()
        0     True
        1     True
        2    False
        3    False
        Name: 0, dtype: bool

        Mark all duplicates as ``True``.

        >>> df.duplicated(keep=False).sort_index()
        0     True
        1     True
        2     True
        3    False
        Name: 0, dtype: bool
        """
        from databricks.koalas.series import _col

        sdf, column = self._mark_duplicates(subset, keep)

        column_label = ("0",)
        sdf = sdf.select(
            self._internal.index_spark_columns
            + [scol_for(sdf, column).alias(name_like_string(column_label))]
        )
        return _col(
            DataFrame(
                _InternalFrame(
                    spark_frame=sdf,
                    index_map=self._internal.index_map,
                    column_labels=[column_label],
                    data_spark_columns=[scol_for(sdf, name_like_string(column_label))],
                )
            )
        )

    def to_koalas(self, index_col: Optional[Union[str, List[str]]] = None):
        """
        Converts the existing DataFrame into a Koalas DataFrame.

        This method is monkey-patched into Spark's DataFrame and can be used
        to convert a Spark DataFrame into a Koalas DataFrame. If running on
        an existing Koalas DataFrame, the method returns itself.

        If a Koalas DataFrame is converted to a Spark DataFrame and then back
        to Koalas, it will lose the index information and the original index
        will be turned into a normal column.

        Parameters
        ----------
        index_col: str or list of str, optional, default: None
            Index column of table in Spark.

        See Also
        --------
        DataFrame.to_spark

        Examples
        --------
        >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}, columns=['col1', 'col2'])
        >>> df
           col1  col2
        0     1     3
        1     2     4

        >>> spark_df = df.to_spark()
        >>> spark_df
        DataFrame[col1: bigint, col2: bigint]

        >>> kdf = spark_df.to_koalas()
        >>> kdf
           col1  col2
        0     1     3
        1     2     4

        We can specify the index columns.

        >>> kdf = spark_df.to_koalas(index_col='col1')
        >>> kdf  # doctest: +NORMALIZE_WHITESPACE
              col2
        col1
        1        3
        2        4

        Calling to_koalas on a Koalas DataFrame simply returns itself.

        >>> df.to_koalas()
           col1  col2
        0     1     3
        1     2     4
        """
        if isinstance(self, DataFrame):
            return self
        else:
            assert isinstance(self, spark.DataFrame), type(self)
            from databricks.koalas.namespace import _get_index_map

            index_map = _get_index_map(self, index_col)
            internal = _InternalFrame(spark_frame=self, index_map=index_map)
            return DataFrame(internal)

    def cache(self):
        """
        Yields and caches the current DataFrame.

        The Koalas DataFrame is yielded as a protected resource and its corresponding
        data is cached which gets uncached after execution goes out of the context.

        If you want to specify the StorageLevel manually, use :meth:`DataFrame.persist`.

        See Also
        --------
        DataFrame.persist

        Examples
        --------
        >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
        ...                   columns=['dogs', 'cats'])
        >>> df
           dogs  cats
        0   0.2   0.3
        1   0.0   0.6
        2   0.6   0.0
        3   0.2   0.1

        >>> with df.cache() as cached_df:
        ...     print(cached_df.count())
        ...
        dogs    4
        cats    4
        Name: 0, dtype: int64

        >>> df = df.cache()
        >>> df.to_pandas().mean(axis=1)
        0    0.25
        1    0.30
        2    0.30
        3    0.15
        dtype: float64

        To uncache the dataframe, use `unpersist` function

        >>> df.unpersist()
        """
        return _CachedDataFrame(self._internal)

    def persist(self, storage_level=StorageLevel.MEMORY_AND_DISK):
        """
        Yields and caches the current DataFrame with a specific StorageLevel.
        If a StorageLevel is not given, the `MEMORY_AND_DISK` level is used by default like PySpark.

        The Koalas DataFrame is yielded as a protected resource and its corresponding
        data is cached which gets uncached after execution goes out of the context.

        See Also
        --------
        DataFrame.cache

        Examples
        --------
        >>> import pyspark
        >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
        ...
columns=['dogs', 'cats']) >>> df dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 Set the StorageLevel to `MEMORY_ONLY`. >>> with df.persist(pyspark.StorageLevel.MEMORY_ONLY) as cached_df: ... print(cached_df.storage_level) ... print(cached_df.count()) ... Memory Serialized 1x Replicated dogs 4 cats 4 Name: 0, dtype: int64 Set the StorageLevel to `DISK_ONLY`. >>> with df.persist(pyspark.StorageLevel.DISK_ONLY) as cached_df: ... print(cached_df.storage_level) ... print(cached_df.count()) ... Disk Serialized 1x Replicated dogs 4 cats 4 Name: 0, dtype: int64 If a StorageLevel is not given, it uses `MEMORY_AND_DISK` by default. >>> with df.persist() as cached_df: ... print(cached_df.storage_level) ... print(cached_df.count()) ... Disk Memory Serialized 1x Replicated dogs 4 cats 4 Name: 0, dtype: int64 >>> df = df.persist() >>> df.to_pandas().mean(axis=1) 0 0.25 1 0.30 2 0.30 3 0.15 dtype: float64 To uncache the dataframe, use `unpersist` function >>> df.unpersist() """ return _CachedDataFrame(self._internal, storage_level=storage_level) def hint(self, name: str, *parameters) -> "DataFrame": """ Specifies some hint on the current DataFrame. Parameters ---------- name : A name of the hint. parameters : Optional parameters. Returns ------- ret : DataFrame with the hint. See Also -------- broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}, ... columns=['lkey', 'value']) >>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}, ... columns=['rkey', 'value']) >>> merged = df1.merge(df2.hint("broadcast"), left_on='lkey', right_on='rkey') >>> merged.explain() # doctest: +ELLIPSIS == Physical Plan == ... ...BroadcastHashJoin... ... """ return DataFrame(self._internal.with_new_sdf(self._sdf.hint(name, *parameters))) def to_table( self, name: str, format: Optional[str] = None, mode: str = "overwrite", partition_cols: Union[str, List[str], None] = None, index_col: Optional[Union[str, List[str]]] = None, **options ): """ Write the DataFrame into a Spark table. Parameters ---------- name : str, required Table name in Spark. format : string, optional Specifies the output data source format. Some common ones are: - 'delta' - 'parquet' - 'orc' - 'json' - 'csv' mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'overwrite'. Specifies the behavior of the save operation when the table exists already. - 'append': Append the new data to existing data. - 'overwrite': Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. options Additional options passed directly to Spark. See Also -------- read_table DataFrame.to_spark_io DataFrame.to_parquet Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... 
code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_table('%s.my_table' % db, partition_cols='date') """ if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore self.to_spark(index_col=index_col).write.saveAsTable( name=name, format=format, mode=mode, partitionBy=partition_cols, **options ) def to_delta( self, path: str, mode: str = "overwrite", partition_cols: Union[str, List[str], None] = None, index_col: Optional[Union[str, List[str]]] = None, **options ): """ Write the DataFrame out as a Delta Lake table. Parameters ---------- path : str, required Path to write to. mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'overwrite'. Specifies the behavior of the save operation when the destination exists already. - 'append': Append the new data to existing data. - 'overwrite': Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional, default None Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. options : dict All other options passed directly into Delta Lake. See Also -------- read_delta DataFrame.to_parquet DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 Create a new Delta Lake table, partitioned by one column: >>> df.to_delta('%s/to_delta/foo' % path, partition_cols='date') Partitioned by two columns: >>> df.to_delta('%s/to_delta/bar' % path, partition_cols=['date', 'country']) Overwrite an existing table's partitions, using the 'replaceWhere' capability in Delta: >>> df.to_delta('%s/to_delta/bar' % path, ... mode='overwrite', replaceWhere='date >= "2012-01-01"') """ if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore self.to_spark_io( path=path, mode=mode, format="delta", partition_cols=partition_cols, index_col=index_col, **options ) def to_parquet( self, path: str, mode: str = "overwrite", partition_cols: Union[str, List[str], None] = None, compression: Optional[str] = None, index_col: Optional[Union[str, List[str]]] = None, **options ): """ Write the DataFrame out as a Parquet file or directory. Parameters ---------- path : str, required Path to write to. mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'overwrite'. Specifies the behavior of the save operation when the destination exists already. - 'append': Append the new data to existing data. - 'overwrite': Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. 
partition_cols : str or list of str, optional, default None Names of partitioning columns compression : str {'none', 'uncompressed', 'snappy', 'gzip', 'lzo', 'brotli', 'lz4', 'zstd'} Compression codec to use when saving to file. If None is set, it uses the value specified in `spark.sql.parquet.compression.codec`. index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_parquet DataFrame.to_delta DataFrame.to_table DataFrame.to_spark_io Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_parquet('%s/to_parquet/foo.parquet' % path, partition_cols='date') >>> df.to_parquet( ... '%s/to_parquet/foo.parquet' % path, ... mode = 'overwrite', ... partition_cols=['date', 'country']) """ if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore builder = self.to_spark(index_col=index_col).write.mode(mode) OptionUtils._set_opts( builder, mode=mode, partitionBy=partition_cols, compression=compression ) builder.options(**options).format("parquet").save(path) def to_spark_io( self, path: Optional[str] = None, format: Optional[str] = None, mode: str = "overwrite", partition_cols: Union[str, List[str], None] = None, index_col: Optional[Union[str, List[str]]] = None, **options ): """Write the DataFrame out to a Spark data source. Parameters ---------- path : string, optional Path to the data source. format : string, optional Specifies the output data source format. Some common ones are: - 'delta' - 'parquet' - 'orc' - 'json' - 'csv' mode : str {'append', 'overwrite', 'ignore', 'error', 'errorifexists'}, default 'overwrite'. Specifies the behavior of the save operation when data already. - 'append': Append the new data to existing data. - 'overwrite': Overwrite existing data. - 'ignore': Silently ignore this operation if data already exists. - 'error' or 'errorifexists': Throw an exception if data already exists. partition_cols : str or list of str, optional Names of partitioning columns index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. options : dict All other options passed directly into Spark's data source. See Also -------- read_spark_io DataFrame.to_delta DataFrame.to_parquet DataFrame.to_table Examples -------- >>> df = ks.DataFrame(dict( ... date=list(pd.date_range('2012-1-1 12:00:00', periods=3, freq='M')), ... country=['KR', 'US', 'JP'], ... 
code=[1, 2 ,3]), columns=['date', 'country', 'code']) >>> df date country code 0 2012-01-31 12:00:00 KR 1 1 2012-02-29 12:00:00 US 2 2 2012-03-31 12:00:00 JP 3 >>> df.to_spark_io(path='%s/to_spark_io/foo.json' % path, format='json') """ if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1: options = options.get("options") # type: ignore self.to_spark(index_col=index_col).write.save( path=path, format=format, mode=mode, partitionBy=partition_cols, **options ) def to_spark(self, index_col: Optional[Union[str, List[str]]] = None): """ Return the current DataFrame as a Spark DataFrame. Parameters ---------- index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. See Also -------- DataFrame.to_koalas Examples -------- By default, this method loses the index as below. >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]}) >>> df.to_spark().show() # doctest: +NORMALIZE_WHITESPACE +---+---+---+ | a| b| c| +---+---+---+ | 1| 4| 7| | 2| 5| 8| | 3| 6| 9| +---+---+---+ If `index_col` is set, it keeps the index column as specified. >>> df.to_spark(index_col="index").show() # doctest: +NORMALIZE_WHITESPACE +-----+---+---+---+ |index| a| b| c| +-----+---+---+---+ | 0| 1| 4| 7| | 1| 2| 5| 8| | 2| 3| 6| 9| +-----+---+---+---+ Keeping index column is useful when you want to call some Spark APIs and convert it back to Koalas DataFrame without creating a default index, which can affect performance. >>> spark_df = df.to_spark(index_col="index") >>> spark_df = spark_df.filter("a == 2") >>> spark_df.to_koalas(index_col="index") # doctest: +NORMALIZE_WHITESPACE a b c index 1 2 5 8 In case of multi-index, specify a list to `index_col`. >>> new_df = df.set_index("a", append=True) >>> new_spark_df = new_df.to_spark(index_col=["index_1", "index_2"]) >>> new_spark_df.show() # doctest: +NORMALIZE_WHITESPACE +-------+-------+---+---+ |index_1|index_2| b| c| +-------+-------+---+---+ | 0| 1| 4| 7| | 1| 2| 5| 8| | 2| 3| 6| 9| +-------+-------+---+---+ Likewise, can be converted to back to Koalas DataFrame. >>> new_spark_df.to_koalas( ... index_col=["index_1", "index_2"]) # doctest: +NORMALIZE_WHITESPACE b c index_1 index_2 0 1 4 7 1 2 5 8 2 3 6 9 """ if index_col is None: return self._internal.to_external_spark_frame else: if isinstance(index_col, str): index_col = [index_col] data_column_names = [] data_columns = [] data_columns_column_labels = zip( self._internal.data_spark_column_names, self._internal.column_labels ) # TODO: this code is similar with _InternalFrame.to_new_spark_frame. Might have to # deduplicate. for i, (column, label) in enumerate(data_columns_column_labels): scol = self._internal.spark_column_for(label) name = str(i) if label is None else name_like_string(label) data_column_names.append(name) if column != name: scol = scol.alias(name) data_columns.append(scol) old_index_scols = self._internal.index_spark_columns if len(index_col) != len(old_index_scols): raise ValueError( "length of index columns is %s; however, the length of the given " "'index_col' is %s." 
% (len(old_index_scols), len(index_col)) ) if any(col in data_column_names for col in index_col): raise ValueError("'index_col' cannot be overlapped with other columns.") sdf = self._internal.to_internal_spark_frame new_index_scols = [ index_scol.alias(col) for index_scol, col in zip(old_index_scols, index_col) ] return sdf.select(new_index_scols + data_columns) def to_pandas(self): """ Return a pandas DataFrame. .. note:: This method should only be used if the resulting Pandas DataFrame is expected to be small, as all the data is loaded into the driver's memory. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df.to_pandas() dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 """ return self._internal.to_pandas_frame.copy() # Alias to maintain backward compatibility with Spark toPandas = to_pandas def assign(self, **kwargs): """ Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though Koalas doesn't check it). If the values are not callable, (e.g. a Series or a literal), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Examples -------- >>> df = ks.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence and you can also create multiple columns within the same assign. >>> assigned = df.assign(temp_f=df['temp_c'] * 9 / 5 + 32, ... temp_k=df['temp_c'] + 273.15) >>> assigned[['temp_c', 'temp_f', 'temp_k']] temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 Notes ----- Assigning multiple columns within the same ``assign`` is possible but you cannot refer to newly created or modified columns. This feature is supported in pandas for Python 3.6 and later but not in Koalas. In Koalas, all items are computed first, and then assigned. 
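
        As a simple illustration of this note, the following would raise an error in
        Koalas because ``temp_f`` does not exist yet when ``temp_k`` is computed,
        while the equivalent code works in pandas on Python 3.6 and later:

        >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32,
        ...           temp_k=lambda x: (x.temp_f - 32) * 5 / 9 + 273.15)  # doctest: +SKIP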
""" return self._assign(kwargs) def _assign(self, kwargs): assert isinstance(kwargs, dict) from databricks.koalas.series import Series for k, v in kwargs.items(): if not (isinstance(v, (Series, spark.Column)) or callable(v) or is_scalar(v)): raise TypeError( "Column assignment doesn't support type " "{0}".format(type(v).__name__) ) if callable(v): kwargs[k] = v(self) pairs = { (k if isinstance(k, tuple) else (k,)): ( v.spark_column if isinstance(v, Series) else v if isinstance(v, spark.Column) else F.lit(v) ) for k, v in kwargs.items() } scols = [] for label in self._internal.column_labels: for i in range(len(label)): if label[: len(label) - i] in pairs: name = self._internal.spark_column_name_for(label) scol = pairs[label[: len(label) - i]].alias(name) break else: scol = self._internal.spark_column_for(label) scols.append(scol) column_labels = self._internal.column_labels.copy() for label, scol in pairs.items(): if label not in set(i[: len(label)] for i in self._internal.column_labels): scols.append(scol.alias(name_like_string(label))) column_labels.append(label) level = self._internal.column_labels_level column_labels = [ tuple(list(label) + ([""] * (level - len(label)))) for label in column_labels ] internal = self._internal.with_new_columns(scols, column_labels=column_labels) return DataFrame(internal) @staticmethod def from_records( data: Union[np.array, List[tuple], dict, pd.DataFrame], index: Union[str, list, np.array] = None, exclude: list = None, columns: list = None, coerce_float: bool = False, nrows: int = None, ) -> "DataFrame": """ Convert structured or record ndarray to DataFrame. Parameters ---------- data : ndarray (structured dtype), list of tuples, dict, or DataFrame index : string, list of fields, array-like Field of array to use as the index, alternately a specific set of input labels to use exclude : sequence, default None Columns or fields to exclude columns : sequence, default None Column names to use. If the passed data do not have names associated with them, this argument provides names for the columns. Otherwise this argument indicates the order of the columns in the result (any names not found in the data will become all-NA columns) coerce_float : boolean, default False Attempt to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point, useful for SQL result sets nrows : int, default None Number of rows to read if data is an iterator Returns ------- df : DataFrame Examples -------- Use dict as input >>> ks.DataFrame.from_records({'A': [1, 2, 3]}) A 0 1 1 2 2 3 Use list of tuples as input >>> ks.DataFrame.from_records([(1, 2), (3, 4)]) 0 1 0 1 2 1 3 4 Use NumPy array as input >>> ks.DataFrame.from_records(np.eye(3)) 0 1 2 0 1.0 0.0 0.0 1 0.0 1.0 0.0 2 0.0 0.0 1.0 """ return DataFrame( pd.DataFrame.from_records(data, index, exclude, columns, coerce_float, nrows) ) def to_records(self, index=True, column_dtypes=None, index_dtypes=None): """ Convert DataFrame to a NumPy record array. Index will be included as the first field of the record array if requested. .. note:: This method should only be used if the resulting NumPy ndarray is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- index : bool, default True Include index in resulting record array, stored in 'index' field or using the index label, if set. column_dtypes : str, type, dict, default None If a string or type, the data type to store all columns. 
If a dictionary, a mapping of column names and indices (zero-indexed) to specific data types. index_dtypes : str, type, dict, default None If a string or type, the data type to store all index levels. If a dictionary, a mapping of index level names and indices (zero-indexed) to specific data types. This mapping is applied only if `index=True`. Returns ------- numpy.recarray NumPy ndarray with the DataFrame labels as fields and each row of the DataFrame as entries. See Also -------- DataFrame.from_records: Convert structured or record ndarray to DataFrame. numpy.recarray: An ndarray that allows field access using attributes, analogous to typed columns in a spreadsheet. Examples -------- >>> df = ks.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]}, ... index=['a', 'b']) >>> df A B a 1 0.50 b 2 0.75 >>> df.to_records() # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')]) The index can be excluded from the record array: >>> df.to_records(index=False) # doctest: +SKIP rec.array([(1, 0.5 ), (2, 0.75)], dtype=[('A', '<i8'), ('B', '<f8')]) Specification of dtype for columns is new in Pandas 0.24.0. Data types can be specified for the columns: >>> df.to_records(column_dtypes={"A": "int32"}) # doctest: +SKIP rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)], dtype=[('index', 'O'), ('A', '<i4'), ('B', '<f8')]) Specification of dtype for index is new in Pandas 0.24.0. Data types can also be specified for the index: >>> df.to_records(index_dtypes="<S2") # doctest: +SKIP rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)], dtype=[('index', 'S2'), ('A', '<i8'), ('B', '<f8')]) """ args = locals() kdf = self return validate_arguments_and_invoke_function( kdf._to_internal_pandas(), self.to_records, pd.DataFrame.to_records, args ) def copy(self, deep=None) -> "DataFrame": """ Make a copy of this object's indices and data. Parameters ---------- deep : None this parameter is not supported but just dummy parameter to match pandas. Returns ------- copy : DataFrame Examples -------- >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df_copy = df.copy() >>> df_copy x y z w 0 1 3 5 7 1 2 4 6 8 """ return DataFrame(self._internal.copy()) def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False): """ Remove missing values. Parameters ---------- axis : {0 or 'index'}, default 0 Determine if rows or columns which contain missing values are removed. * 0, or 'index' : Drop rows which contain missing values. how : {'any', 'all'}, default 'any' Determine if row or column is removed from DataFrame, when we have at least one NA or all NA. * 'any' : If any NA values are present, drop that row or column. * 'all' : If all values are NA, drop that row or column. thresh : int, optional Require that many non-NA values. subset : array-like, optional Labels along other axis to consider, e.g. if you are dropping rows these would be a list of columns to include. inplace : bool, default False If True, do operation inplace and return None. Returns ------- DataFrame DataFrame with NA entries dropped from it. See Also -------- DataFrame.drop : Drop specified labels from columns. DataFrame.isnull: Indicate missing values. DataFrame.notnull : Indicate existing (non-missing) values. Examples -------- >>> df = ks.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'], ... "toy": [None, 'Batmobile', 'Bullwhip'], ... "born": [None, "1940-04-25", None]}, ... 
columns=['name', 'toy', 'born']) >>> df name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Drop the rows where at least one element is missing. >>> df.dropna() name toy born 1 Batman Batmobile 1940-04-25 Drop the rows where all elements are missing. >>> df.dropna(how='all') name toy born 0 Alfred None None 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Keep only the rows with at least 2 non-NA values. >>> df.dropna(thresh=2) name toy born 1 Batman Batmobile 1940-04-25 2 Catwoman Bullwhip None Define in which columns to look for missing values. >>> df.dropna(subset=['name', 'born']) name toy born 1 Batman Batmobile 1940-04-25 Keep the DataFrame with valid entries in the same variable. >>> df.dropna(inplace=True) >>> df name toy born 1 Batman Batmobile 1940-04-25 """ axis = validate_axis(axis) inplace = validate_bool_kwarg(inplace, "inplace") if axis == 0: if subset is not None: if isinstance(subset, str): labels = [(subset,)] elif isinstance(subset, tuple): labels = [subset] else: labels = [sub if isinstance(sub, tuple) else (sub,) for sub in subset] invalids = [label for label in labels if label not in self._internal.column_labels] if len(invalids) > 0: raise KeyError(invalids) else: labels = self._internal.column_labels cnt = reduce( lambda x, y: x + y, [ F.when(self._kser_for(label).notna().spark_column, 1).otherwise(0) for label in labels ], F.lit(0), ) if thresh is not None: pred = cnt >= F.lit(int(thresh)) elif how == "any": pred = cnt == F.lit(len(labels)) elif how == "all": pred = cnt > F.lit(0) else: if how is not None: raise ValueError("invalid how option: {h}".format(h=how)) else: raise TypeError("must specify how or thresh") internal = self._internal.with_filter(pred) if inplace: self._internal = internal else: return DataFrame(internal) else: raise NotImplementedError("dropna currently only works for axis=0 or axis='index'") # TODO: add 'limit' when value parameter exists def fillna(self, value=None, method=None, axis=None, inplace=False, limit=None): """Fill NA/NaN values. .. note:: the current implementation of 'method' parameter in fillna uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- value : scalar, dict, Series Value to use to fill holes. alternately a dict/Series of values specifying which value to use for each column. DataFrame is not supported. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ks.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... 
columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Replace all NaN elements with 0s. >>> df.fillna(0) A B C D 0 0.0 2.0 0.0 0 1 3.0 4.0 0.0 1 2 0.0 0.0 0.0 5 3 0.0 3.0 1.0 4 We can also propagate non-null values forward or backward. >>> df.fillna(method='ffill') A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 1.0 4 Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1, 2, and 3 respectively. >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3} >>> df.fillna(value=values) A B C D 0 0.0 2.0 2.0 0 1 3.0 4.0 2.0 1 2 0.0 1.0 2.0 5 3 0.0 3.0 1.0 4 """ if value is not None: axis = validate_axis(axis) inplace = validate_bool_kwarg(inplace, "inplace") if axis != 0: raise NotImplementedError("fillna currently only works for axis=0 or axis='index'") if not isinstance(value, (float, int, str, bool, dict, pd.Series)): raise TypeError("Unsupported type %s" % type(value)) if limit is not None: raise ValueError("limit parameter for value is not support now") if isinstance(value, pd.Series): value = value.to_dict() if isinstance(value, dict): for v in value.values(): if not isinstance(v, (float, int, str, bool)): raise TypeError("Unsupported type %s" % type(v)) value = {k if isinstance(k, tuple) else (k,): v for k, v in value.items()} def op(kser): label = kser._internal.column_labels[0] for k, v in value.items(): if k == label[: len(k)]: return kser.fillna( value=value[k], method=method, axis=axis, inplace=False, limit=limit ) else: return kser else: op = lambda kser: kser.fillna( value=value, method=method, axis=axis, inplace=False, limit=limit ) elif method is not None: op = lambda kser: kser.fillna( value=value, method=method, axis=axis, inplace=False, limit=limit ) else: raise ValueError("Must specify a fillna 'value' or 'method' parameter.") kdf = self._apply_series_op(op) if inplace: self._internal = kdf._internal else: return kdf # TODO: add 'downcast' when value parameter exists def bfill(self, axis=None, inplace=False, limit=None): """ Synonym for `DataFrame.fillna()` with ``method=`bfill```. .. note:: the current implementation of 'bfiff' uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ks.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Propagate non-null values backward. 
>>> df.bfill() A B C D 0 3.0 2.0 1.0 0 1 3.0 4.0 1.0 1 2 NaN 3.0 1.0 5 3 NaN 3.0 1.0 4 """ return self.fillna(method="bfill", axis=axis, inplace=inplace, limit=limit) # TODO: add 'downcast' when value parameter exists def ffill(self, axis=None, inplace=False, limit=None): """ Synonym for `DataFrame.fillna()` with ``method=`ffill```. .. note:: the current implementation of 'ffiff' uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame DataFrame with NA entries filled. Examples -------- >>> df = ks.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> df A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Propagate non-null values forward. >>> df.ffill() A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 1.0 4 """ return self.fillna(method="ffill", axis=axis, inplace=inplace, limit=limit) def replace( self, to_replace=None, value=None, subset=None, inplace=False, limit=None, regex=False, method="pad", ): """ Returns a new DataFrame replacing a value with another value. Parameters ---------- to_replace : int, float, string, or list Value to be replaced. If the value is a dict, then value is ignored and to_replace must be a mapping from column name (string) to replacement value. The value to be replaced must be an int, float, or string. value : int, float, string, or list Value to use to replace holes. The replacement value must be an int, float, or string. If value is a list, value should be of the same length with to_replace. subset : string, list Optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if value is a string, and subset contains a non-string column, then the non-string column is simply ignored. inplace : boolean, default False Fill in place (do not create a new object) Returns ------- DataFrame Object after replacement. Examples -------- >>> df = ks.DataFrame({"name": ['Ironman', 'Captain America', 'Thor', 'Hulk'], ... "weapon": ['Mark-45', 'Shield', 'Mjolnir', 'Smash']}, ... 
columns=['name', 'weapon']) >>> df name weapon 0 Ironman Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash Scalar `to_replace` and `value` >>> df.replace('Ironman', 'War-Machine') name weapon 0 War-Machine Mark-45 1 Captain America Shield 2 Thor Mjolnir 3 Hulk Smash List like `to_replace` and `value` >>> df.replace(['Ironman', 'Captain America'], ['Rescue', 'Hawkeye'], inplace=True) >>> df name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Mjolnir 3 Hulk Smash Replacing value by specifying column >>> df.replace('Mjolnir', 'Stormbuster', subset='weapon') name weapon 0 Rescue Mark-45 1 Hawkeye Shield 2 Thor Stormbuster 3 Hulk Smash Dict like `to_replace` >>> df = ks.DataFrame({'A': [0, 1, 2, 3, 4], ... 'B': [5, 6, 7, 8, 9], ... 'C': ['a', 'b', 'c', 'd', 'e']}, ... columns=['A', 'B', 'C']) >>> df.replace({'A': {0: 100, 4: 400}}) A B C 0 100 5 a 1 1 6 b 2 2 7 c 3 3 8 d 4 400 9 e >>> df.replace({'A': 0, 'B': 5}, 100) A B C 0 100 100 a 1 1 6 b 2 2 7 c 3 3 8 d 4 4 9 e Notes ----- One difference between this implementation and pandas is that it is necessary to specify the column name when you are passing dictionary in `to_replace` parameter. Calling `replace` on its index such as `df.replace({0: 10, 1: 100})` will throw an error. Instead specify column-name like `df.replace({'A': {0: 10, 1: 100}})`. """ if method != "pad": raise NotImplementedError("replace currently works only for method='pad") if limit is not None: raise NotImplementedError("replace currently works only when limit=None") if regex is not False: raise NotImplementedError("replace currently doesn't supports regex") inplace = validate_bool_kwarg(inplace, "inplace") if value is not None and not isinstance(value, (int, float, str, list, dict)): raise TypeError("Unsupported type {}".format(type(value))) if to_replace is not None and not isinstance(to_replace, (int, float, str, list, dict)): raise TypeError("Unsupported type {}".format(type(to_replace))) if isinstance(value, list) and isinstance(to_replace, list): if len(value) != len(to_replace): raise ValueError("Length of to_replace and value must be same") # TODO: Do we still need to support this argument? if subset is None: subset = self._internal.column_labels elif isinstance(subset, str): subset = [(subset,)] elif isinstance(subset, tuple): subset = [subset] else: subset = [sub if isinstance(sub, tuple) else (sub,) for sub in subset] subset = [self._internal.spark_column_name_for(label) for label in subset] sdf = self._sdf if ( isinstance(to_replace, dict) and value is None and (not any(isinstance(i, dict) for i in to_replace.values())) ): sdf = sdf.replace(to_replace, value, subset) elif isinstance(to_replace, dict): for name, replacement in to_replace.items(): if isinstance(name, str): name = (name,) df_column = self._internal.spark_column_name_for(name) if isinstance(replacement, dict): sdf = sdf.replace(replacement, subset=df_column) else: sdf = sdf.withColumn( df_column, F.when(scol_for(sdf, df_column) == replacement, value).otherwise( scol_for(sdf, df_column) ), ) else: sdf = sdf.replace(to_replace, value, subset) internal = self._internal.with_new_sdf(sdf) if inplace: self._internal = internal else: return DataFrame(internal) def clip(self, lower: Union[float, int] = None, upper: Union[float, int] = None) -> "DataFrame": """ Trim values at input threshold(s). Assigns values outside boundary to boundary values. Parameters ---------- lower : float or int, default None Minimum threshold value. All values below this threshold will be set to it. 
upper : float or int, default None Maximum threshold value. All values above this threshold will be set to it. Returns ------- DataFrame DataFrame with the values outside the clip boundaries replaced. Examples -------- >>> ks.DataFrame({'A': [0, 2, 4]}).clip(1, 3) A 0 1 1 2 2 3 Notes ----- One difference between this implementation and pandas is that running pd.DataFrame({'A': ['a', 'b']}).clip(0, 1) will crash with "TypeError: '<=' not supported between instances of 'str' and 'int'" while ks.DataFrame({'A': ['a', 'b']}).clip(0, 1) will output the original DataFrame, simply ignoring the incompatible types. """ if is_list_like(lower) or is_list_like(upper): raise ValueError( "List-like value are not supported for 'lower' and 'upper' at the " + "moment" ) if lower is None and upper is None: return self numeric_types = ( DecimalType, DoubleType, FloatType, ByteType, IntegerType, LongType, ShortType, ) def op(kser): if isinstance(kser.spark_type, numeric_types): scol = kser.spark_column if lower is not None: scol = F.when(scol < lower, lower).otherwise(scol) if upper is not None: scol = F.when(scol > upper, upper).otherwise(scol) return scol.alias(kser._internal.data_spark_column_names[0]) else: return kser return self._apply_series_op(op) def head(self, n: int = 5) -> "DataFrame": """ Return the first `n` rows. This function returns the first `n` rows for the object based on position. It is useful for quickly testing if your object has the right type of data in it. Parameters ---------- n : int, default 5 Number of rows to select. Returns ------- obj_head : same type as caller The first `n` rows of the caller object. Examples -------- >>> df = ks.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion', ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']}) >>> df animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey 5 parrot 6 shark 7 whale 8 zebra Viewing the first 5 lines >>> df.head() animal 0 alligator 1 bee 2 falcon 3 lion 4 monkey Viewing the first `n` lines (three in this case) >>> df.head(3) animal 0 alligator 1 bee 2 falcon """ if n < 0: n = len(self) + n if n <= 0: return DataFrame(self._internal.with_filter(F.lit(False))) else: if get_option("compute.ordered_head"): sdf = self._sdf.orderBy(NATURAL_ORDER_COLUMN_NAME) else: sdf = self._sdf return DataFrame(self._internal.with_new_sdf(sdf.limit(n))) def pivot_table(self, values=None, index=None, columns=None, aggfunc="mean", fill_value=None): """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on the index and columns of the result DataFrame. Parameters ---------- values : column to aggregate. They should be either a list less than three or a string. index : column (string) or list of columns If an array is passed, it must be the same length as the data. The list should contain string. columns : column Columns used in the pivot operation. Only one column is supported and it should be a string. aggfunc : function (string), dict, default mean If dict is passed, the resulting pivot table will have columns concatenated by "_" where the first part is the value of columns and the second part is the column name in values If dict is passed, the key is column to aggregate and value is function or list of functions. fill_value : scalar, default None Value to replace missing values with. Returns ------- table : DataFrame Examples -------- >>> df = ks.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo", ... "bar", "bar", "bar", "bar"], ... 
"B": ["one", "one", "one", "two", "two", ... "one", "one", "two", "two"], ... "C": ["small", "large", "large", "small", ... "small", "large", "small", "small", ... "large"], ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7], ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]}, ... columns=['A', 'B', 'C', 'D', 'E']) >>> df A B C D E 0 foo one small 1 2 1 foo one large 2 4 2 foo one large 2 5 3 foo two small 3 5 4 foo two small 3 6 5 bar one large 4 6 6 bar one small 5 8 7 bar two small 6 9 8 bar two large 7 9 This first example aggregates values by taking the sum. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum') >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4.0 5 two 7.0 6 foo one 4.0 1 two NaN 6 We can also fill missing values using the `fill_value` parameter. >>> table = df.pivot_table(values='D', index=['A', 'B'], ... columns='C', aggfunc='sum', fill_value=0) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE C large small A B bar one 4 5 two 7 6 foo one 4 1 two 0 6 We can also calculate multiple types of aggregations for any given value column. >>> table = df.pivot_table(values=['D'], index =['C'], ... columns="A", aggfunc={'D': 'mean'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D A bar foo C large 5.5 2.000000 small 5.5 2.333333 The next example aggregates on multiple values. >>> table = df.pivot_table(index=['C'], columns="A", values=['D', 'E'], ... aggfunc={'D': 'mean', 'E': 'sum'}) >>> table.sort_index() # doctest: +NORMALIZE_WHITESPACE D E A bar foo bar foo C large 5.5 2.000000 15 9 small 5.5 2.333333 17 13 """ if not isinstance(columns, (str, tuple)): raise ValueError("columns should be string or tuple.") if not isinstance(values, (str, tuple)) and not isinstance(values, list): raise ValueError("values should be string or list of one column.") if not isinstance(aggfunc, str) and ( not isinstance(aggfunc, dict) or not all( isinstance(key, (str, tuple)) and isinstance(value, str) for key, value in aggfunc.items() ) ): raise ValueError( "aggfunc must be a dict mapping from column name (string or tuple) " "to aggregate functions (string)." ) if isinstance(aggfunc, dict) and index is None: raise NotImplementedError( "pivot_table doesn't support aggfunc" " as dict and without index." 
) if isinstance(values, list) and index is None: raise NotImplementedError("values can't be a list without index.") if columns not in self.columns: raise ValueError("Wrong columns {}.".format(columns)) if isinstance(columns, str): columns = (columns,) if isinstance(values, list): values = [col if isinstance(col, tuple) else (col,) for col in values] if not all( isinstance(self._internal.spark_type_for(col), NumericType) for col in values ): raise TypeError("values should be a numeric type.") else: values = values if isinstance(values, tuple) else (values,) if not isinstance(self._internal.spark_type_for(values), NumericType): raise TypeError("values should be a numeric type.") if isinstance(aggfunc, str): if isinstance(values, list): agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(value), aggfunc ) ) for value in values ] else: agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format( self._internal.spark_column_name_for(values), aggfunc ) ) ] elif isinstance(aggfunc, dict): aggfunc = { key if isinstance(key, tuple) else (key,): value for key, value in aggfunc.items() } agg_cols = [ F.expr( "{1}(`{0}`) as `{0}`".format(self._internal.spark_column_name_for(key), value) ) for key, value in aggfunc.items() ] agg_columns = [key for key, _ in aggfunc.items()] if set(agg_columns) != set(values): raise ValueError("Columns in aggfunc must be the same as values.") if index is None: sdf = ( self._sdf.groupBy() .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) elif isinstance(index, list): index = [label if isinstance(label, tuple) else (label,) for label in index] sdf = ( self._sdf.groupBy([self._internal.spark_column_for(label) for label in index]) .pivot(pivot_col=self._internal.spark_column_name_for(columns)) .agg(*agg_cols) ) else: raise ValueError("index should be a None or a list of columns.") if fill_value is not None and isinstance(fill_value, (int, float)): sdf = sdf.fillna(fill_value) if index is not None: if isinstance(values, list): index_columns = [self._internal.spark_column_name_for(label) for label in index] data_columns = [column for column in sdf.columns if column not in index_columns] if len(values) > 1: # If we have two values, Spark will return column's name # in this format: column_values, where column contains # their values in the DataFrame and values is # the column list passed to the pivot_table(). # E.g. if column is b and values is ['b','e'], # then ['2_b', '2_e', '3_b', '3_e']. # We sort the columns of Spark DataFrame by values. 
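# Illustrative sketch (not executed here) of the sort key used just below: splitting
# each pivoted column name once on "_" and ordering by the value-column part groups
# the columns by value name, e.g.
#   sorted(['2_b', '2_e', '3_b', '3_e'], key=lambda x: x.split("_", 1)[1])
# returns ['2_b', '3_b', '2_e', '3_e'].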
data_columns.sort(key=lambda x: x.split("_", 1)[1]) sdf = sdf.select(index_columns + data_columns) column_name_to_index = dict( zip(self._internal.data_spark_column_names, self._internal.column_labels) ) column_labels = [ tuple(list(column_name_to_index[name.split("_")[1]]) + [name.split("_")[0]]) for name in data_columns ] index_map = OrderedDict(zip(index_columns, index)) column_label_names = ([None] * column_labels_level(values)) + [ str(columns) if len(columns) > 1 else columns[0] ] internal = _InternalFrame( spark_frame=sdf, index_map=index_map, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) kdf = DataFrame(internal) else: column_labels = [tuple(list(values[0]) + [column]) for column in data_columns] index_map = OrderedDict(zip(index_columns, index)) column_label_names = ([None] * len(values[0])) + [ str(columns) if len(columns) > 1 else columns[0] ] internal = _InternalFrame( spark_frame=sdf, index_map=index_map, column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) kdf = DataFrame(internal) return kdf else: index_columns = [self._internal.spark_column_name_for(label) for label in index] index_map = OrderedDict(zip(index_columns, index)) column_label_names = [str(columns) if len(columns) > 1 else columns[0]] internal = _InternalFrame( spark_frame=sdf, index_map=index_map, column_label_names=column_label_names ) return DataFrame(internal) else: if isinstance(values, list): index_values = values[-1] else: index_values = values index_map = OrderedDict() for i, index_value in enumerate(index_values): colname = SPARK_INDEX_NAME_FORMAT(i) sdf = sdf.withColumn(colname, F.lit(index_value)) index_map[colname] = None column_label_names = [str(columns) if len(columns) > 1 else columns[0]] internal = _InternalFrame( spark_frame=sdf, index_map=index_map, column_label_names=column_label_names ) return DataFrame(internal) def pivot(self, index=None, columns=None, values=None): """ Return reshaped DataFrame organized by given index / column values. Reshape data (produce a "pivot" table) based on column values. Uses unique values from specified `index` / `columns` to form axes of the resulting DataFrame. This function does not support data aggregation. Parameters ---------- index : string, optional Column to use to make new frame's index. If None, uses existing index. columns : string Column to use to make new frame's columns. values : string, object or a list of the previous Column(s) to use for populating new frame's values. Returns ------- DataFrame Returns reshaped DataFrame. See Also -------- DataFrame.pivot_table : Generalization of pivot that can handle duplicate values for one index/column pair. Examples -------- >>> df = ks.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', ... 'two'], ... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'], ... 'baz': [1, 2, 3, 4, 5, 6], ... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']}, ... columns=['foo', 'bar', 'baz', 'zoo']) >>> df foo bar baz zoo 0 one A 1 x 1 one B 2 y 2 one C 3 z 3 two A 4 q 4 two B 5 w 5 two C 6 t >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... 
# doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1 2 3 two 4 5 6 >>> df.pivot(columns='bar', values='baz').sort_index() # doctest: +NORMALIZE_WHITESPACE bar A B C 0 1.0 NaN NaN 1 NaN 2.0 NaN 2 NaN NaN 3.0 3 4.0 NaN NaN 4 NaN 5.0 NaN 5 NaN NaN 6.0 Notice that, unlike pandas raises an ValueError when duplicated values are found, Koalas' pivot still works with its first value it meets during operation because pivot is an expensive operation and it is preferred to permissively execute over failing fast when processing large data. >>> df = ks.DataFrame({"foo": ['one', 'one', 'two', 'two'], ... "bar": ['A', 'A', 'B', 'C'], ... "baz": [1, 2, 3, 4]}, columns=['foo', 'bar', 'baz']) >>> df foo bar baz 0 one A 1 1 one A 2 2 two B 3 3 two C 4 >>> df.pivot(index='foo', columns='bar', values='baz').sort_index() ... # doctest: +NORMALIZE_WHITESPACE bar A B C foo one 1.0 NaN NaN two NaN 3.0 4.0 It also support multi-index and multi-index column. >>> df.columns = pd.MultiIndex.from_tuples([('a', 'foo'), ('a', 'bar'), ('b', 'baz')]) >>> df = df.set_index(('a', 'bar'), append=True) >>> df # doctest: +NORMALIZE_WHITESPACE a b foo baz (a, bar) 0 A one 1 1 A one 2 2 B two 3 3 C two 4 >>> df.pivot(columns=('a', 'foo'), values=('b', 'baz')).sort_index() ... # doctest: +NORMALIZE_WHITESPACE ('a', 'foo') one two (a, bar) 0 A 1.0 NaN 1 A 2.0 NaN 2 B NaN 3.0 3 C NaN 4.0 """ if columns is None: raise ValueError("columns should be set.") if values is None: raise ValueError("values should be set.") should_use_existing_index = index is not None if should_use_existing_index: df = self index = [index] else: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index = df._internal.column_labels[: len(self._internal.index_spark_column_names)] df = df.pivot_table(index=index, columns=columns, values=values, aggfunc="first") if should_use_existing_index: return df else: index_columns = df._internal.index_spark_column_names internal = df._internal.copy( index_map=OrderedDict( (index_column, name) for index_column, name in zip(index_columns, self._internal.index_names) ) ) return DataFrame(internal) @property def columns(self): """The column labels of the DataFrame.""" if self._internal.column_labels_level > 1: columns = pd.MultiIndex.from_tuples(self._internal.column_labels) else: columns = pd.Index([label[0] for label in self._internal.column_labels]) if self._internal.column_label_names is not None: columns.names = self._internal.column_label_names return columns @columns.setter def columns(self, columns): if isinstance(columns, pd.MultiIndex): column_labels = columns.tolist() old_names = self._internal.column_labels if len(old_names) != len(column_labels): raise ValueError( "Length mismatch: Expected axis has %d elements, new values have %d elements" % (len(old_names), len(column_labels)) ) column_label_names = columns.names data_columns = [name_like_string(label) for label in column_labels] data_spark_columns = [ self._internal.spark_column_for(label).alias(name) for label, name in zip(self._internal.column_labels, data_columns) ] self._internal = self._internal.with_new_columns( data_spark_columns, column_labels=column_labels ) sdf = self._sdf.select( self._internal.index_spark_columns + [ self._internal.spark_column_for(label).alias(name) for label, name in zip(self._internal.column_labels, data_columns) ] + list(HIDDEN_COLUMNS) ) data_spark_columns = [scol_for(sdf, col) 
for col in data_columns] self._internal = self._internal.copy( spark_frame=sdf, column_labels=column_labels, data_spark_columns=data_spark_columns, column_label_names=column_label_names, ) else: old_names = self._internal.column_labels if len(old_names) != len(columns): raise ValueError( "Length mismatch: Expected axis has %d elements, new values have %d elements" % (len(old_names), len(columns)) ) column_labels = [col if isinstance(col, tuple) else (col,) for col in columns] if isinstance(columns, pd.Index): column_label_names = columns.names else: column_label_names = None data_columns = [name_like_string(label) for label in column_labels] sdf = self._sdf.select( self._internal.index_spark_columns + [ self._internal.spark_column_for(label).alias(name) for label, name in zip(self._internal.column_labels, data_columns) ] + list(HIDDEN_COLUMNS) ) data_spark_columns = [scol_for(sdf, col) for col in data_columns] self._internal = self._internal.copy( spark_frame=sdf, column_labels=column_labels, data_spark_columns=data_spark_columns, column_label_names=column_label_names, ) @property def dtypes(self): """Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the object dtype. Returns ------- pd.Series The data type of each column. Examples -------- >>> df = ks.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.dtypes a object b int64 c int8 d float64 e bool f datetime64[ns] dtype: object """ return pd.Series( [self._kser_for(label).dtype for label in self._internal.column_labels], index=pd.Index( [label if len(label) > 1 else label[0] for label in self._internal.column_labels] ), ) def spark_schema(self, index_col: Optional[Union[str, List[str]]] = None): """ Returns the underlying Spark schema. Returns ------- pyspark.sql.types.StructType The underlying Spark schema. Parameters ---------- index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. Examples -------- >>> df = ks.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.spark_schema().simpleString() 'struct<a:string,b:bigint,c:tinyint,d:double,e:boolean,f:timestamp>' >>> df.spark_schema(index_col='index').simpleString() 'struct<index:bigint,a:string,b:bigint,c:tinyint,d:double,e:boolean,f:timestamp>' """ return self.to_spark(index_col).schema def print_schema(self, index_col: Optional[Union[str, List[str]]] = None): """ Prints out the underlying Spark schema in the tree format. Parameters ---------- index_col: str or list of str, optional, default: None Column names to be used in Spark to represent Koalas' index. The index name in Koalas is ignored. By default, the index is always lost. Examples -------- >>> df = ks.DataFrame({'a': list('abc'), ... 'b': list(range(1, 4)), ... 'c': np.arange(3, 6).astype('i1'), ... 'd': np.arange(4.0, 7.0, dtype='float64'), ... 'e': [True, False, True], ... 'f': pd.date_range('20130101', periods=3)}, ... 
columns=['a', 'b', 'c', 'd', 'e', 'f']) >>> df.print_schema() # doctest: +NORMALIZE_WHITESPACE root |-- a: string (nullable = false) |-- b: long (nullable = false) |-- c: byte (nullable = false) |-- d: double (nullable = false) |-- e: boolean (nullable = false) |-- f: timestamp (nullable = false) >>> df.print_schema(index_col='index') # doctest: +NORMALIZE_WHITESPACE root |-- index: long (nullable = false) |-- a: string (nullable = false) |-- b: long (nullable = false) |-- c: byte (nullable = false) |-- d: double (nullable = false) |-- e: boolean (nullable = false) |-- f: timestamp (nullable = false) """ self.to_spark(index_col).printSchema() def select_dtypes(self, include=None, exclude=None): """ Return a subset of the DataFrame's columns based on the column dtypes. Parameters ---------- include, exclude : scalar or list-like A selection of dtypes or strings to be included/excluded. At least one of these parameters must be supplied. It also takes Spark SQL DDL type strings, for instance, 'string' and 'date'. Returns ------- DataFrame The subset of the frame including the dtypes in ``include`` and excluding the dtypes in ``exclude``. Raises ------ ValueError * If both of ``include`` and ``exclude`` are empty >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes() Traceback (most recent call last): ... ValueError: at least one of include or exclude must be nonempty * If ``include`` and ``exclude`` have overlapping elements >>> df = pd.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3}) >>> df.select_dtypes(include='a', exclude='a') Traceback (most recent call last): ... TypeError: string dtypes are not allowed, use 'object' instead Notes ----- * To select datetimes, use ``np.datetime64``, ``'datetime'`` or ``'datetime64'`` Examples -------- >>> df = ks.DataFrame({'a': [1, 2] * 3, ... 'b': [True, False] * 3, ... 'c': [1.0, 2.0] * 3, ... 'd': ['a', 'b'] * 3}, columns=['a', 'b', 'c', 'd']) >>> df a b c d 0 1 True 1.0 a 1 2 False 2.0 b 2 1 True 1.0 a 3 2 False 2.0 b 4 1 True 1.0 a 5 2 False 2.0 b >>> df.select_dtypes(include='bool') b 0 True 1 False 2 True 3 False 4 True 5 False >>> df.select_dtypes(include=['float64'], exclude=['int']) c 0 1.0 1 2.0 2 1.0 3 2.0 4 1.0 5 2.0 >>> df.select_dtypes(exclude=['int']) b c d 0 True 1.0 a 1 False 2.0 b 2 True 1.0 a 3 False 2.0 b 4 True 1.0 a 5 False 2.0 b Spark SQL DDL type strings can be used as well. >>> df.select_dtypes(exclude=['string']) a b c 0 1 True 1.0 1 2 False 2.0 2 1 True 1.0 3 2 False 2.0 4 1 True 1.0 5 2 False 2.0 """ from pyspark.sql.types import _parse_datatype_string if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () if not any((include, exclude)): raise ValueError("at least one of include or exclude must be " "nonempty") # can't both include AND exclude! 
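# Note (illustrative, not from the original docstring): each entry in `include` /
# `exclude` is parsed twice below -- once as a Spark SQL DDL type string and once as
# a numpy/pandas dtype -- with parse failures silently ignored.  A mixed list such as
#   df.select_dtypes(include=['float64', 'string'])
# would therefore pick up double columns through the numpy path and string columns
# through the Spark DDL path.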
if set(include).intersection(set(exclude)): raise ValueError( "include and exclude overlap on {inc_ex}".format( inc_ex=set(include).intersection(set(exclude)) ) ) # Handle Spark types include_spark_type = [] for inc in include: try: include_spark_type.append(_parse_datatype_string(inc)) except: pass exclude_spark_type = [] for exc in exclude: try: exclude_spark_type.append(_parse_datatype_string(exc)) except: pass # Handle Pandas types include_numpy_type = [] for inc in include: try: include_numpy_type.append(infer_dtype_from_object(inc)) except: pass exclude_numpy_type = [] for exc in exclude: try: exclude_numpy_type.append(infer_dtype_from_object(exc)) except: pass column_labels = [] for label in self._internal.column_labels: if len(include) > 0: should_include = ( infer_dtype_from_object(self._kser_for(label).dtype.name) in include_numpy_type or self._internal.spark_type_for(label) in include_spark_type ) else: should_include = not ( infer_dtype_from_object(self._kser_for(label).dtype.name) in exclude_numpy_type or self._internal.spark_type_for(label) in exclude_spark_type ) if should_include: column_labels.append(label) data_spark_columns = [self._internal.spark_column_for(label) for label in column_labels] return DataFrame( self._internal.with_new_columns(data_spark_columns, column_labels=column_labels) ) def count(self, axis=None): """ Count non-NA cells for each column. The values `None`, `NaN` are considered NA. Parameters ---------- axis : {0 or ‘index’, 1 or ‘columns’}, default 0 If 0 or ‘index’ counts are generated for each column. If 1 or ‘columns’ counts are generated for each row. Returns ------- pandas.Series See Also -------- Series.count: Number of non-NA elements in a Series. DataFrame.shape: Number of DataFrame rows and columns (including NA elements). DataFrame.isna: Boolean same-sized DataFrame showing places of NA elements. Examples -------- Constructing DataFrame from a dictionary: >>> df = ks.DataFrame({"Person": ... ["John", "Myla", "Lewis", "John", "Myla"], ... "Age": [24., np.nan, 21., 33, 26], ... "Single": [False, True, True, True, False]}, ... columns=["Person", "Age", "Single"]) >>> df Person Age Single 0 John 24.0 False 1 Myla NaN True 2 Lewis 21.0 True 3 John 33.0 True 4 Myla 26.0 False Notice the uncounted NA values: >>> df.count() Person 5 Age 4 Single 5 Name: 0, dtype: int64 >>> df.count(axis=1) 0 3 1 2 2 3 3 3 4 3 Name: 0, dtype: int64 """ return self._reduce_for_stat_function( _Frame._count_expr, name="count", axis=axis, numeric_only=False ) def drop( self, labels=None, axis=1, columns: Union[str, Tuple[str, ...], List[str], List[Tuple[str, ...]]] = None, ): """ Drop specified labels from columns. Remove columns by specifying label names and axis=1 or columns. When specifying both labels and columns, only labels will be dropped. Removing rows is yet to be implemented. Parameters ---------- labels : single label or list-like Column labels to drop. axis : {1 or 'columns'}, default 1 .. dropna currently only works for axis=1 'columns' axis=0 is yet to be implemented. columns : single label or list-like Alternative to specifying axis (``labels, axis=1`` is equivalent to ``columns=labels``). Returns ------- dropped : DataFrame See Also -------- Series.dropna Examples -------- >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... 
columns=['x', 'y', 'z', 'w']) >>> df x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop('x', axis=1) y z w 0 3 5 7 1 4 6 8 >>> df.drop(['y', 'z'], axis=1) x w 0 1 7 1 2 8 >>> df.drop(columns=['y', 'z']) x w 0 1 7 1 2 8 Also support for MultiIndex >>> df = ks.DataFrame({'x': [1, 2], 'y': [3, 4], 'z': [5, 6], 'w': [7, 8]}, ... columns=['x', 'y', 'z', 'w']) >>> columns = [('a', 'x'), ('a', 'y'), ('b', 'z'), ('b', 'w')] >>> df.columns = pd.MultiIndex.from_tuples(columns) >>> df # doctest: +NORMALIZE_WHITESPACE a b x y z w 0 1 3 5 7 1 2 4 6 8 >>> df.drop('a') # doctest: +NORMALIZE_WHITESPACE b z w 0 5 7 1 6 8 Notes ----- Currently only axis = 1 is supported in this function, axis = 0 is yet to be implemented. """ if labels is not None: axis = validate_axis(axis) if axis == 1: return self.drop(columns=labels) raise NotImplementedError("Drop currently only works for axis=1") elif columns is not None: if isinstance(columns, str): columns = [(columns,)] # type: ignore elif isinstance(columns, tuple): columns = [columns] else: columns = [ # type: ignore col if isinstance(col, tuple) else (col,) for col in columns # type: ignore ] drop_column_labels = set( label for label in self._internal.column_labels for col in columns if label[: len(col)] == col ) if len(drop_column_labels) == 0: raise KeyError(columns) cols, labels = zip( *( (column, label) for column, label in zip( self._internal.data_spark_column_names, self._internal.column_labels ) if label not in drop_column_labels ) ) data_spark_columns = [self._internal.spark_column_for(label) for label in labels] internal = self._internal.with_new_columns( data_spark_columns, column_labels=list(labels) ) return DataFrame(internal) else: raise ValueError("Need to specify at least one of 'labels' or 'columns'") def _sort( self, by: List[Column], ascending: Union[bool, List[bool]], inplace: bool, na_position: str ): if isinstance(ascending, bool): ascending = [ascending] * len(by) if len(ascending) != len(by): raise ValueError( "Length of ascending ({}) != length of by ({})".format(len(ascending), len(by)) ) if na_position not in ("first", "last"): raise ValueError("invalid na_position: '{}'".format(na_position)) # Mapper: Get a spark column function for (ascending, na_position) combination # Note that 'asc_nulls_first' and friends were added as of Spark 2.4, see SPARK-23847. mapper = { (True, "first"): lambda x: Column(getattr(x._jc, "asc_nulls_first")()), (True, "last"): lambda x: Column(getattr(x._jc, "asc_nulls_last")()), (False, "first"): lambda x: Column(getattr(x._jc, "desc_nulls_first")()), (False, "last"): lambda x: Column(getattr(x._jc, "desc_nulls_last")()), } by = [mapper[(asc, na_position)](scol) for scol, asc in zip(by, ascending)] sdf = self._sdf.sort(*(by + [NATURAL_ORDER_COLUMN_NAME])) kdf = DataFrame(self._internal.with_new_sdf(sdf)) # type: ks.DataFrame if inplace: self._internal = kdf._internal return None else: return kdf def sort_values( self, by: Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]], ascending: Union[bool, List[bool]] = True, inplace: bool = False, na_position: str = "last", ) -> Optional["DataFrame"]: """ Sort by the values along either axis. Parameters ---------- by : str or list of str ascending : bool or list of bool, default True Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the by. 
inplace : bool, default False if True, perform operation in-place na_position : {'first', 'last'}, default 'last' `first` puts NaNs at the beginning, `last` puts NaNs at the end Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ks.DataFrame({ ... 'col1': ['A', 'B', None, 'D', 'C'], ... 'col2': [2, 9, 8, 7, 4], ... 'col3': [0, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df col1 col2 col3 0 A 2 0 1 B 9 9 2 None 8 4 3 D 7 2 4 C 4 3 Sort by col1 >>> df.sort_values(by=['col1']) col1 col2 col3 0 A 2 0 1 B 9 9 4 C 4 3 3 D 7 2 2 None 8 4 Sort Descending >>> df.sort_values(by='col1', ascending=False) col1 col2 col3 3 D 7 2 4 C 4 3 1 B 9 9 0 A 2 0 2 None 8 4 Sort by multiple columns >>> df = ks.DataFrame({ ... 'col1': ['A', 'A', 'B', None, 'D', 'C'], ... 'col2': [2, 1, 9, 8, 7, 4], ... 'col3': [0, 1, 9, 4, 2, 3], ... }, ... columns=['col1', 'col2', 'col3']) >>> df.sort_values(by=['col1', 'col2']) col1 col2 col3 1 A 1 1 0 A 2 0 2 B 9 9 5 C 4 3 4 D 7 2 3 None 8 4 """ inplace = validate_bool_kwarg(inplace, "inplace") if isinstance(by, (str, tuple)): by = [by] # type: ignore else: by = [b if isinstance(b, tuple) else (b,) for b in by] # type: ignore new_by = [] for colname in by: ser = self[colname] if not isinstance(ser, ks.Series): raise ValueError( "The column %s is not unique. For a multi-index, the label must be a tuple " "with elements corresponding to each level." % name_like_string(colname) ) new_by.append(ser.spark_column) return self._sort(by=new_by, ascending=ascending, inplace=inplace, na_position=na_position) def sort_index( self, axis: int = 0, level: Optional[Union[int, List[int]]] = None, ascending: bool = True, inplace: bool = False, kind: str = None, na_position: str = "last", ) -> Optional["DataFrame"]: """ Sort object by labels (along an axis) Parameters ---------- axis : index, columns to direct sorting. Currently, only axis = 0 is supported. level : int or level name or list of ints or list of level names if not None, sort on values in specified index level(s) ascending : boolean, default True Sort ascending vs. descending inplace : bool, default False if True, perform operation in-place kind : str, default None Koalas does not allow specifying the sorting algorithm at the moment, default None na_position : {‘first’, ‘last’}, default ‘last’ first puts NaNs at the beginning, last puts NaNs at the end. Not implemented for MultiIndex. Returns ------- sorted_obj : DataFrame Examples -------- >>> df = ks.DataFrame({'A': [2, 1, np.nan]}, index=['b', 'a', np.nan]) >>> df.sort_index() A a 1.0 b 2.0 NaN NaN >>> df.sort_index(ascending=False) A b 2.0 a 1.0 NaN NaN >>> df.sort_index(na_position='first') A NaN NaN a 1.0 b 2.0 >>> df.sort_index(inplace=True) >>> df A a 1.0 b 2.0 NaN NaN >>> df = ks.DataFrame({'A': range(4), 'B': range(4)[::-1]}, ... index=[['b', 'b', 'a', 'a'], [1, 0, 1, 0]], ... columns=['A', 'B']) >>> df.sort_index() A B a 0 3 0 1 2 1 b 0 1 2 1 0 3 >>> df.sort_index(level=1) # doctest: +SKIP A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 >>> df.sort_index(level=[1, 0]) A B a 0 3 0 b 0 1 2 a 1 2 1 b 1 0 3 """ inplace = validate_bool_kwarg(inplace, "inplace") axis = validate_axis(axis) if axis != 0: raise NotImplementedError("No other axis than 0 are supported at the moment") if kind is not None: raise NotImplementedError( "Specifying the sorting algorithm is not supported at the moment." 
) if level is None or (is_list_like(level) and len(level) == 0): # type: ignore by = self._internal.index_spark_columns elif is_list_like(level): by = [self._internal.index_spark_columns[l] for l in level] # type: ignore else: by = [self._internal.index_spark_columns[level]] return self._sort(by=by, ascending=ascending, inplace=inplace, na_position=na_position) # TODO: add keep = First def nlargest(self, n: int, columns: "Any") -> "DataFrame": """ Return the first `n` rows ordered by `columns` in descending order. Return the first `n` rows with the largest values in `columns`, in descending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=False).head(n)``, but more performant in Pandas. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of rows to return. columns : label or list of labels Column label(s) to order by. Returns ------- DataFrame The first `n` rows ordered by the given columns in descending order. See Also -------- DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in ascending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Notes ----- This function cannot be used with all column types. For example, when specifying columns with `object` or `category` dtypes, ``TypeError`` is raised. Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nlargest`` to select the three rows having the largest values in column "population". >>> df.nlargest(n=3, columns='X') X Y 5 7.0 11 4 6.0 10 3 5.0 9 >>> df.nlargest(n=3, columns=['Y', 'X']) X Y 6 NaN 12 5 7.0 11 4 6.0 10 """ kdf = self.sort_values(by=columns, ascending=False) # type: Optional[DataFrame] assert kdf is not None return kdf.head(n=n) # TODO: add keep = First def nsmallest(self, n: int, columns: "Any") -> "DataFrame": """ Return the first `n` rows ordered by `columns` in ascending order. Return the first `n` rows with the smallest values in `columns`, in ascending order. The columns that are not specified are returned as well, but not used for ordering. This method is equivalent to ``df.sort_values(columns, ascending=True).head(n)``, but more performant. In Koalas, thanks to Spark's lazy execution and query optimizer, the two would have same performance. Parameters ---------- n : int Number of items to retrieve. columns : list or str Column name or names to order by. Returns ------- DataFrame See Also -------- DataFrame.nlargest : Return the first `n` rows ordered by `columns` in descending order. DataFrame.sort_values : Sort DataFrame by the values. DataFrame.head : Return the first `n` rows without re-ordering. Examples -------- >>> df = ks.DataFrame({'X': [1, 2, 3, 5, 6, 7, np.nan], ... 'Y': [6, 7, 8, 9, 10, 11, 12]}) >>> df X Y 0 1.0 6 1 2.0 7 2 3.0 8 3 5.0 9 4 6.0 10 5 7.0 11 6 NaN 12 In the following example, we will use ``nsmallest`` to select the three rows having the smallest values in column "a". >>> df.nsmallest(n=3, columns='X') # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 To order by the largest values in column "a" and then "c", we can specify multiple columns like in the next example. 
>>> df.nsmallest(n=3, columns=['Y', 'X']) # doctest: +NORMALIZE_WHITESPACE X Y 0 1.0 6 1 2.0 7 2 3.0 8 """ kdf = self.sort_values(by=columns, ascending=True) # type: Optional[DataFrame] assert kdf is not None return kdf.head(n=n) def isin(self, values): """ Whether each element in the DataFrame is contained in values. Parameters ---------- values : iterable or dict The sequence of values to test. If values is a dict, the keys must be the column names, which must match. Series and DataFrame are not supported. Returns ------- DataFrame DataFrame of booleans showing whether each element in the DataFrame is contained in values. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]}, ... index=['falcon', 'dog'], ... columns=['num_legs', 'num_wings']) >>> df num_legs num_wings falcon 2 2 dog 4 0 When ``values`` is a list check whether every value in the DataFrame is present in the list (which animals have 0 or 2 legs or wings) >>> df.isin([0, 2]) num_legs num_wings falcon True True dog False True When ``values`` is a dict, we can pass values to check for each column separately: >>> df.isin({'num_wings': [0, 3]}) num_legs num_wings falcon False False dog False True """ if isinstance(values, (pd.DataFrame, pd.Series)): raise NotImplementedError("DataFrame and Series are not supported") if isinstance(values, dict) and not set(values.keys()).issubset(self.columns): raise AttributeError( "'DataFrame' object has no attribute %s" % (set(values.keys()).difference(self.columns)) ) data_spark_columns = [] if isinstance(values, dict): for i, col in enumerate(self.columns): if col in values: data_spark_columns.append( self._internal.spark_column_for(self._internal.column_labels[i]) .isin(values[col]) .alias(self._internal.data_spark_column_names[i]) ) else: data_spark_columns.append( F.lit(False).alias(self._internal.data_spark_column_names[i]) ) elif is_list_like(values): data_spark_columns += [ self._internal.spark_column_for(label) .isin(list(values)) .alias(self._internal.spark_column_name_for(label)) for label in self._internal.column_labels ] else: raise TypeError("Values should be iterable, Series, DataFrame or dict.") return DataFrame(self._internal.with_new_columns(data_spark_columns)) @property def shape(self): """ Return a tuple representing the dimensionality of the DataFrame. Examples -------- >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4]}) >>> df.shape (2, 2) >>> df = ks.DataFrame({'col1': [1, 2], 'col2': [3, 4], ... 'col3': [5, 6]}) >>> df.shape (2, 3) """ return len(self), len(self.columns) def merge( self, right: "DataFrame", how: str = "inner", on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None, left_on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None, right_on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None, left_index: bool = False, right_index: bool = False, suffixes: Tuple[str, str] = ("_x", "_y"), ) -> "DataFrame": """ Merge DataFrame objects with a database-style join. The index of the resulting DataFrame will be one of the following: - 0...n if no index is used for merging - Index of the left DataFrame if merged only on the index of the right DataFrame - Index of the right DataFrame if merged only on the index of the left DataFrame - All involved indices if merged using the indices of both DataFrames e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will be an index (x, a, b) Parameters ---------- right: Object to merge with. 
how: Type of merge to be performed. {'left', 'right', 'outer', 'inner'}, default 'inner' left: use only keys from left frame, similar to a SQL left outer join; preserve key order. right: use only keys from right frame, similar to a SQL right outer join; preserve key order. outer: use union of keys from both frames, similar to a SQL full outer join; sort keys lexicographically. inner: use intersection of keys from both frames, similar to a SQL inner join; preserve the order of the left keys. on: Column or index level names to join on. These must be found in both DataFrames. If on is None and not merging on indexes then this defaults to the intersection of the columns in both DataFrames. left_on: Column or index level names to join on in the left DataFrame. Can also be an array or list of arrays of the length of the left DataFrame. These arrays are treated as if they are columns. right_on: Column or index level names to join on in the right DataFrame. Can also be an array or list of arrays of the length of the right DataFrame. These arrays are treated as if they are columns. left_index: Use the index from the left DataFrame as the join key(s). If it is a MultiIndex, the number of keys in the other DataFrame (either the index or a number of columns) must match the number of levels. right_index: Use the index from the right DataFrame as the join key. Same caveats as left_index. suffixes: Suffix to apply to overlapping column names in the left and right side, respectively. Returns ------- DataFrame A DataFrame of the two merged objects. See Also -------- DataFrame.join : Join columns of another DataFrame. DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df1 = ks.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [1, 2, 3, 5]}, ... columns=['lkey', 'value']) >>> df2 = ks.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'], ... 'value': [5, 6, 7, 8]}, ... columns=['rkey', 'value']) >>> df1 lkey value 0 foo 1 1 bar 2 2 baz 3 3 foo 5 >>> df2 rkey value 0 foo 5 1 bar 6 2 baz 7 3 foo 8 Merge df1 and df2 on the lkey and rkey columns. The value columns have the default suffixes, _x and _y, appended. >>> merged = df1.merge(df2, left_on='lkey', right_on='rkey') >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS lkey value_x rkey value_y ...bar 2 bar 6 ...baz 3 baz 7 ...foo 1 foo 5 ...foo 1 foo 8 ...foo 5 foo 5 ...foo 5 foo 8 >>> left_kdf = ks.DataFrame({'A': [1, 2]}) >>> right_kdf = ks.DataFrame({'B': ['x', 'y']}, index=[1, 2]) >>> left_kdf.merge(right_kdf, left_index=True, right_index=True).sort_index() A B 1 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='left').sort_index() A B 0 1 None 1 2 x >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='right').sort_index() A B 1 2.0 x 2 NaN y >>> left_kdf.merge(right_kdf, left_index=True, right_index=True, how='outer').sort_index() A B 0 1.0 None 1 2.0 x 2 NaN y Notes ----- As described in #263, joining string columns currently returns None for missing values instead of NaN. 
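``how='full'`` is also accepted as an alias for ``'outer'`` but triggers a warning; prefer ``'outer'`` for pandas compatibility. An illustrative call reusing ``df1`` and ``df2`` from the examples above (output omitted):

>>> df1.merge(df2, left_on='lkey', right_on='rkey', how='outer')  # doctest: +SKIP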
""" def to_list( os: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] ) -> List[Tuple[str, ...]]: if os is None: return [] elif isinstance(os, tuple): return [os] elif isinstance(os, str): return [(os,)] else: return [o if isinstance(o, tuple) else (o,) for o in os] # type: ignore if isinstance(right, ks.Series): right = right.to_frame() if on: if left_on or right_on: raise ValueError( 'Can only pass argument "on" OR "left_on" and "right_on", ' "not a combination of both." ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(on))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(on))) else: # TODO: need special handling for multi-index. if left_index: left_key_names = self._internal.index_spark_column_names else: left_key_names = list(map(self._internal.spark_column_name_for, to_list(left_on))) if right_index: right_key_names = right._internal.index_spark_column_names else: right_key_names = list( map(right._internal.spark_column_name_for, to_list(right_on)) ) if left_key_names and not right_key_names: raise ValueError("Must pass right_on or right_index=True") if right_key_names and not left_key_names: raise ValueError("Must pass left_on or left_index=True") if not left_key_names and not right_key_names: common = list(self.columns.intersection(right.columns)) if len(common) == 0: raise ValueError( "No common columns to perform merge on. Merge options: " "left_on=None, right_on=None, left_index=False, right_index=False" ) left_key_names = list(map(self._internal.spark_column_name_for, to_list(common))) right_key_names = list(map(right._internal.spark_column_name_for, to_list(common))) if len(left_key_names) != len(right_key_names): # type: ignore raise ValueError("len(left_keys) must equal len(right_keys)") if how == "full": warnings.warn( "Warning: While Koalas will accept 'full', you should use 'outer' " + "instead to be compatible with the pandas merge API", UserWarning, ) if how == "outer": # 'outer' in pandas equals 'full' in Spark how = "full" if how not in ("inner", "left", "right", "full"): raise ValueError( "The 'how' parameter has to be amongst the following values: ", "['inner', 'left', 'right', 'outer']", ) left_table = self._sdf.alias("left_table") right_table = right._sdf.alias("right_table") left_key_columns = [ # type: ignore scol_for(left_table, label) for label in left_key_names ] right_key_columns = [ # type: ignore scol_for(right_table, label) for label in right_key_names ] join_condition = reduce( lambda x, y: x & y, [lkey == rkey for lkey, rkey in zip(left_key_columns, right_key_columns)], ) joined_table = left_table.join(right_table, join_condition, how=how) # Unpack suffixes tuple for convenience left_suffix = suffixes[0] right_suffix = suffixes[1] # Append suffixes to columns with the same name to avoid conflicts later duplicate_columns = set(self._internal.column_labels) & set(right._internal.column_labels) exprs = [] data_columns = [] column_labels = [] left_scol_for = lambda label: scol_for( left_table, self._internal.spark_column_name_for(label) ) right_scol_for = lambda label: scol_for( right_table, right._internal.spark_column_name_for(label) ) for label in self._internal.column_labels: col = self._internal.spark_column_name_for(label) scol = left_scol_for(label) if label in duplicate_columns: spark_column_name = self._internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and spark_column_name in right_key_names ): # type: ignore right_scol = right_scol_for(label) if 
how == "right": scol = right_scol elif how == "full": scol = F.when(scol.isNotNull(), scol).otherwise(right_scol).alias(col) else: pass else: col = col + left_suffix scol = scol.alias(col) label = tuple([label[0] + left_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) for label in right._internal.column_labels: col = right._internal.spark_column_name_for(label) scol = right_scol_for(label) if label in duplicate_columns: spark_column_name = self._internal.spark_column_name_for(label) if ( spark_column_name in left_key_names and spark_column_name in right_key_names ): # type: ignore continue else: col = col + right_suffix scol = scol.alias(col) label = tuple([label[0] + right_suffix] + list(label[1:])) exprs.append(scol) data_columns.append(col) column_labels.append(label) left_index_scols = self._internal.index_spark_columns right_index_scols = right._internal.index_spark_columns # Retain indices if they are used for joining if left_index: if right_index: if how in ("inner", "left"): exprs.extend(left_index_scols) index_map = self._internal.index_map elif how == "right": exprs.extend(right_index_scols) index_map = right._internal.index_map else: index_map = OrderedDict() for (col, name), left_scol, right_scol in zip( self._internal.index_map.items(), left_index_scols, right_index_scols ): scol = F.when(left_scol.isNotNull(), left_scol).otherwise(right_scol) exprs.append(scol.alias(col)) index_map[col] = name else: exprs.extend(right_index_scols) index_map = right._internal.index_map elif right_index: exprs.extend(left_index_scols) index_map = self._internal.index_map else: index_map = OrderedDict() selected_columns = joined_table.select(*exprs) internal = _InternalFrame( spark_frame=selected_columns, index_map=index_map if index_map else None, column_labels=column_labels, data_spark_columns=[scol_for(selected_columns, col) for col in data_columns], ) return DataFrame(internal) def join( self, right: "DataFrame", on: Optional[Union[str, List[str], Tuple[str, ...], List[Tuple[str, ...]]]] = None, how: str = "left", lsuffix: str = "", rsuffix: str = "", ) -> "DataFrame": """ Join columns of another DataFrame. Join columns with `right` DataFrame either on index or on a key column. Efficiently join multiple DataFrame objects by index at once by passing a list. Parameters ---------- right: DataFrame, Series on: str, list of str, or array-like, optional Column or index level name(s) in the caller to join on the index in `right`, otherwise joins index-on-index. If multiple values given, the `right` DataFrame must have a MultiIndex. Can pass an array as the join key if it is not already contained in the calling DataFrame. Like an Excel VLOOKUP operation. how: {'left', 'right', 'outer', 'inner'}, default 'left' How to handle the operation of the two objects. * left: use `left` frame’s index (or column if on is specified). * right: use `right`’s index. * outer: form union of `left` frame’s index (or column if on is specified) with right’s index, and sort it. lexicographically. * inner: form intersection of `left` frame’s index (or column if on is specified) with `right`’s index, preserving the order of the `left`’s one. lsuffix : str, default '' Suffix to use from left frame's overlapping columns. rsuffix : str, default '' Suffix to use from `right` frame's overlapping columns. Returns ------- DataFrame A dataframe containing columns from both the `left` and `right`. See Also -------- DataFrame.merge: For column(s)-on-columns(s) operations. 
DataFrame.update : Modify in place using non-NA values from another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Notes ----- Parameters on, lsuffix, and rsuffix are not supported when passing a list of DataFrame objects. Examples -------- >>> kdf1 = ks.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'], ... 'A': ['A0', 'A1', 'A2', 'A3']}, ... columns=['key', 'A']) >>> kdf2 = ks.DataFrame({'key': ['K0', 'K1', 'K2'], ... 'B': ['B0', 'B1', 'B2']}, ... columns=['key', 'B']) >>> kdf1 key A 0 K0 A0 1 K1 A1 2 K2 A2 3 K3 A3 >>> kdf2 key B 0 K0 B0 1 K1 B1 2 K2 B2 Join DataFrames using their indexes. >>> join_kdf = kdf1.join(kdf2, lsuffix='_left', rsuffix='_right') >>> join_kdf.sort_values(by=join_kdf.columns) key_left A key_right B 0 K0 A0 K0 B0 1 K1 A1 K1 B1 2 K2 A2 K2 B2 3 K3 A3 None None If we want to join using the key columns, we need to set key to be the index in both df and right. The joined DataFrame will have key as its index. >>> join_kdf = kdf1.set_index('key').join(kdf2.set_index('key')) >>> join_kdf.sort_values(by=join_kdf.columns) # doctest: +NORMALIZE_WHITESPACE A B key K0 A0 B0 K1 A1 B1 K2 A2 B2 K3 A3 None Another option to join using the key columns is to use the on parameter. DataFrame.join always uses right’s index but we can use any column in df. This method preserves the original DataFrame’s index in the result. >>> join_kdf = kdf1.join(kdf2.set_index('key'), on='key') >>> join_kdf.index Int64Index([0, 1, 2, 3], dtype='int64') """ if isinstance(right, ks.Series): common = list(self.columns.intersection([right.name])) else: common = list(self.columns.intersection(right.columns)) if len(common) > 0 and not lsuffix and not rsuffix: raise ValueError( "columns overlap but no suffix specified: " "{rename}".format(rename=common) ) if on: self = self.set_index(on) join_kdf = self.merge( right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix) ).reset_index() else: join_kdf = self.merge( right, left_index=True, right_index=True, how=how, suffixes=(lsuffix, rsuffix) ) return join_kdf def append( self, other: "DataFrame", ignore_index: bool = False, verify_integrity: bool = False, sort: bool = False, ) -> "DataFrame": """ Append rows of other to the end of caller, returning a new object. Columns in other that are not in the caller are added as new columns. Parameters ---------- other : DataFrame or Series/dict-like object, or list of these The data to append. ignore_index : boolean, default False If True, do not use the index labels. verify_integrity : boolean, default False If True, raise ValueError on creating index with duplicates. sort : boolean, default False Currently not supported. 
Returns ------- appended : DataFrame Examples -------- >>> df = ks.DataFrame([[1, 2], [3, 4]], columns=list('AB')) >>> df.append(df) A B 0 1 2 1 3 4 0 1 2 1 3 4 >>> df.append(df, ignore_index=True) A B 0 1 2 1 3 4 2 1 2 3 3 4 """ if isinstance(other, ks.Series): raise ValueError("DataFrames.append() does not support appending Series to DataFrames") if sort: raise NotImplementedError("The 'sort' parameter is currently not supported") if not ignore_index: index_scols = self._internal.index_spark_columns if len(index_scols) != len(other._internal.index_spark_columns): raise ValueError("Both DataFrames have to have the same number of index levels") if verify_integrity and len(index_scols) > 0: if ( self._sdf.select(index_scols) .intersect(other._sdf.select(other._internal.index_spark_columns)) .count() ) > 0: raise ValueError("Indices have overlapping values") # Lazy import to avoid circular dependency issues from databricks.koalas.namespace import concat return concat([self, other], ignore_index=ignore_index) # TODO: add 'filter_func' and 'errors' parameter def update(self, other: "DataFrame", join: str = "left", overwrite: bool = True): """ Modify in place using non-NA values from another DataFrame. Aligns on indices. There is no return value. Parameters ---------- other : DataFrame, or Series join : 'left', default 'left' Only left join is implemented, keeping the index and columns of the original object. overwrite : bool, default True How to handle non-NA values for overlapping keys: * True: overwrite original DataFrame's values with values from `other`. * False: only update values that are NA in the original DataFrame. Returns ------- None : method directly changes calling object See Also -------- DataFrame.merge : For column(s)-on-columns(s) operations. DataFrame.join : Join columns of another DataFrame. DataFrame.hint : Specifies some hint on the current DataFrame. broadcast : Marks a DataFrame as small enough for use in broadcast joins. Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ks.DataFrame({'B': [4, 5, 6], 'C': [7, 8, 9]}, columns=['B', 'C']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4 1 2 5 2 3 6 The DataFrame's length does not increase as a result of the update, only values at matching index/column labels are updated. >>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_df = ks.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 a d 1 b e 2 c f For Series, it's name attribute must be set. >>> df = ks.DataFrame({'A': ['a', 'b', 'c'], 'B': ['x', 'y', 'z']}, columns=['A', 'B']) >>> new_column = ks.Series(['d', 'e'], name='B', index=[0, 2]) >>> df.update(new_column) >>> df.sort_index() A B 0 a d 1 b y 2 c e If `other` contains None the corresponding values are not updated in the original dataframe. 
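With ``overwrite=False`` only entries that are NA in the original frame are filled; the call below is an illustrative sketch (output omitted, names reused from the surrounding examples):

>>> df.update(new_df, overwrite=False)  # doctest: +SKIP

As noted above, ``None`` values in `other` never overwrite existing data: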
>>> df = ks.DataFrame({'A': [1, 2, 3], 'B': [400, 500, 600]}, columns=['A', 'B']) >>> new_df = ks.DataFrame({'B': [4, None, 6]}, columns=['B']) >>> df.update(new_df) >>> df.sort_index() A B 0 1 4.0 1 2 500.0 2 3 6.0 """ if join != "left": raise NotImplementedError("Only left join is supported") if isinstance(other, ks.Series): other = DataFrame(other) update_columns = list( set(self._internal.column_labels).intersection(set(other._internal.column_labels)) ) update_sdf = self.join(other[update_columns], rsuffix="_new")._sdf for column_labels in update_columns: column_name = self._internal.spark_column_name_for(column_labels) old_col = scol_for(update_sdf, column_name) new_col = scol_for( update_sdf, other._internal.spark_column_name_for(column_labels) + "_new" ) if overwrite: update_sdf = update_sdf.withColumn( column_name, F.when(new_col.isNull(), old_col).otherwise(new_col) ) else: update_sdf = update_sdf.withColumn( column_name, F.when(old_col.isNull(), new_col).otherwise(old_col) ) sdf = update_sdf.select( [scol_for(update_sdf, col) for col in self._internal.spark_column_names] + list(HIDDEN_COLUMNS) ) internal = self._internal.with_new_sdf(sdf) self._internal = internal def sample( self, n: Optional[int] = None, frac: Optional[float] = None, replace: bool = False, random_state: Optional[int] = None, ) -> "DataFrame": """ Return a random sample of items from an axis of object. Please call this function using named argument by specifying the ``frac`` argument. You can use `random_state` for reproducibility. However, note that different from pandas, specifying a seed in Koalas/Spark does not guarantee the sampled rows will be fixed. The result set depends on not only the seed, but also how the data is distributed across machines and to some extent network randomness when shuffle operations are involved. Even in the simplest case, the result set will depend on the system's CPU core count. Parameters ---------- n : int, optional Number of items to return. This is currently NOT supported. Use frac instead. frac : float, optional Fraction of axis items to return. replace : bool, default False Sample with or without replacement. random_state : int, optional Seed for the random number generator (if int). Returns ------- Series or DataFrame A new object of same type as caller containing the sampled items. Examples -------- >>> df = ks.DataFrame({'num_legs': [2, 4, 8, 0], ... 'num_wings': [2, 0, 0, 0], ... 'num_specimen_seen': [10, 2, 1, 8]}, ... index=['falcon', 'dog', 'spider', 'fish'], ... columns=['num_legs', 'num_wings', 'num_specimen_seen']) >>> df # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 dog 4 0 2 spider 8 0 1 fish 0 0 8 A random 25% sample of the ``DataFrame``. Note that we use `random_state` to ensure the reproducibility of the examples. >>> df.sample(frac=0.25, random_state=1) # doctest: +SKIP num_legs num_wings num_specimen_seen falcon 2 2 10 fish 0 0 8 Extract 25% random elements from the ``Series`` ``df['num_legs']``, with replacement, so the same items could appear more than once. >>> df['num_legs'].sample(frac=0.4, replace=True, random_state=1) # doctest: +SKIP falcon 2 spider 8 spider 8 Name: num_legs, dtype: int64 Specifying the exact number of items to return is not supported at the moment. >>> df.sample(n=5) # doctest: +ELLIPSIS Traceback (most recent call last): ... NotImplementedError: Function sample currently does not support specifying ... 
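
        With ``replace=True`` the same row may be drawn more than once. The rows
        shown below are illustrative only, since the sampled set depends on how the
        data is distributed, so this example is skipped by doctests as well.

        >>> df.sample(frac=0.5, replace=True, random_state=1)  # doctest: +SKIP
                num_legs  num_wings  num_specimen_seen
        falcon         2          2                 10
        falcon         2          2                 10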
""" # Note: we don't run any of the doctests because the result can change depending on the # system's core count. if n is not None: raise NotImplementedError( "Function sample currently does not support specifying " "exact number of items to return. Use frac instead." ) if frac is None: raise ValueError("frac must be specified.") sdf = self._sdf.sample(withReplacement=replace, fraction=frac, seed=random_state) return DataFrame(self._internal.with_new_sdf(sdf)) def astype(self, dtype) -> "DataFrame": """ Cast a Koalas object to a specified dtype ``dtype``. Parameters ---------- dtype : data type, or dict of column name -> data type Use a numpy.dtype or Python type to cast entire Koalas object to the same type. Alternatively, use {col: dtype, ...}, where col is a column label and dtype is a numpy.dtype or Python type to cast one or more of the DataFrame's columns to column-specific types. Returns ------- casted : same type as caller See Also -------- to_datetime : Convert argument to datetime. Examples -------- >>> df = ks.DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3]}, dtype='int64') >>> df a b 0 1 1 1 2 2 2 3 3 Convert to float type: >>> df.astype('float') a b 0 1.0 1.0 1 2.0 2.0 2 3.0 3.0 Convert to int64 type back: >>> df.astype('int64') a b 0 1 1 1 2 2 2 3 3 Convert column a to float type: >>> df.astype({'a': float}) a b 0 1.0 1 1 2.0 2 2 3.0 3 """ applied = [] if is_dict_like(dtype): for col_name in dtype.keys(): if col_name not in self.columns: raise KeyError( "Only a column name can be used for the " "key in a dtype mappings argument." ) for col_name, col in self.items(): if col_name in dtype: applied.append(col.astype(dtype=dtype[col_name])) else: applied.append(col) else: for col_name, col in self.items(): applied.append(col.astype(dtype=dtype)) return DataFrame(self._internal.with_new_columns(applied)) def add_prefix(self, prefix): """ Prefix labels with string `prefix`. For Series, the row labels are prefixed. For DataFrame, the column labels are prefixed. Parameters ---------- prefix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_suffix: Suffix column labels with string `suffix`. Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_prefix('col_') col_A col_B 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(prefix, str) return self._apply_series_op( lambda kser: kser.rename(tuple([prefix + i for i in kser._internal.column_labels[0]])) ) def add_suffix(self, suffix): """ Suffix labels with string `suffix`. For Series, the row labels are suffixed. For DataFrame, the column labels are suffixed. Parameters ---------- suffix : str The string to add before each label. Returns ------- DataFrame New DataFrame with updated labels. See Also -------- Series.add_prefix: Prefix row labels with string `prefix`. Series.add_suffix: Suffix row labels with string `suffix`. DataFrame.add_prefix: Prefix column labels with string `prefix`. 
Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]}, columns=['A', 'B']) >>> df A B 0 1 3 1 2 4 2 3 5 3 4 6 >>> df.add_suffix('_col') A_col B_col 0 1 3 1 2 4 2 3 5 3 4 6 """ assert isinstance(suffix, str) return self._apply_series_op( lambda kser: kser.rename(tuple([i + suffix for i in kser._internal.column_labels[0]])) ) # TODO: include, and exclude should be implemented. def describe(self, percentiles: Optional[List[float]] = None) -> "DataFrame": """ Generate descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's distribution, excluding ``NaN`` values. Analyzes both numeric and object series, as well as ``DataFrame`` column sets of mixed data types. The output will vary depending on what is provided. Refer to the notes below for more detail. Parameters ---------- percentiles : list of ``float`` in range [0.0, 1.0], default [0.25, 0.5, 0.75] A list of percentiles to be computed. Returns ------- Series or DataFrame Summary statistics of the Series or Dataframe provided. See Also -------- DataFrame.count: Count number of non-NA/null observations. DataFrame.max: Maximum of the values in the object. DataFrame.min: Minimum of the values in the object. DataFrame.mean: Mean of the values. DataFrame.std: Standard deviation of the observations. Notes ----- For numeric data, the result's index will include ``count``, ``mean``, ``std``, ``min``, ``25%``, ``50%``, ``75%``, ``max``. Currently only numeric data is supported. Examples -------- Describing a numeric ``Series``. >>> s = ks.Series([1, 2, 3]) >>> s.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 Name: 0, dtype: float64 Describing a ``DataFrame``. Only numeric fields are returned. >>> df = ks.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0], ... 'object': ['a', 'b', 'c'] ... }, ... columns=['numeric1', 'numeric2', 'object']) >>> df.describe() numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 For multi-index columns: >>> df.columns = [('num', 'a'), ('num', 'b'), ('obj', 'c')] >>> df.describe() # doctest: +NORMALIZE_WHITESPACE num a b count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 25% 1.0 4.0 50% 2.0 5.0 75% 3.0 6.0 max 3.0 6.0 >>> df[('num', 'b')].describe() count 3.0 mean 5.0 std 1.0 min 4.0 25% 4.0 50% 5.0 75% 6.0 max 6.0 Name: (num, b), dtype: float64 Describing a ``DataFrame`` and selecting custom percentiles. >>> df = ks.DataFrame({'numeric1': [1, 2, 3], ... 'numeric2': [4.0, 5.0, 6.0] ... }, ... columns=['numeric1', 'numeric2']) >>> df.describe(percentiles = [0.85, 0.15]) numeric1 numeric2 count 3.0 3.0 mean 2.0 5.0 std 1.0 1.0 min 1.0 4.0 15% 1.0 4.0 50% 2.0 5.0 85% 3.0 6.0 max 3.0 6.0 Describing a column from a ``DataFrame`` by accessing it as an attribute. >>> df.numeric1.describe() count 3.0 mean 2.0 std 1.0 min 1.0 25% 1.0 50% 2.0 75% 3.0 max 3.0 Name: numeric1, dtype: float64 Describing a column from a ``DataFrame`` by accessing it as an attribute and selecting custom percentiles. 
>>> df.numeric1.describe(percentiles = [0.85, 0.15]) count 3.0 mean 2.0 std 1.0 min 1.0 15% 1.0 50% 2.0 85% 3.0 max 3.0 Name: numeric1, dtype: float64 """ exprs = [] column_labels = [] for label in self._internal.column_labels: scol = self._internal.spark_column_for(label) spark_type = self._internal.spark_type_for(label) if isinstance(spark_type, DoubleType) or isinstance(spark_type, FloatType): exprs.append( F.nanvl(scol, F.lit(None)).alias(self._internal.spark_column_name_for(label)) ) column_labels.append(label) elif isinstance(spark_type, NumericType): exprs.append(scol) column_labels.append(label) if len(exprs) == 0: raise ValueError("Cannot describe a DataFrame without columns") if percentiles is not None: if any((p < 0.0) or (p > 1.0) for p in percentiles): raise ValueError("Percentiles should all be in the interval [0, 1]") # appending 50% if not in percentiles already percentiles = (percentiles + [0.5]) if 0.5 not in percentiles else percentiles else: percentiles = [0.25, 0.5, 0.75] formatted_perc = ["{:.0%}".format(p) for p in sorted(percentiles)] stats = ["count", "mean", "stddev", "min", *formatted_perc, "max"] sdf = self._sdf.select(*exprs).summary(stats) sdf = sdf.replace("stddev", "std", subset="summary") internal = _InternalFrame( spark_frame=sdf, index_map=OrderedDict({"summary": None}), column_labels=column_labels, data_spark_columns=[ scol_for(sdf, self._internal.spark_column_name_for(label)) for label in column_labels ], ) return DataFrame(internal).astype("float64") def drop_duplicates(self, subset=None, keep="first", inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns. keep : {'first', 'last', False}, default 'first' Determines which duplicates (if any) to keep. - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy. Returns ------- DataFrame DataFrame with duplicates removed or None if ``inplace=True``. >>> df = ks.DataFrame( ... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b']) >>> df a b 0 1 a 1 2 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates().sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates('a').sort_index() a b 0 1 a 1 2 a 4 3 d >>> df.drop_duplicates(['a', 'b']).sort_index() a b 0 1 a 1 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep='last').sort_index() a b 0 1 a 2 2 a 3 2 c 4 3 d >>> df.drop_duplicates(keep=False).sort_index() a b 0 1 a 3 2 c 4 3 d """ inplace = validate_bool_kwarg(inplace, "inplace") sdf, column = self._mark_duplicates(subset, keep) sdf = sdf.where(~scol_for(sdf, column)).drop(column) internal = self._internal.with_new_sdf(sdf) if inplace: self._internal = internal else: return DataFrame(internal) def reindex( self, labels: Optional[Any] = None, index: Optional[Any] = None, columns: Optional[Any] = None, axis: Optional[Union[int, str]] = None, copy: Optional[bool] = True, fill_value: Optional[Any] = None, ) -> "DataFrame": """ Conform DataFrame to new index with optional filling logic, placing NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and ``copy=False``. 
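        .. note:: Reindexing along the index axis is implemented as a join between
            the current data and the new labels, so it is currently only supported
            for DataFrames whose index has a single level.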
Parameters ---------- labels: array-like, optional New labels / index to conform the axis specified by ‘axis’ to. index, columns: array-like, optional New labels / index to conform to, should be specified using keywords. Preferably an Index object to avoid duplicating data axis: int or str, optional Axis to target. Can be either the axis name (‘index’, ‘columns’) or number (0, 1). copy : bool, default True Return a new object, even if the passed indexes are the same. fill_value : scalar, default np.NaN Value to use for missing values. Defaults to NaN, but can be any "compatible" value. Returns ------- DataFrame with changed index. See Also -------- DataFrame.set_index : Set row labels. DataFrame.reset_index : Remove row labels or move them to new columns. Examples -------- ``DataFrame.reindex`` supports two calling conventions * ``(index=index_labels, columns=column_labels, ...)`` * ``(labels, axis={'index', 'columns'}, ...)`` We *highly* recommend using keyword arguments to clarify your intent. Create a dataframe with some fictional data. >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror'] >>> df = ks.DataFrame({ ... 'http_status': [200, 200, 404, 404, 301], ... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]}, ... index=index, ... columns=['http_status', 'response_time']) >>> df http_status response_time Firefox 200 0.04 Chrome 200 0.02 Safari 404 0.07 IE10 404 0.08 Konqueror 301 1.00 Create a new index and reindex the dataframe. By default values in the new index that do not have corresponding records in the dataframe are assigned ``NaN``. >>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10', ... 'Chrome'] >>> df.reindex(new_index).sort_index() ... # doctest: +NORMALIZE_WHITESPACE http_status response_time Chrome 200.0 0.02 Comodo Dragon NaN NaN IE10 404.0 0.08 Iceweasel NaN NaN Safari 404.0 0.07 We can fill in the missing values by passing a value to the keyword ``fill_value``. >>> df.reindex(new_index, fill_value=0, copy=False).sort_index() ... # doctest: +NORMALIZE_WHITESPACE http_status response_time Chrome 200 0.02 Comodo Dragon 0 0.00 IE10 404 0.08 Iceweasel 0 0.00 Safari 404 0.07 We can also reindex the columns. >>> df.reindex(columns=['http_status', 'user_agent']).sort_index() ... # doctest: +NORMALIZE_WHITESPACE http_status user_agent Chrome 200 NaN Comodo Dragon 0 NaN IE10 404 NaN Iceweasel 0 NaN Safari 404 NaN Or we can use "axis-style" keyword arguments >>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index() ... # doctest: +NORMALIZE_WHITESPACE http_status user_agent Chrome 200 NaN Comodo Dragon 0 NaN IE10 404 NaN Iceweasel 0 NaN Safari 404 NaN To further illustrate the filling functionality in ``reindex``, we will create a dataframe with a monotonically increasing index (for example, a sequence of dates). >>> date_index = pd.date_range('1/1/2010', periods=6, freq='D') >>> df2 = ks.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]}, ... index=date_index) >>> df2.sort_index() # doctest: +NORMALIZE_WHITESPACE prices 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 Suppose we decide to expand the dataframe to cover a wider date range. 
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D') >>> df2.reindex(date_index2).sort_index() # doctest: +NORMALIZE_WHITESPACE prices 2009-12-29 NaN 2009-12-30 NaN 2009-12-31 NaN 2010-01-01 100.0 2010-01-02 101.0 2010-01-03 NaN 2010-01-04 100.0 2010-01-05 89.0 2010-01-06 88.0 2010-01-07 NaN """ if axis is not None and (index is not None or columns is not None): raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.") if labels is not None: axis = validate_axis(axis) if axis == 0: index = labels elif axis == 1: columns = labels else: raise ValueError("No axis named %s for object type %s." % (axis, type(axis))) if index is not None and not is_list_like(index): raise TypeError( "Index must be called with a collection of some kind, " "%s was passed" % type(index) ) if columns is not None and not is_list_like(columns): raise TypeError( "Columns must be called with a collection of some kind, " "%s was passed" % type(columns) ) df = self.copy() if index is not None: df = DataFrame(df._reindex_index(index)) if columns is not None: df = DataFrame(df._reindex_columns(columns)) # Process missing values. if fill_value is not None: df = df.fillna(fill_value) # Copy if copy: return df.copy() else: self._internal = df._internal return self def _reindex_index(self, index): # When axis is index, we can mimic pandas' by a right outer join. assert ( len(self._internal.index_spark_column_names) <= 1 ), "Index should be single column or not set." index_column = self._internal.index_spark_column_names[0] kser = ks.Series(list(index)) labels = kser._internal._sdf.select(kser.spark_column.alias(index_column)) joined_df = self._sdf.drop(NATURAL_ORDER_COLUMN_NAME).join( labels, on=index_column, how="right" ) internal = self._internal.with_new_sdf(joined_df) return internal def _reindex_columns(self, columns): level = self._internal.column_labels_level if level > 1: label_columns = list(columns) for col in label_columns: if not isinstance(col, tuple): raise TypeError("Expected tuple, got {}".format(type(col))) else: label_columns = [(col,) for col in columns] for col in label_columns: if len(col) != level: raise ValueError( "shape (1,{}) doesn't match the shape (1,{})".format(len(col), level) ) scols, labels = [], [] for label in label_columns: if label in self._internal.column_labels: scols.append(self._internal.spark_column_for(label)) else: scols.append(F.lit(np.nan).alias(name_like_string(label))) labels.append(label) return self._internal.with_new_columns(scols, column_labels=labels) def melt(self, id_vars=None, value_vars=None, var_name=None, value_name="value"): """ Unpivot a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (`id_vars`), while all other columns, considered measured variables (`value_vars`), are "unpivoted" to the row axis, leaving just two non-identifier columns, 'variable' and 'value'. Parameters ---------- frame : DataFrame id_vars : tuple, list, or ndarray, optional Column(s) to use as identifier variables. value_vars : tuple, list, or ndarray, optional Column(s) to unpivot. If not specified, uses all columns that are not set as `id_vars`. var_name : scalar, default 'variable' Name to use for the 'variable' column. If None it uses `frame.columns.name` or ‘variable’. value_name : scalar, default 'value' Name to use for the 'value' column. Returns ------- DataFrame Unpivoted DataFrame. 
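        Notes
        -----
        When the columns are a MultiIndex, `id_vars` and `value_vars` must be given
        as lists of tuples spelling out the full column labels; passing a bare tuple
        raises ``ValueError`` in that case. A minimal sketch with a hypothetical
        frame ``kmdf`` (the call itself is skipped by doctests):

        >>> midx = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b')])
        >>> kmdf = ks.DataFrame([[1, 2], [3, 4]], columns=midx)
        >>> melted = kmdf.melt(id_vars=[('x', 'a')])  # doctest: +SKIP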
Examples -------- >>> df = ks.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, ... 'B': {0: 1, 1: 3, 2: 5}, ... 'C': {0: 2, 1: 4, 2: 6}}, ... columns=['A', 'B', 'C']) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> ks.melt(df) variable value 0 A a 1 B 1 2 C 2 3 A b 4 B 3 5 C 4 6 A c 7 B 5 8 C 6 >>> df.melt(id_vars='A') A variable value 0 a B 1 1 a C 2 2 b B 3 3 b C 4 4 c B 5 5 c C 6 >>> df.melt(value_vars='A') variable value 0 A a 1 A b 2 A c >>> ks.melt(df, id_vars=['A', 'B']) A B variable value 0 a 1 C 2 1 b 3 C 4 2 c 5 C 6 >>> df.melt(id_vars=['A'], value_vars=['C']) A variable value 0 a C 2 1 b C 4 2 c C 6 The names of 'variable' and 'value' columns can be customized: >>> ks.melt(df, id_vars=['A'], value_vars=['B'], ... var_name='myVarname', value_name='myValname') A myVarname myValname 0 a B 1 1 b B 3 2 c B 5 """ column_labels = self._internal.column_labels if id_vars is None: id_vars = [] else: if isinstance(id_vars, str): id_vars = [(id_vars,)] elif isinstance(id_vars, tuple): if self._internal.column_labels_level == 1: id_vars = [idv if isinstance(idv, tuple) else (idv,) for idv in id_vars] else: raise ValueError( "id_vars must be a list of tuples" " when columns are a MultiIndex" ) else: id_vars = [idv if isinstance(idv, tuple) else (idv,) for idv in id_vars] non_existence_col = [idv for idv in id_vars if idv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'id_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if value_vars is None: value_vars = [] else: if isinstance(value_vars, str): value_vars = [(value_vars,)] elif isinstance(value_vars, tuple): if self._internal.column_labels_level == 1: value_vars = [ valv if isinstance(valv, tuple) else (valv,) for valv in value_vars ] else: raise ValueError( "value_vars must be a list of tuples" " when columns are a MultiIndex" ) else: value_vars = [valv if isinstance(valv, tuple) else (valv,) for valv in value_vars] non_existence_col = [valv for valv in value_vars if valv not in column_labels] if len(non_existence_col) != 0: raveled_column_labels = np.ravel(column_labels) missing = [ nec for nec in np.ravel(non_existence_col) if nec not in raveled_column_labels ] if len(missing) != 0: raise KeyError( "The following 'value_vars' are not present" " in the DataFrame: {}".format(missing) ) else: raise KeyError( "None of {} are in the {}".format(non_existence_col, column_labels) ) if len(value_vars) == 0: value_vars = column_labels column_labels = [label for label in column_labels if label not in id_vars] sdf = self._sdf if var_name is None: if self._internal.column_label_names is not None: var_name = self._internal.column_label_names elif self._internal.column_labels_level == 1: var_name = ["variable"] else: var_name = [ "variable_{}".format(i) for i in range(self._internal.column_labels_level) ] elif isinstance(var_name, str): var_name = [var_name] pairs = F.explode( F.array( *[ F.struct( *( [F.lit(c).alias(name) for c, name in zip(label, var_name)] + [self._internal.spark_column_for(label).alias(value_name)] ) ) for label in column_labels if label in value_vars ] ) ) columns = ( [ self._internal.spark_column_for(label).alias(name_like_string(label)) for label in id_vars ] + [F.col("pairs.%s" % name) for name in var_name[: 
self._internal.column_labels_level]] + [F.col("pairs.%s" % value_name)] ) exploded_df = sdf.withColumn("pairs", pairs).select(columns) return DataFrame(exploded_df) def stack(self): """ Stack the prescribed level(s) from columns to index. Return a reshaped DataFrame or Series having a multi-level index with one or more new inner-most levels compared to the current DataFrame. The new inner-most levels are created by pivoting the columns of the current dataframe: - if the columns have a single level, the output is a Series; - if the columns have multiple levels, the new index level(s) is (are) taken from the prescribed level(s) and the output is a DataFrame. The new index levels are sorted. Returns ------- DataFrame or Series Stacked dataframe or series. See Also -------- DataFrame.unstack : Unstack prescribed level(s) from index axis onto column axis. DataFrame.pivot : Reshape dataframe from long format to wide format. DataFrame.pivot_table : Create a spreadsheet-style pivot table as a DataFrame. Notes ----- The function is named by analogy with a collection of books being reorganized from being side by side on a horizontal position (the columns of the dataframe) to being stacked vertically on top of each other (in the index of the dataframe). Examples -------- **Single level columns** >>> df_single_level_cols = ks.DataFrame([[0, 1], [2, 3]], ... index=['cat', 'dog'], ... columns=['weight', 'height']) Stacking a dataframe with a single level column axis returns a Series: >>> df_single_level_cols weight height cat 0 1 dog 2 3 >>> df_single_level_cols.stack().sort_index() cat height 1 weight 0 dog height 3 weight 2 Name: 0, dtype: int64 **Multi level columns: simple case** >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('weight', 'pounds')]) >>> df_multi_level_cols1 = ks.DataFrame([[1, 2], [2, 4]], ... index=['cat', 'dog'], ... columns=multicol1) Stacking a dataframe with a multi-level column axis: >>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE weight kg pounds cat 1 2 dog 2 4 >>> df_multi_level_cols1.stack().sort_index() weight cat kg 1 pounds 2 dog kg 2 pounds 4 **Missing values** >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'), ... ('height', 'm')]) >>> df_multi_level_cols2 = ks.DataFrame([[1.0, 2.0], [3.0, 4.0]], ... index=['cat', 'dog'], ... columns=multicol2) It is common to have missing values when stacking a dataframe with multi-level columns, as the stacked dataframe typically has more values than the original dataframe. 
Missing values are filled with NaNs: >>> df_multi_level_cols2 weight height kg m cat 1.0 2.0 dog 3.0 4.0 >>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP height weight cat kg NaN 1.0 m 2.0 NaN dog kg NaN 3.0 m 4.0 NaN """ from databricks.koalas.series import _col if len(self._internal.column_labels) == 0: return DataFrame(self._internal.with_filter(F.lit(False))) column_labels = defaultdict(dict) index_values = set() should_returns_series = False for label in self._internal.column_labels: new_label = label[:-1] if len(new_label) == 0: new_label = ("0",) should_returns_series = True value = label[-1] scol = self._internal.spark_column_for(label) column_labels[new_label][value] = scol index_values.add(value) column_labels = OrderedDict(sorted(column_labels.items(), key=lambda x: x[0])) if self._internal.column_label_names is None: column_label_names = None index_name = None else: column_label_names = self._internal.column_label_names[:-1] if self._internal.column_label_names[-1] is None: index_name = None else: index_name = (self._internal.column_label_names[-1],) index_column = SPARK_INDEX_NAME_FORMAT(len(self._internal.index_map)) index_map = list(self._internal.index_map.items()) + [(index_column, index_name)] data_columns = [name_like_string(label) for label in column_labels] structs = [ F.struct( [F.lit(value).alias(index_column)] + [ ( column_labels[label][value] if value in column_labels[label] else F.lit(None) ).alias(name) for label, name in zip(column_labels, data_columns) ] ).alias(value) for value in index_values ] pairs = F.explode(F.array(structs)) sdf = self._sdf.withColumn("pairs", pairs) sdf = sdf.select( self._internal.index_spark_columns + [sdf["pairs"][index_column].alias(index_column)] + [sdf["pairs"][name].alias(name) for name in data_columns] ) internal = _InternalFrame( spark_frame=sdf, index_map=OrderedDict(index_map), column_labels=list(column_labels), data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names, ) kdf = DataFrame(internal) if should_returns_series: return _col(kdf) else: return kdf def unstack(self): """ Pivot the (necessarily hierarchical) index labels. Returns a DataFrame having a new level of column labels whose inner-most level consists of the pivoted index labels. If the index is not a MultiIndex, the output will be a Series. .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and it could cause a serious performance degradation since Spark partitions it row based. Returns ------- Series or DataFrame See Also -------- DataFrame.pivot : Pivot a table based on column values. DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack). Examples -------- >>> df = ks.DataFrame({"A": {"0": "a", "1": "b", "2": "c"}, ... "B": {"0": "1", "1": "3", "2": "5"}, ... "C": {"0": "2", "1": "4", "2": "6"}}, ... columns=["A", "B", "C"]) >>> df A B C 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() A 0 a 1 b 2 c B 0 1 1 3 2 5 C 0 2 1 4 2 6 Name: 0, dtype: object >>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')]) >>> df.unstack().sort_index() X A 0 a 1 b 2 c B 0 1 1 3 2 5 Y C 0 2 1 4 2 6 Name: 0, dtype: object For MultiIndex case: >>> df = ks.DataFrame({"A": ["a", "b", "c"], ... "B": [1, 3, 5], ... "C": [2, 4, 6]}, ... 
columns=["A", "B", "C"]) >>> df = df.set_index('A', append=True) >>> df # doctest: +NORMALIZE_WHITESPACE B C A 0 a 1 2 1 b 3 4 2 c 5 6 >>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE B C A a b c a b c 0 1.0 NaN NaN 2.0 NaN NaN 1 NaN 3.0 NaN NaN 4.0 NaN 2 NaN NaN 5.0 NaN NaN 6.0 """ from databricks.koalas.series import _col if len(self._internal.index_spark_column_names) > 1: # The index after `reset_index()` will never be used, so use "distributed" index # as a dummy to avoid overhead. with option_context("compute.default_index_type", "distributed"): df = self.reset_index() index = df._internal.column_labels[: len(self._internal.index_spark_column_names) - 1] columns = df.columns[len(self._internal.index_spark_column_names) - 1] df = df.pivot_table( index=index, columns=columns, values=self._internal.column_labels, aggfunc="first" ) internal = df._internal.copy( index_map=OrderedDict( (index_column, name) for index_column, name in zip( df._internal.index_spark_column_names, self._internal.index_names[:-1] ) ), column_label_names=( df._internal.column_label_names[:-1] + [ None if self._internal.index_names[-1] is None else df._internal.column_label_names[-1] ] ), ) return DataFrame(internal) # TODO: Codes here are similar with melt. Should we deduplicate? column_labels = self._internal.column_labels ser_name = "0" sdf = self._sdf new_index_columns = [ SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level) ] new_index_map = [] if self._internal.column_label_names is not None: new_index_map.extend(zip(new_index_columns, self._internal.column_label_names)) else: new_index_map.extend(zip(new_index_columns, [None] * len(new_index_columns))) pairs = F.explode( F.array( *[ F.struct( *( [F.lit(c).alias(name) for c, name in zip(idx, new_index_columns)] + [self._internal.spark_column_for(idx).alias(ser_name)] ) ) for idx in column_labels ] ) ) columns = [ F.col("pairs.%s" % name) for name in new_index_columns[: self._internal.column_labels_level] ] + [F.col("pairs.%s" % ser_name)] new_index_len = len(new_index_columns) existing_index_columns = [] for i, index_name in enumerate(self._internal.index_names): new_index_map.append((SPARK_INDEX_NAME_FORMAT(i + new_index_len), index_name)) existing_index_columns.append( self._internal.index_spark_columns[i].alias( SPARK_INDEX_NAME_FORMAT(i + new_index_len) ) ) exploded_df = sdf.withColumn("pairs", pairs).select(existing_index_columns + columns) return _col(DataFrame(_InternalFrame(exploded_df, index_map=OrderedDict(new_index_map)))) # TODO: axis, skipna, and many arguments should be implemented. def all(self, axis: Union[int, str] = 0) -> bool: """ Return whether all elements are True. Returns True unless there is at least one element within a series that is False or equivalent (e.g. zero or empty) Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. Examples -------- Create a dataframe from a dictionary. >>> df = ks.DataFrame({ ... 'col1': [True, True, True], ... 'col2': [True, False, False], ... 'col3': [0, 0, 0], ... 'col4': [1, 2, 3], ... 'col5': [True, True, None], ... 'col6': [True, False, None]}, ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6']) Default behaviour checks if column-wise values all return a boolean. 
>>> df.all() col1 True col2 False col3 False col4 True col5 True col6 False Name: all, dtype: bool Returns ------- Series """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') applied = [] column_labels = self._internal.column_labels for label in column_labels: scol = self._internal.spark_column_for(label) all_col = F.min(F.coalesce(scol.cast("boolean"), F.lit(True))) applied.append(F.when(all_col.isNull(), True).otherwise(all_col)) # TODO: there is a similar logic to transpose in, for instance, # DataFrame.any, Series.quantile. Maybe we should deduplicate it. value_column = "value" cols = [] for label, applied_col in zip(column_labels, applied): cols.append( F.struct( [F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)] + [applied_col.alias(value_column)] ) ) sdf = self._sdf.select(F.array(*cols).alias("arrays")).select(F.explode(F.col("arrays"))) sdf = sdf.selectExpr("col.*") index_column_name = lambda i: ( None if self._internal.column_label_names is None else (self._internal.column_label_names[i],) ) internal = self._internal.copy( spark_frame=sdf, index_map=OrderedDict( (SPARK_INDEX_NAME_FORMAT(i), index_column_name(i)) for i in range(self._internal.column_labels_level) ), column_labels=None, data_spark_columns=[scol_for(sdf, value_column)], column_label_names=None, ) return DataFrame(internal)[value_column].rename("all") # TODO: axis, skipna, and many arguments should be implemented. def any(self, axis: Union[int, str] = 0) -> bool: """ Return whether any element is True. Returns False unless there is at least one element within a series that is True or equivalent (e.g. non-zero or non-empty). Parameters ---------- axis : {0 or 'index'}, default 0 Indicate which axis or axes should be reduced. * 0 / 'index' : reduce the index, return a Series whose index is the original column labels. Examples -------- Create a dataframe from a dictionary. >>> df = ks.DataFrame({ ... 'col1': [False, False, False], ... 'col2': [True, False, False], ... 'col3': [0, 0, 1], ... 'col4': [0, 1, 2], ... 'col5': [False, False, None], ... 'col6': [True, False, None]}, ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6']) Default behaviour checks if column-wise values all return a boolean. >>> df.any() col1 False col2 True col3 True col4 True col5 False col6 True Name: any, dtype: bool Returns ------- Series """ axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') applied = [] column_labels = self._internal.column_labels for label in column_labels: scol = self._internal.spark_column_for(label) all_col = F.max(F.coalesce(scol.cast("boolean"), F.lit(False))) applied.append(F.when(all_col.isNull(), False).otherwise(all_col)) # TODO: there is a similar logic to transpose in, for instance, # DataFrame.all, Series.quantile. Maybe we should deduplicate it. 
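        # The block below "transposes" the single aggregated row into one row per
        # original column: each column's boolean result is packed into a struct
        # together with the parts of its column label, the structs are collected
        # into an array, and the array is exploded so that every struct becomes a
        # row. Flattening the struct then yields the label columns (used as the
        # new index) plus a single "value" column holding the per-column result.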
value_column = "value" cols = [] for label, applied_col in zip(column_labels, applied): cols.append( F.struct( [F.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for i, col in enumerate(label)] + [applied_col.alias(value_column)] ) ) sdf = self._sdf.select(F.array(*cols).alias("arrays")).select(F.explode(F.col("arrays"))) sdf = sdf.selectExpr("col.*") index_column_name = lambda i: ( None if self._internal.column_label_names is None else (self._internal.column_label_names[i],) ) internal = self._internal.copy( spark_frame=sdf, index_map=OrderedDict( (SPARK_INDEX_NAME_FORMAT(i), index_column_name(i)) for i in range(self._internal.column_labels_level) ), column_labels=None, data_spark_columns=[scol_for(sdf, value_column)], column_label_names=None, ) return DataFrame(internal)[value_column].rename("any") # TODO: add axis, numeric_only, pct, na_option parameter def rank(self, method="average", ascending=True): """ Compute numerical data ranks (1 through n) along axis. Equal values are assigned a rank that is the average of the ranks of those values. .. note:: the current implementation of rank uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- method : {'average', 'min', 'max', 'first', 'dense'} * average: average rank of group * min: lowest rank in group * max: highest rank in group * first: ranks assigned in order they appear in the array * dense: like 'min', but rank always increases by 1 between groups ascending : boolean, default True False for ranks by high (1) to low (N) Returns ------- ranks : same type as caller Examples -------- >>> df = ks.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns= ['A', 'B']) >>> df A B 0 1 4 1 2 3 2 2 2 3 3 1 >>> df.rank().sort_index() A B 0 1.0 4.0 1 2.5 3.0 2 2.5 2.0 3 4.0 1.0 If method is set to 'min', it use lowest rank in group. >>> df.rank(method='min').sort_index() A B 0 1.0 4.0 1 2.0 3.0 2 2.0 2.0 3 4.0 1.0 If method is set to 'max', it use highest rank in group. >>> df.rank(method='max').sort_index() A B 0 1.0 4.0 1 3.0 3.0 2 3.0 2.0 3 4.0 1.0 If method is set to 'dense', it leaves no gaps in group. >>> df.rank(method='dense').sort_index() A B 0 1.0 4.0 1 2.0 3.0 2 2.0 2.0 3 3.0 1.0 """ return self._apply_series_op(lambda kser: kser.rank(method=method, ascending=ascending)) def filter(self, items=None, like=None, regex=None, axis=None): """ Subset rows or columns of dataframe according to labels in the specified index. Note that this routine does not filter a dataframe on its contents. The filter is applied to the labels of the index. Parameters ---------- items : list-like Keep labels from axis which are in items. like : string Keep labels from axis for which "like in label == True". regex : string (regular expression) Keep labels from axis for which re.search(regex, label) == True. axis : int or string axis name The axis to filter on. By default this is the info axis, 'index' for Series, 'columns' for DataFrame. Returns ------- same type as input object See Also -------- DataFrame.loc Notes ----- The ``items``, ``like``, and ``regex`` parameters are enforced to be mutually exclusive. ``axis`` defaults to the info axis that is used when indexing with ``[]``. Examples -------- >>> df = ks.DataFrame(np.array(([1, 2, 3], [4, 5, 6])), ... index=['mouse', 'rabbit'], ... 
columns=['one', 'two', 'three']) >>> # select columns by name >>> df.filter(items=['one', 'three']) one three mouse 1 3 rabbit 4 6 >>> # select columns by regular expression >>> df.filter(regex='e$', axis=1) one three mouse 1 3 rabbit 4 6 >>> # select rows containing 'bbi' >>> df.filter(like='bbi', axis=0) one two three rabbit 4 5 6 """ if sum(x is not None for x in (items, like, regex)) > 1: raise TypeError( "Keyword arguments `items`, `like`, or `regex` " "are mutually exclusive" ) axis = validate_axis(axis, none_axis=1) index_scols = self._internal.index_spark_columns if items is not None: if is_list_like(items): items = list(items) else: raise ValueError("items should be a list-like object.") if axis == 0: # TODO: support multi-index here if len(index_scols) != 1: raise ValueError("Single index must be specified.") col = None for item in items: if col is None: col = index_scols[0] == F.lit(item) else: col = col | (index_scols[0] == F.lit(item)) return DataFrame(self._internal.with_filter(col)) elif axis == 1: return self[items] elif like is not None: if axis == 0: # TODO: support multi-index here if len(index_scols) != 1: raise ValueError("Single index must be specified.") return DataFrame(self._internal.with_filter(index_scols[0].contains(like))) elif axis == 1: column_labels = self._internal.column_labels output_labels = [label for label in column_labels if any(like in i for i in label)] return self[output_labels] elif regex is not None: if axis == 0: # TODO: support multi-index here if len(index_scols) != 1: raise ValueError("Single index must be specified.") return DataFrame(self._internal.with_filter(index_scols[0].rlike(regex))) elif axis == 1: column_labels = self._internal.column_labels matcher = re.compile(regex) output_labels = [ label for label in column_labels if any(matcher.search(i) is not None for i in label) ] return self[output_labels] else: raise TypeError("Must pass either `items`, `like`, or `regex`") def rename( self, mapper=None, index=None, columns=None, axis="index", inplace=False, level=None, errors="ignore", ): """ Alter axes labels. Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series will be left as-is. Extra labels listed don’t throw an error. Parameters ---------- mapper : dict-like or function Dict-like or functions transformations to apply to that axis’ values. Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index` and `columns`. index : dict-like or function Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper"). columns : dict-like or function Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper"). axis : int or str, default 'index' Axis to target with mapper. Can be either the axis name ('index', 'columns') or number (0, 1). inplace : bool, default False Whether to return a new DataFrame. level : int or level name, default None In case of a MultiIndex, only rename labels in the specified level. errors : {'ignore', 'raise}, default 'ignore' If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns` contains labels that are not present in the Index being transformed. If 'ignore', existing keys will be renamed and extra keys will be ignored. Returns ------- DataFrame with the renamed axis labels. Raises: ------- `KeyError` If any of the labels is not found in the selected axis and "errors='raise'". 
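        .. note:: A dict-like mapper is converted into a single typed mapping
            function, so all of its values must share one type; mixing value types
            raises ``ValueError``. A sketch of that failure mode (skipped,
            illustrative only):

            >>> ks.DataFrame({"A": [1], "B": [2]}).rename(
            ...     columns={"A": "a", "B": 0})  # doctest: +SKIP
            Traceback (most recent call last):
             ...
            ValueError: Mapper dict should have the same value type.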
Examples -------- >>> kdf1 = ks.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}) >>> kdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE a c 0 1 4 1 2 5 2 3 6 >>> kdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> def str_lower(s) -> str: ... return str.lower(s) >>> kdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE a b 0 1 4 1 2 5 2 3 6 >>> def mul10(x) -> int: ... return x * 10 >>> kdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE A B 0 1 4 10 2 5 20 3 6 >>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')]) >>> kdf2 = ks.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx) >>> kdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE x y A B C D 0 1 2 3 4 1 5 6 7 8 >>> kdf3 = ks.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab')) >>> kdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE a b x a 1 2 b 3 4 y c 5 6 d 7 8 """ def gen_mapper_fn(mapper): if isinstance(mapper, dict): if len(mapper) == 0: if errors == "raise": raise KeyError("Index include label which is not in the `mapper`.") else: return DataFrame(self._internal) type_set = set(map(lambda x: type(x), mapper.values())) if len(type_set) > 1: raise ValueError("Mapper dict should have the same value type.") spark_return_type = as_spark_type(list(type_set)[0]) def mapper_fn(x): if x in mapper: return mapper[x] else: if errors == "raise": raise KeyError("Index include value which is not in the `mapper`") return x elif callable(mapper): spark_return_type = infer_return_type(mapper).tpe def mapper_fn(x): return mapper(x) else: raise ValueError( "`mapper` or `index` or `columns` should be " "either dict-like or function type." ) return mapper_fn, spark_return_type index_mapper_fn = None index_mapper_ret_stype = None columns_mapper_fn = None inplace = validate_bool_kwarg(inplace, "inplace") if mapper: axis = validate_axis(axis) if axis == 0: index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(mapper) elif axis == 1: columns_mapper_fn, columns_mapper_ret_stype = gen_mapper_fn(mapper) else: raise ValueError( "argument axis should be either the axis name " "(‘index’, ‘columns’) or number (0, 1)" ) else: if index: index_mapper_fn, index_mapper_ret_stype = gen_mapper_fn(index) if columns: columns_mapper_fn, _ = gen_mapper_fn(columns) if not index and not columns: raise ValueError("Either `index` or `columns` should be provided.") internal = self._internal if index_mapper_fn: # rename index labels, if `level` is None, rename all index columns, otherwise only # rename the corresponding level index. 
# implement this by transform the underlying spark dataframe, # Example: # suppose the kdf index column in underlying spark dataframe is "index_0", "index_1", # if rename level 0 index labels, will do: # ``kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0"))`` # if rename all index labels (`level` is None), then will do: # ``` # kdf._sdf.withColumn("index_0", mapper_fn_udf(col("index_0")) # .withColumn("index_1", mapper_fn_udf(col("index_1")) # ``` index_columns = internal.index_spark_column_names num_indices = len(index_columns) if level: if level < 0 or level >= num_indices: raise ValueError("level should be an integer between [0, num_indices)") def gen_new_index_column(level): index_col_name = index_columns[level] index_mapper_udf = pandas_udf( lambda s: s.map(index_mapper_fn), returnType=index_mapper_ret_stype ) return index_mapper_udf(scol_for(internal.spark_frame, index_col_name)) sdf = internal.spark_frame if level is None: for i in range(num_indices): sdf = sdf.withColumn(index_columns[i], gen_new_index_column(i)) else: sdf = sdf.withColumn(index_columns[level], gen_new_index_column(level)) internal = internal.with_new_sdf(sdf) if columns_mapper_fn: # rename column name. # Will modify the `_internal._column_labels` and transform underlying spark dataframe # to the same column name with `_internal._column_labels`. if level: if level < 0 or level >= internal.column_labels_level: raise ValueError("level should be an integer between [0, column_labels_level)") def gen_new_column_labels_entry(column_labels_entry): if isinstance(column_labels_entry, tuple): if level is None: # rename all level columns return tuple(map(columns_mapper_fn, column_labels_entry)) else: # only rename specified level column entry_list = list(column_labels_entry) entry_list[level] = columns_mapper_fn(entry_list[level]) return tuple(entry_list) else: return columns_mapper_fn(column_labels_entry) new_column_labels = list(map(gen_new_column_labels_entry, internal.column_labels)) if internal.column_labels_level == 1: new_data_columns = [col[0] for col in new_column_labels] else: new_data_columns = [str(col) for col in new_column_labels] new_data_scols = [ scol_for(internal.spark_frame, old_col_name).alias(new_col_name) for old_col_name, new_col_name in zip( internal.data_spark_column_names, new_data_columns ) ] internal = internal.with_new_columns(new_data_scols, column_labels=new_column_labels) if inplace: self._internal = internal return self else: return DataFrame(internal) def keys(self): """ Return alias for columns. Returns ------- Index Columns of the DataFrame. Examples -------- >>> df = ks.DataFrame([[1, 2], [4, 5], [7, 8]], ... index=['cobra', 'viper', 'sidewinder'], ... columns=['max_speed', 'shield']) >>> df max_speed shield cobra 1 2 viper 4 5 sidewinder 7 8 >>> df.keys() Index(['max_speed', 'shield'], dtype='object') """ return self.columns def pct_change(self, periods=1): """ Percentage change between the current and a prior element. .. note:: the current implementation of this API uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- periods : int, default 1 Periods to shift for forming percent change. Returns ------- DataFrame Examples -------- Percentage change in French franc, Deutsche Mark, and Italian lira from 1980-01-01 to 1980-03-01. >>> df = ks.DataFrame({ ... 
'FR': [4.0405, 4.0963, 4.3149], ... 'GR': [1.7246, 1.7482, 1.8519], ... 'IT': [804.74, 810.01, 860.13]}, ... index=['1980-01-01', '1980-02-01', '1980-03-01']) >>> df FR GR IT 1980-01-01 4.0405 1.7246 804.74 1980-02-01 4.0963 1.7482 810.01 1980-03-01 4.3149 1.8519 860.13 >>> df.pct_change() FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 0.013810 0.013684 0.006549 1980-03-01 0.053365 0.059318 0.061876 You can set periods to shift for forming percent change >>> df.pct_change(2) FR GR IT 1980-01-01 NaN NaN NaN 1980-02-01 NaN NaN NaN 1980-03-01 0.067912 0.073814 0.06883 """ window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween(-periods, -periods) def op(kser): prev_row = F.lag(kser.spark_column, periods).over(window) return ((kser.spark_column - prev_row) / prev_row).alias( kser._internal.data_spark_column_names[0] ) return self._apply_series_op(op) # TODO: axis = 1 def idxmax(self, axis=0): """ Return index of first occurrence of maximum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with maximum value using `to_pandas()` because we suppose the number of rows with max values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmax Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> kdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmax() a 2 b 0 c 2 Name: 0, dtype: int64 For Multi-column Index >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> kdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmax().sort_index() a x 2 b y 0 c z 2 Name: 0, dtype: int64 """ max_cols = map(lambda scol: F.max(scol), self._internal.data_spark_columns) sdf_max = self._sdf.select(*max_cols).head() # `sdf_max` looks like below # +------+------+------+ # |(a, x)|(b, y)|(c, z)| # +------+------+------+ # | 3| 4.0| 400| # +------+------+------+ conds = ( scol == max_val for scol, max_val in zip(self._internal.data_spark_columns, sdf_max) ) cond = reduce(lambda x, y: x | y, conds) kdf = DataFrame(self._internal.with_filter(cond)) pdf = kdf.to_pandas() return ks.from_pandas(pdf.idxmax()) # TODO: axis = 1 def idxmin(self, axis=0): """ Return index of first occurrence of minimum over requested axis. NA/null values are excluded. .. note:: This API collect all rows with minimum value using `to_pandas()` because we suppose the number of rows with min values are usually small in general. Parameters ---------- axis : 0 or 'index' Can only be set to 0 at the moment. Returns ------- Series See Also -------- Series.idxmin Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 'c': [300, 200, 400, 200]}) >>> kdf a b c 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmin() a 0 b 3 c 1 Name: 0, dtype: int64 For Multi-column Index >>> kdf = ks.DataFrame({'a': [1, 2, 3, 2], ... 'b': [4.0, 2.0, 3.0, 1.0], ... 
'c': [300, 200, 400, 200]}) >>> kdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> kdf a b c x y z 0 1 4.0 300 1 2 2.0 200 2 3 3.0 400 3 2 1.0 200 >>> kdf.idxmin().sort_index() a x 0 b y 3 c z 1 Name: 0, dtype: int64 """ min_cols = map(lambda scol: F.min(scol), self._internal.data_spark_columns) sdf_min = self._sdf.select(*min_cols).head() conds = ( scol == min_val for scol, min_val in zip(self._internal.data_spark_columns, sdf_min) ) cond = reduce(lambda x, y: x | y, conds) kdf = DataFrame(self._internal.with_filter(cond)) pdf = kdf.to_pandas() return ks.from_pandas(pdf.idxmin()) def info(self, verbose=None, buf=None, max_cols=None, null_counts=None): """ Print a concise summary of a DataFrame. This method prints information about a DataFrame including the index dtype and column dtypes, non-null values and memory usage. Parameters ---------- verbose : bool, optional Whether to print the full summary. buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. max_cols : int, optional When to switch from the verbose to the truncated output. If the DataFrame has more than `max_cols` columns, the truncated output is used. null_counts : bool, optional Whether to show the non-null counts. Returns ------- None This method prints a summary of a DataFrame and returns None. See Also -------- DataFrame.describe: Generate descriptive statistics of DataFrame columns. Examples -------- >>> int_values = [1, 2, 3, 4, 5] >>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon'] >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0] >>> df = ks.DataFrame( ... {"int_col": int_values, "text_col": text_values, "float_col": float_values}, ... columns=['int_col', 'text_col', 'float_col']) >>> df int_col text_col float_col 0 1 alpha 0.00 1 2 beta 0.25 2 3 gamma 0.50 3 4 delta 0.75 4 5 epsilon 1.00 Prints information of all columns: >>> df.info(verbose=True) # doctest: +SKIP <class 'databricks.koalas.frame.DataFrame'> Index: 5 entries, 0 to 4 Data columns (total 3 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 int_col 5 non-null int64 1 text_col 5 non-null object 2 float_col 5 non-null float64 dtypes: float64(1), int64(1), object(1) Prints a summary of columns count and its dtypes but not per column information: >>> df.info(verbose=False) # doctest: +SKIP <class 'databricks.koalas.frame.DataFrame'> Index: 5 entries, 0 to 4 Columns: 3 entries, int_col to float_col dtypes: float64(1), int64(1), object(1) Pipe output of DataFrame.info to buffer instead of sys.stdout, get buffer content and writes to a text file: >>> import io >>> buffer = io.StringIO() >>> df.info(buf=buffer) >>> s = buffer.getvalue() >>> with open('%s/info.txt' % path, "w", ... encoding="utf-8") as f: ... _ = f.write(s) >>> with open('%s/info.txt' % path) as f: ... f.readlines() # doctest: +SKIP ["<class 'databricks.koalas.frame.DataFrame'>\\n", 'Index: 5 entries, 0 to 4\\n', 'Data columns (total 3 columns):\\n', ' # Column Non-Null Count Dtype \\n', '--- ------ -------------- ----- \\n', ' 0 int_col 5 non-null int64 \\n', ' 1 text_col 5 non-null object \\n', ' 2 float_col 5 non-null float64\\n', 'dtypes: float64(1), int64(1), object(1)'] """ # To avoid pandas' existing config affects Koalas. # TODO: should we have corresponding Koalas configs? 
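        # ``pd.DataFrame.info`` is reused as-is below: ``_data`` is temporarily
        # attached and ``count`` is wrapped to return a pandas Series so that the
        # pandas implementation can introspect this object, and both changes are
        # undone in the ``finally`` block. ``memory_usage`` is always disabled
        # because the data itself does not live in pandas memory.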
with pd.option_context( "display.max_info_columns", sys.maxsize, "display.max_info_rows", sys.maxsize ): try: # hack to use pandas' info as is. self._data = self count_func = self.count self.count = lambda: count_func().to_pandas() return pd.DataFrame.info( self, verbose=verbose, buf=buf, max_cols=max_cols, memory_usage=False, null_counts=null_counts, ) finally: del self._data self.count = count_func # TODO: fix parameter 'axis' and 'numeric_only' to work same as pandas' def quantile(self, q=0.5, axis=0, numeric_only=True, accuracy=10000): """ Return value at the given quantile. .. note:: Unlike pandas', the quantile in Koalas is an approximated quantile based upon approximate percentile computation because computing quantile across a large dataset is extremely expensive. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) 0 <= q <= 1, the quantile(s) to compute. axis : int, default 0 or 'index' Can only be set to 0 at the moment. numeric_only : bool, default True If False, the quantile of datetime and timedelta data will be computed as well. Can only be set to True at the moment. accuracy : int, optional Default accuracy of approximation. Larger value means better accuracy. The relative error can be deduced by 1.0 / accuracy. Returns ------- Series or DataFrame If q is an array, a DataFrame will be returned where the index is q, the columns are the columns of self, and the values are the quantiles. If q is a float, a Series will be returned where the index is the columns of self and the values are the quantiles. Examples -------- >>> kdf = ks.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]}) >>> kdf a b 0 1 6 1 2 7 2 3 8 3 4 9 4 5 0 >>> kdf.quantile(.5) a 3 b 7 Name: 0.5, dtype: int64 >>> kdf.quantile([.25, .5, .75]) a b 0.25 2 6 0.5 3 7 0.75 4 8 """ result_as_series = False axis = validate_axis(axis) if axis != 0: raise NotImplementedError('axis should be either 0 or "index" currently.') if numeric_only is not True: raise NotImplementedError("quantile currently doesn't supports numeric_only") if isinstance(q, float): result_as_series = True key = str(q) q = (q,) quantiles = q # First calculate the percentiles from all columns and map it to each `quantiles` # by creating each entry as a struct. So, it becomes an array of structs as below: # # +-----------------------------------------+ # | arrays| # +-----------------------------------------+ # |[[0.25, 2, 6], [0.5, 3, 7], [0.75, 4, 8]]| # +-----------------------------------------+ sdf = self._sdf args = ", ".join(map(str, quantiles)) percentile_cols = [] for column in self._internal.data_spark_column_names: percentile_cols.append( F.expr("approx_percentile(`%s`, array(%s), %s)" % (column, args, accuracy)).alias( column ) ) sdf = sdf.select(percentile_cols) # Here, after select percntile cols, a spark_frame looks like below: # +---------+---------+ # | a| b| # +---------+---------+ # |[2, 3, 4]|[6, 7, 8]| # +---------+---------+ cols_dict = OrderedDict() for column in self._internal.data_spark_column_names: cols_dict[column] = list() for i in range(len(quantiles)): cols_dict[column].append(scol_for(sdf, column).getItem(i).alias(column)) internal_index_column = SPARK_DEFAULT_INDEX_NAME cols = [] for i, col in enumerate(zip(*cols_dict.values())): cols.append(F.struct(F.lit("%s" % quantiles[i]).alias(internal_index_column), *col)) sdf = sdf.select(F.array(*cols).alias("arrays")) # And then, explode it and manually set the index. 
# +-----------------+---+---+ # |__index_level_0__| a| b| # +-----------------+---+---+ # | 0.25| 2| 6| # | 0.5| 3| 7| # | 0.75| 4| 8| # +-----------------+---+---+ sdf = sdf.select(F.explode(F.col("arrays"))).selectExpr("col.*") internal = self._internal.copy( spark_frame=sdf, data_spark_columns=[ scol_for(sdf, col) for col in self._internal.data_spark_column_names ], index_map=OrderedDict({internal_index_column: None}), column_labels=self._internal.column_labels, column_label_names=None, ) return DataFrame(internal) if not result_as_series else DataFrame(internal).T[key] def query(self, expr, inplace=False): """ Query the columns of a DataFrame with a boolean expression. .. note:: Internal columns that starting with a '__' prefix are able to access, however, they are not supposed to be accessed. .. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the pandas specific syntax such as `@` is not supported. If you want the pandas syntax, you can work around with :meth:`DataFrame.apply_batch`, but you should be aware that `query_func` will be executed at different nodes in a distributed manner. So, for example, to use `@` syntax, make sure the variable is serialized by, for example, putting it within the closure as below. >>> df = ks.DataFrame({'A': range(2000), 'B': range(2000)}) >>> def query_func(pdf): ... num = 1995 ... return pdf.query('A > @num') >>> df.apply_batch(query_func) A B 1996 1996 1996 1997 1997 1997 1998 1998 1998 1999 1999 1999 Parameters ---------- expr : str The query string to evaluate. You can refer to column names that contain spaces by surrounding them in backticks. For example, if one of your columns is called ``a a`` and you want to sum it with ``b``, your query should be ```a a` + b``. inplace : bool Whether the query should modify the data in place or return a modified copy. Returns ------- DataFrame DataFrame resulting from the provided query expression. Examples -------- >>> df = ks.DataFrame({'A': range(1, 6), ... 'B': range(10, 0, -2), ... 'C C': range(10, 5, -1)}) >>> df A B C C 0 1 10 10 1 2 8 9 2 3 6 8 3 4 4 7 4 5 2 6 >>> df.query('A > B') A B C C 4 5 2 6 The previous expression is equivalent to >>> df[df.A > df.B] A B C C 4 5 2 6 For columns with spaces in their name, you can use backtick quoting. >>> df.query('B == `C C`') A B C C 0 1 10 10 The previous expression is equivalent to >>> df[df.B == df['C C']] A B C C 0 1 10 10 """ if isinstance(self.columns, pd.MultiIndex): raise ValueError("Doesn't support for MultiIndex columns") if not isinstance(expr, str): raise ValueError("expr must be a string to be evaluated, {} given".format(type(expr))) inplace = validate_bool_kwarg(inplace, "inplace") data_columns = [label[0] for label in self._internal.column_labels] sdf = self._sdf.select( self._internal.index_spark_columns + [ scol.alias(col) for scol, col in zip(self._internal.data_spark_columns, data_columns) ] ).filter(expr) internal = self._internal.with_new_sdf(sdf, data_columns=data_columns) if inplace: self._internal = internal else: return DataFrame(internal) def explain(self, extended: Optional[bool] = None, mode: Optional[str] = None): """ Prints the underlying (logical and physical) Spark plans to the console for debugging purpose. Parameters ---------- extended : boolean, default ``False``. If ``False``, prints only the physical plan. mode : string, default ``None``. The expected output format of plans. 
Examples -------- >>> df = ks.DataFrame({'id': range(10)}) >>> df.explain() # doctest: +ELLIPSIS == Physical Plan == ... >>> df.explain(True) # doctest: +ELLIPSIS == Parsed Logical Plan == ... == Analyzed Logical Plan == ... == Optimized Logical Plan == ... == Physical Plan == ... >>> df.explain(mode="extended") # doctest: +ELLIPSIS == Parsed Logical Plan == ... == Analyzed Logical Plan == ... == Optimized Logical Plan == ... == Physical Plan == ... """ if LooseVersion(pyspark.__version__) < LooseVersion("3.0"): if mode is not None: if extended is not None: raise Exception("extended and mode can not be specified simultaneously") elif mode == "simple": extended = False elif mode == "extended": extended = True else: raise ValueError( "Unknown explain mode: {}. Accepted explain modes are " "'simple', 'extended'.".format(mode) ) if extended is None: extended = False self._internal.to_internal_spark_frame.explain(extended) else: self._internal.to_internal_spark_frame.explain(extended, mode) def take(self, indices, axis=0, **kwargs): """ Return the elements in the given *positional* indices along an axis. This means that we are not indexing according to actual values in the index attribute of the object. We are indexing according to the actual position of the element in the object. Parameters ---------- indices : array-like An array of ints indicating which positions to take. axis : {0 or 'index', 1 or 'columns', None}, default 0 The axis on which to select elements. ``0`` means that we are selecting rows, ``1`` means that we are selecting columns. **kwargs For compatibility with :meth:`numpy.take`. Has no effect on the output. Returns ------- taken : same type as caller An array-like containing the elements taken from the object. See Also -------- DataFrame.loc : Select a subset of a DataFrame by labels. DataFrame.iloc : Select a subset of a DataFrame by positions. numpy.take : Take elements from an array along an axis. Examples -------- >>> df = ks.DataFrame([('falcon', 'bird', 389.0), ... ('parrot', 'bird', 24.0), ... ('lion', 'mammal', 80.5), ... ('monkey', 'mammal', np.nan)], ... columns=['name', 'class', 'max_speed'], ... index=[0, 2, 3, 1]) >>> df name class max_speed 0 falcon bird 389.0 2 parrot bird 24.0 3 lion mammal 80.5 1 monkey mammal NaN Take elements at positions 0 and 3 along the axis 0 (default). Note how the actual indices selected (0 and 1) do not correspond to our selected indices 0 and 3. That's because we are selecting the 0th and 3rd rows, not rows whose indices equal 0 and 3. >>> df.take([0, 3]).sort_index() name class max_speed 0 falcon bird 389.0 1 monkey mammal NaN Take elements at indices 1 and 2 along the axis 1 (column selection). >>> df.take([1, 2], axis=1) class max_speed 0 bird 389.0 2 bird 24.0 3 mammal 80.5 1 mammal NaN We may take elements using negative integers for positive indices, starting from the end of the object, just like with Python lists. >>> df.take([-1, -2]).sort_index() name class max_speed 1 monkey mammal NaN 3 lion mammal 80.5 """ axis = validate_axis(axis) if not is_list_like(indices) or isinstance(indices, (dict, set)): raise ValueError("`indices` must be a list-like except dict or set") if axis == 0: return self.iloc[indices, :] elif axis == 1: return self.iloc[:, indices] def eval(self, expr, inplace=False): """ Evaluate a string describing operations on DataFrame columns. Operates on columns only, not specific rows or elements. 
This allows `eval` to run arbitrary code, which can make you vulnerable to code injection if you pass user input to this function. Parameters ---------- expr : str The expression string to evaluate. inplace : bool, default False If the expression contains an assignment, whether to perform the operation inplace and mutate the existing DataFrame. Otherwise, a new DataFrame is returned. Returns ------- The result of the evaluation. See Also -------- DataFrame.query : Evaluates a boolean expression to query the columns of a frame. DataFrame.assign : Can evaluate an expression or function to create new values for a column. eval : Evaluate a Python expression as a string using various backends. Examples -------- >>> df = ks.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)}) >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 >>> df.eval('A + B') 0 11 1 10 2 9 3 8 4 7 Name: 0, dtype: int64 Assignment is allowed though by default the original DataFrame is not modified. >>> df.eval('C = A + B') A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 >>> df A B 0 1 10 1 2 8 2 3 6 3 4 4 4 5 2 Use ``inplace=True`` to modify the original DataFrame. >>> df.eval('C = A + B', inplace=True) >>> df A B C 0 1 10 11 1 2 8 10 2 3 6 9 3 4 4 8 4 5 2 7 """ from databricks.koalas.series import _col if isinstance(self.columns, pd.MultiIndex): raise ValueError("`eval` is not supported for multi-index columns") inplace = validate_bool_kwarg(inplace, "inplace") should_return_series = False should_return_scalar = False # Since `eva_func` doesn't have a type hint, inferring the schema is always preformed # in the `apply_batch`. Hence, the variables `is_seires` and `is_scalar_` can be updated. def eval_func(pdf): nonlocal should_return_series nonlocal should_return_scalar result_inner = pdf.eval(expr, inplace=inplace) if inplace: result_inner = pdf if isinstance(result_inner, pd.Series): should_return_series = True result_inner = result_inner.to_frame() elif is_scalar(result_inner): should_return_scalar = True result_inner = pd.Series(result_inner).to_frame() return result_inner result = self.apply_batch(eval_func) if inplace: # Here, the result is always a frame because the error is thrown during schema inference # from pandas. self._internal = result._internal elif should_return_series: return _col(result) elif should_return_scalar: return _col(result)[0] else: # Returns a frame return result def _to_internal_pandas(self): """ Return a pandas DataFrame directly from _internal to avoid overhead of copy. This method is for internal use only. """ return self._internal.to_pandas_frame def __repr__(self): max_display_count = get_option("display.max_rows") if max_display_count is None: return self._to_internal_pandas().to_string() pdf = self.head(max_display_count + 1)._to_internal_pandas() pdf_length = len(pdf) pdf = pdf.iloc[:max_display_count] if pdf_length > max_display_count: repr_string = pdf.to_string(show_dimensions=True) match = REPR_PATTERN.search(repr_string) if match is not None: nrows = match.group("rows") ncols = match.group("columns") footer = "\n\n[Showing only the first {nrows} rows x {ncols} columns]".format( nrows=nrows, ncols=ncols ) return REPR_PATTERN.sub(footer, repr_string) return pdf.to_string() def _repr_html_(self): max_display_count = get_option("display.max_rows") # pandas 0.25.1 has a regression about HTML representation so 'bold_rows' # has to be set as False explicitly. 
See https://github.com/pandas-dev/pandas/issues/28204 bold_rows = not (LooseVersion("0.25.1") == LooseVersion(pd.__version__)) if max_display_count is None: return self._to_internal_pandas().to_html(notebook=True, bold_rows=bold_rows) pdf = self.head(max_display_count + 1)._to_internal_pandas() pdf_length = len(pdf) pdf = pdf.iloc[:max_display_count] if pdf_length > max_display_count: repr_html = pdf.to_html(show_dimensions=True, notebook=True, bold_rows=bold_rows) match = REPR_HTML_PATTERN.search(repr_html) if match is not None: nrows = match.group("rows") ncols = match.group("columns") by = chr(215) footer = ( "\n<p>Showing only the first {rows} rows " "{by} {cols} columns</p>\n</div>".format(rows=nrows, by=by, cols=ncols) ) return REPR_HTML_PATTERN.sub(footer, repr_html) return pdf.to_html(notebook=True, bold_rows=bold_rows) def __getitem__(self, key): from databricks.koalas.series import Series if key is None: raise KeyError("none key") if isinstance(key, (str, tuple, list)): return self.loc[:, key] elif isinstance(key, slice): if any(type(n) == int or None for n in [key.start, key.stop]): # Seems like pandas Frame always uses int as positional search when slicing # with ints. return self.iloc[key] return self.loc[key] elif isinstance(key, Series): return self.loc[key.astype(bool)] raise NotImplementedError(key) def __setitem__(self, key, value): from databricks.koalas.series import Series if (isinstance(value, Series) and value._kdf is not self) or ( isinstance(value, DataFrame) and value is not self ): # Different Series or DataFrames key = self._index_normalized_label(key) value = self._index_normalized_frame(value) def assign_columns(kdf, this_column_labels, that_column_labels): assert len(key) == len(that_column_labels) # Note that here intentionally uses `zip_longest` that combine # that_columns. for k, this_label, that_label in zip_longest( key, this_column_labels, that_column_labels ): yield (kdf._kser_for(that_label), tuple(["that", *k])) if this_label is not None and this_label[1:] != k: yield (kdf._kser_for(this_label), this_label) kdf = align_diff_frames(assign_columns, self, value, fillna=False, how="left") elif isinstance(key, list): assert isinstance(value, DataFrame) # Same DataFrames. field_names = value.columns kdf = self._assign({k: value[c] for k, c in zip(key, field_names)}) else: # Same Series. kdf = self._assign({key: value}) self._internal = kdf._internal def _index_normalized_label(self, labels): """ Returns a label that is normalized against the current column index level. For example, the key "abc" can be ("abc", "", "") if the current Frame has a multi-index for its column """ level = self._internal.column_labels_level if isinstance(labels, str): labels = [(labels,)] elif isinstance(labels, tuple): labels = [labels] else: labels = [k if isinstance(k, tuple) else (k,) for k in labels] if any(len(label) > level for label in labels): raise KeyError( "Key length ({}) exceeds index depth ({})".format( max(len(label) for label in labels), level ) ) return [tuple(list(label) + ([""] * (level - len(label)))) for label in labels] def _index_normalized_frame(self, kser_or_kdf): """ Returns a frame that is normalized against the current column index level. 
For example, the name in `pd.Series([...], name="abc")` can be can be ("abc", "", "") if the current DataFrame has a multi-index for its column """ from databricks.koalas.series import Series level = self._internal.column_labels_level if isinstance(kser_or_kdf, Series): kdf = kser_or_kdf.to_frame() else: assert isinstance(kser_or_kdf, DataFrame), type(kser_or_kdf) kdf = kser_or_kdf.copy() kdf.columns = pd.MultiIndex.from_tuples( [ tuple([name_like_string(label)] + ([""] * (level - 1))) for label in kdf._internal.column_labels ] ) return kdf def __getattr__(self, key: str) -> Any: if key.startswith("__"): raise AttributeError(key) if hasattr(_MissingPandasLikeDataFrame, key): property_or_func = getattr(_MissingPandasLikeDataFrame, key) if isinstance(property_or_func, property): return property_or_func.fget(self) # type: ignore else: return partial(property_or_func, self) try: return self.loc[:, key] except KeyError: raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__.__name__, key) ) def __len__(self): return self._sdf.count() def __dir__(self): fields = [f for f in self._sdf.schema.fieldNames() if " " not in f] return super(DataFrame, self).__dir__() + fields def __iter__(self): return iter(self.columns) # NDArray Compat def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any): # TODO: is it possible to deduplicate it with '_map_series_op'? if all(isinstance(inp, DataFrame) for inp in inputs) and any( inp is not inputs[0] for inp in inputs ): # binary only assert len(inputs) == 2 this = inputs[0] that = inputs[1] if this._internal.column_labels_level != that._internal.column_labels_level: raise ValueError("cannot join with no overlapping index names") # Different DataFrames def apply_op(kdf, this_column_labels, that_column_labels): for this_label, that_label in zip(this_column_labels, that_column_labels): yield ( ufunc(kdf._kser_for(this_label), kdf._kser_for(that_label), **kwargs), this_label, ) return align_diff_frames(apply_op, this, that, fillna=True, how="full") else: # DataFrame and Series applied = [] this = inputs[0] assert all(inp is this for inp in inputs if isinstance(inp, DataFrame)) for label in this._internal.column_labels: arguments = [] for inp in inputs: arguments.append(inp[label] if isinstance(inp, DataFrame) else inp) # both binary and unary. applied.append(ufunc(*arguments, **kwargs)) internal = this._internal.with_new_columns(applied) return DataFrame(internal) if sys.version_info >= (3, 7): def __class_getitem__(cls, params): # This is a workaround to support variadic generic in DataFrame in Python 3.7. # See https://github.com/python/typing/issues/193 # we always wraps the given type hints by a tuple to mimic the variadic generic. return super(cls, DataFrame).__class_getitem__(Tuple[params]) elif (3, 5) <= sys.version_info < (3, 7): # This is a workaround to support variadic generic in DataFrame in Python 3.5+ # The implementation is in its metaclass so this flag is needed to distinguish # Koalas DataFrame. is_dataframe = None def _reduce_spark_multi(sdf, aggs): """ Performs a reduction on a dataframe, the functions being known sql aggregate functions. """ assert isinstance(sdf, spark.DataFrame) sdf0 = sdf.agg(*aggs) l = sdf0.head(2) assert len(l) == 1, (sdf, l) row = l[0] l2 = list(row) assert len(l2) == len(aggs), (row, l2) return l2 class _CachedDataFrame(DataFrame): """ Cached Koalas DataFrame, which corresponds to Pandas DataFrame logically, but internally it caches the corresponding Spark DataFrame. 
""" def __init__(self, internal, storage_level=None): if storage_level is None: self._cached = internal._sdf.cache() elif isinstance(storage_level, StorageLevel): self._cached = internal._sdf.persist(storage_level) else: raise TypeError( "Only a valid pyspark.StorageLevel type is acceptable for the `storage_level`" ) super(_CachedDataFrame, self).__init__(internal) def __enter__(self): return self def __exit__(self, exception_type, exception_value, traceback): self.unpersist() @property def storage_level(self): """ Return the storage level of this cache. Examples -------- >>> import pyspark >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df dogs cats 0 0.2 0.3 1 0.0 0.6 2 0.6 0.0 3 0.2 0.1 >>> with df.cache() as cached_df: ... print(cached_df.storage_level) ... Disk Memory Deserialized 1x Replicated Set the StorageLevel to `MEMORY_ONLY`. >>> with df.persist(pyspark.StorageLevel.MEMORY_ONLY) as cached_df: ... print(cached_df.storage_level) ... Memory Serialized 1x Replicated """ return self._cached.storageLevel def unpersist(self): """ The `unpersist` function is used to uncache the Koalas DataFrame when it is not used with `with` statement. Examples -------- >>> df = ks.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)], ... columns=['dogs', 'cats']) >>> df = df.cache() To uncache the dataframe, use `unpersist` function >>> df.unpersist() """ if self._cached.is_cached: self._cached.unpersist()
1
14,980
I'm wondering which we should use, stable or latest?
databricks-koalas
py
@@ -25,7 +25,8 @@ public abstract class BftExtraDataCodec { protected enum EncodingType { ALL, EXCLUDE_COMMIT_SEALS, - EXCLUDE_COMMIT_SEALS_AND_ROUND_NUMBER + EXCLUDE_COMMIT_SEALS_AND_ROUND_NUMBER, + EXCLUDE_CMS // TODO-lucas How can we achieve this w/o changing the BftExtraDataCodec base class } private static final Logger LOG = LogManager.getLogger();
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.consensus.common.bft; import org.hyperledger.besu.ethereum.core.BlockHeader; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.tuweni.bytes.Bytes; public abstract class BftExtraDataCodec { protected enum EncodingType { ALL, EXCLUDE_COMMIT_SEALS, EXCLUDE_COMMIT_SEALS_AND_ROUND_NUMBER } private static final Logger LOG = LogManager.getLogger(); public static int EXTRA_VANITY_LENGTH = 32; public Bytes encode(final BftExtraData bftExtraData) { return encode(bftExtraData, EncodingType.ALL); } public Bytes encodeWithoutCommitSeals(final BftExtraData bftExtraData) { return encode(bftExtraData, EncodingType.EXCLUDE_COMMIT_SEALS); } public Bytes encodeWithoutCommitSealsAndRoundNumber(final BftExtraData bftExtraData) { return encode(bftExtraData, EncodingType.EXCLUDE_COMMIT_SEALS_AND_ROUND_NUMBER); } protected abstract Bytes encode(final BftExtraData bftExtraData, final EncodingType encodingType); public BftExtraData decode(final BlockHeader blockHeader) { final Object inputExtraData = blockHeader.getParsedExtraData(); if (inputExtraData instanceof BftExtraData) { return (BftExtraData) inputExtraData; } LOG.warn( "Expected a BftExtraData instance but got {}. Reparsing required.", inputExtraData != null ? inputExtraData.getClass().getName() : "null"); return decodeRaw(blockHeader.getExtraData()); } public abstract BftExtraData decodeRaw(Bytes bytes); }
1
25,866
possibly extending EncodingType enum?
hyperledger-besu
java
@@ -26,6 +26,18 @@ describe "Selenium::WebDriver::TargetLocator" do end end + it "should switch to parent frame" do + driver.navigate.to url_for("iframes.html") + + iframe = driver.find_element(:tag_name => "iframe") + driver.switch_to.frame(iframe) + + driver.find_element(:name, 'login').should be_kind_of(WebDriver::Element) + + driver.switch_to.parent_frame + driver.find_element(:id, 'iframe_page_heading').should be_kind_of(WebDriver::Element) + end + # switching by name not yet supported by safari not_compliant_on :browser => [:ie, :iphone, :safari] do it "should switch to a window and back when given a block" do
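For readers more familiar with the Python bindings, a rough equivalent of what the spec added in this diff exercises is sketched below; the fixture URL is a placeholder and only the switch_to calls are the point (this is not part of the Selenium test suite):

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()
driver.get("http://localhost:8000/iframes.html")  # hypothetical fixture URL

iframe = driver.find_element(By.TAG_NAME, "iframe")
driver.switch_to.frame(iframe)
assert driver.find_element(By.NAME, "login") is not None

# parent_frame() goes up exactly one level, unlike default_content(), which
# always returns to the top-level document.
driver.switch_to.parent_frame()
assert driver.find_element(By.ID, "iframe_page_heading") is not None
driver.quit()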
1
require File.expand_path("../spec_helper", __FILE__) describe "Selenium::WebDriver::TargetLocator" do let(:wait) { Selenium::WebDriver::Wait.new } it "should find the active element" do driver.navigate.to url_for("xhtmlTest.html") driver.switch_to.active_element.should be_an_instance_of(WebDriver::Element) end not_compliant_on :browser => [:iphone] do it "should switch to a frame" do driver.navigate.to url_for("iframes.html") driver.switch_to.frame("iframe1") driver.find_element(:name, 'login').should be_kind_of(WebDriver::Element) end it "should switch to a frame by Element" do driver.navigate.to url_for("iframes.html") iframe = driver.find_element(:tag_name => "iframe") driver.switch_to.frame(iframe) driver.find_element(:name, 'login').should be_kind_of(WebDriver::Element) end end # switching by name not yet supported by safari not_compliant_on :browser => [:ie, :iphone, :safari] do it "should switch to a window and back when given a block" do driver.navigate.to url_for("xhtmlTest.html") driver.find_element(:link, "Open new window").click driver.title.should == "XHTML Test Page" driver.switch_to.window("result") do wait.until { driver.title == "We Arrive Here" } end wait.until { driver.title == "XHTML Test Page" } reset_driver! end it "should handle exceptions inside the block" do driver.navigate.to url_for("xhtmlTest.html") driver.find_element(:link, "Open new window").click driver.title.should == "XHTML Test Page" lambda { driver.switch_to.window("result") { raise "foo" } }.should raise_error(RuntimeError, "foo") driver.title.should == "XHTML Test Page" reset_driver! end it "should switch to a window" do driver.navigate.to url_for("xhtmlTest.html") driver.find_element(:link, "Open new window").click wait.until { driver.title == "XHTML Test Page" } driver.switch_to.window("result") wait.until { driver.title == "We Arrive Here" } reset_driver! end it "should use the original window if the block closes the popup" do driver.navigate.to url_for("xhtmlTest.html") driver.find_element(:link, "Open new window").click driver.title.should == "XHTML Test Page" driver.switch_to.window("result") do wait.until { driver.title == "We Arrive Here" } driver.close end driver.current_url.should include("xhtmlTest.html") driver.title.should == "XHTML Test Page" reset_driver! 
end end not_compliant_on :browser => [:android, :iphone, :safari] do it "should switch to default content" do driver.navigate.to url_for("iframes.html") driver.switch_to.frame 0 driver.switch_to.default_content driver.find_element(:id => "iframe_page_heading") end end describe "alerts" do not_compliant_on :browser => [:opera, :iphone, :safari, :phantomjs] do it "allows the user to accept an alert" do driver.navigate.to url_for("alerts.html") driver.find_element(:id => "alert").click driver.switch_to.alert.accept driver.title.should == "Testing Alerts" end end not_compliant_on({:browser => :chrome, :platform => :macosx}, # http://code.google.com/p/chromedriver/issues/detail?id=26 {:browser => :opera}, {:browser => :iphone}, {:browser => :safari}, {:browser => :phantomjs}) do it "allows the user to dismiss an alert" do driver.navigate.to url_for("alerts.html") driver.find_element(:id => "alert").click alert = wait_for_alert alert.dismiss driver.title.should == "Testing Alerts" end end not_compliant_on :browser => [:opera, :iphone, :safari, :phantomjs] do it "allows the user to set the value of a prompt" do driver.navigate.to url_for("alerts.html") driver.find_element(:id => "prompt").click alert = wait_for_alert alert.send_keys "cheese" alert.accept text = driver.find_element(:id => "text").text text.should == "cheese" end it "allows the user to get the text of an alert" do driver.navigate.to url_for("alerts.html") driver.find_element(:id => "alert").click alert = wait_for_alert text = alert.text alert.accept text.should == "cheese" end it "raises when calling #text on a closed alert" do driver.navigate.to url_for("alerts.html") driver.find_element(:id => "alert").click alert = wait_for_alert alert.accept expect { alert.text }.to raise_error(Selenium::WebDriver::Error::NoAlertPresentError) end end not_compliant_on :browser => [:ie, :opera, :iphone, :safari, :phantomjs] do it "raises NoAlertOpenError if no alert is present" do lambda { driver.switch_to.alert }.should raise_error( Selenium::WebDriver::Error::NoAlertPresentError, /alert|modal dialog/i) end end compliant_on :browser => [:firefox, :ie] do it "raises an UnhandledAlertError if an alert has not been dealt with" do driver.navigate.to url_for("alerts.html") driver.find_element(:id => "alert").click wait_for_alert lambda { driver.title }.should raise_error(Selenium::WebDriver::Error::UnhandledAlertError, /: "cheese"/) driver.title.should == "Testing Alerts" # :chrome does not auto-dismiss the alert end end end end
1
11,085
I tested it only in Firefox (`./go //rb:firefox-test`)
SeleniumHQ-selenium
js
@@ -27,7 +27,7 @@ using Process = OpenTelemetry.Exporter.Jaeger.Implementation.Process; namespace OpenTelemetry.Exporter.Jaeger { - public class JaegerExporter : BaseExporter<Activity> + public class JaegerExporter : BaseExporter<Activity>, IProviderContainer<TracerProvider> { private readonly int maxPayloadSizeInBytes; private readonly TProtocolFactory protocolFactory;
1
// <copyright file="JaegerExporter.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using System.Diagnostics; using System.Runtime.CompilerServices; using OpenTelemetry.Exporter.Jaeger.Implementation; using OpenTelemetry.Resources; using OpenTelemetry.Trace; using Thrift.Protocol; using Thrift.Transport; using Process = OpenTelemetry.Exporter.Jaeger.Implementation.Process; namespace OpenTelemetry.Exporter.Jaeger { public class JaegerExporter : BaseExporter<Activity> { private readonly int maxPayloadSizeInBytes; private readonly TProtocolFactory protocolFactory; private readonly TTransport clientTransport; private readonly JaegerThriftClient thriftClient; private readonly InMemoryTransport memoryTransport; private readonly TProtocol memoryProtocol; private Dictionary<string, Process> processCache; private int batchByteSize; private bool disposedValue; // To detect redundant dispose calls internal JaegerExporter(JaegerExporterOptions options, TTransport clientTransport = null) { if (options is null) { throw new ArgumentNullException(nameof(options)); } this.maxPayloadSizeInBytes = (!options.MaxPayloadSizeInBytes.HasValue || options.MaxPayloadSizeInBytes <= 0) ? JaegerExporterOptions.DefaultMaxPayloadSizeInBytes : options.MaxPayloadSizeInBytes.Value; this.protocolFactory = new TCompactProtocol.Factory(); this.clientTransport = clientTransport ?? 
new JaegerThriftClientTransport(options.AgentHost, options.AgentPort); this.thriftClient = new JaegerThriftClient(this.protocolFactory.GetProtocol(this.clientTransport)); this.memoryTransport = new InMemoryTransport(16000); this.memoryProtocol = this.protocolFactory.GetProtocol(this.memoryTransport); this.Process = new Process(options.ServiceName, options.ProcessTags); } internal Process Process { get; set; } internal Dictionary<string, Batch> CurrentBatches { get; } = new Dictionary<string, Batch>(); /// <inheritdoc/> public override ExportResult Export(in Batch<Activity> activityBatch) { try { foreach (var activity in activityBatch) { if (this.processCache == null) { this.ApplyLibraryResource(activity.GetResource()); } this.AppendSpan(activity.ToJaegerSpan()); } this.SendCurrentBatches(null); return ExportResult.Success; } catch (Exception ex) { JaegerExporterEventSource.Log.FailedExport(ex); return ExportResult.Failure; } } internal void ApplyLibraryResource(Resource libraryResource) { if (libraryResource is null) { throw new ArgumentNullException(nameof(libraryResource)); } var process = this.Process; string serviceName = null; string serviceNamespace = null; foreach (var label in libraryResource.Attributes) { string key = label.Key; if (label.Value is string strVal) { switch (key) { case Resource.ServiceNameKey: serviceName = strVal; continue; case Resource.ServiceNamespaceKey: serviceNamespace = strVal; continue; case Resource.LibraryNameKey: case Resource.LibraryVersionKey: continue; } } if (process.Tags == null) { process.Tags = new Dictionary<string, JaegerTag>(); } process.Tags[key] = label.ToJaegerTag(); } if (serviceName != null) { process.ServiceName = serviceNamespace != null ? serviceNamespace + "." + serviceName : serviceName; } if (string.IsNullOrEmpty(process.ServiceName)) { process.ServiceName = JaegerExporterOptions.DefaultServiceName; } this.Process.Message = this.BuildThriftMessage(this.Process).ToArray(); this.processCache = new Dictionary<string, Process> { [this.Process.ServiceName] = this.Process, }; } [MethodImpl(MethodImplOptions.AggressiveInlining)] internal void AppendSpan(JaegerSpan jaegerSpan) { var spanServiceName = jaegerSpan.PeerServiceName ?? this.Process.ServiceName; if (!this.processCache.TryGetValue(spanServiceName, out var spanProcess)) { spanProcess = new Process(spanServiceName, this.Process.Tags); spanProcess.Message = this.BuildThriftMessage(spanProcess).ToArray(); this.processCache.Add(spanServiceName, spanProcess); } var spanMessage = this.BuildThriftMessage(jaegerSpan); jaegerSpan.Return(); var spanTotalBytesNeeded = spanMessage.Count; if (!this.CurrentBatches.TryGetValue(spanServiceName, out var spanBatch)) { spanBatch = new Batch(spanProcess); this.CurrentBatches.Add(spanServiceName, spanBatch); spanTotalBytesNeeded += spanProcess.Message.Length; } if (this.batchByteSize + spanTotalBytesNeeded >= this.maxPayloadSizeInBytes) { this.SendCurrentBatches(spanBatch); // Flushing effectively erases the spanBatch we were working on, so we have to rebuild it. 
spanTotalBytesNeeded = spanMessage.Count + spanProcess.Message.Length; this.CurrentBatches.Add(spanServiceName, spanBatch); } spanBatch.Add(spanMessage); this.batchByteSize += spanTotalBytesNeeded; } /// <inheritdoc/> protected override void Dispose(bool disposing) { if (!this.disposedValue) { if (disposing) { this.thriftClient.Dispose(); this.clientTransport.Dispose(); this.memoryTransport.Dispose(); this.memoryProtocol.Dispose(); } this.disposedValue = true; } base.Dispose(disposing); } private void SendCurrentBatches(Batch workingBatch) { try { foreach (var batchKvp in this.CurrentBatches) { var batch = batchKvp.Value; this.thriftClient.SendBatch(batch); if (batch != workingBatch) { batch.Return(); } else { batch.Clear(); } } } finally { this.CurrentBatches.Clear(); this.batchByteSize = 0; this.memoryTransport.Reset(); } } [MethodImpl(MethodImplOptions.AggressiveInlining)] private BufferWriterMemory BuildThriftMessage(Process process) { process.Write(this.memoryProtocol); return this.memoryTransport.ToBuffer(); } [MethodImpl(MethodImplOptions.AggressiveInlining)] private BufferWriterMemory BuildThriftMessage(in JaegerSpan jaegerSpan) { jaegerSpan.Write(this.memoryProtocol); return this.memoryTransport.ToBuffer(); } } }
1
17,864
can we do this in the baseexporter itself? So that exporters just access this.Provider.Resource, and baseexporters take care of populating Provider?
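Purely as an illustration of the shape this comment proposes (rendered in Python rather than C#, with entirely hypothetical names that do not exist in the OpenTelemetry .NET codebase): the base exporter owns the provider reference, so concrete exporters read the resource from it instead of implementing a separate provider-container interface.

class TracerProviderStub:
    def __init__(self, resource):
        self.resource = resource


class BaseExporter:
    def __init__(self):
        # Populated once by the SDK when the exporter is registered.
        self.provider = None

    def export(self, batch):
        raise NotImplementedError


class JaegerLikeExporter(BaseExporter):
    def export(self, batch):
        # Concrete exporters just read self.provider.resource when needed.
        resource = self.provider.resource if self.provider else None
        print("exporting %d spans with resource %r" % (len(batch), resource))


exporter = JaegerLikeExporter()
exporter.provider = TracerProviderStub(resource={"service.name": "demo"})
exporter.export(batch=[object(), object()])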
open-telemetry-opentelemetry-dotnet
.cs
@@ -156,7 +156,9 @@ module.exports = function(grunt) { 'frontend/express/public/core/session-frequency/javascripts/countly.models.js', 'frontend/express/public/core/session-frequency/javascripts/countly.views.js', 'frontend/express/public/core/events/javascripts/countly.models.js', - 'frontend/express/public/core/events/javascripts/countly.views.js' + 'frontend/express/public/core/events/javascripts/countly.views.js', + 'frontend/express/public/core/events/javascripts/countly.events.models.js', + 'frontend/express/public/core/events/javascripts/countly.events.views.js' ], dest: 'frontend/express/public/javascripts/min/countly.lib.concat.js' }
1
module.exports = function(grunt) { grunt.initConfig({ eslint: { options: { configFile: './.eslintrc.json' }, target: ['./'] }, concat: { options: { separator: ';' }, dom: { src: [ 'frontend/express/public/javascripts/dom/jquery/jquery.js', 'frontend/express/public/javascripts/dom/jquery.form.js', 'frontend/express/public/javascripts/dom/tipsy/jquery.tipsy.js', 'frontend/express/public/javascripts/dom/jquery.noisy.min.js', 'frontend/express/public/javascripts/dom/jquery.sticky.headers.js', 'frontend/express/public/javascripts/dom/jqueryui/jquery-ui.js', 'frontend/express/public/javascripts/dom/jqueryui/jquery-ui-i18n.js', 'frontend/express/public/javascripts/dom/slimScroll.min.js', 'frontend/express/public/javascripts/dom/jquery.easing.1.3.js', 'frontend/express/public/javascripts/dom/dataTables/js/jquery.dataTables.js', 'frontend/express/public/javascripts/dom/dataTables/js/ZeroClipboard.js', 'frontend/express/public/javascripts/dom/dataTables/js/TableTools.js', 'frontend/express/public/javascripts/dom/pace/pace.min.js', 'frontend/express/public/javascripts/dom/drop/tether.min.js', 'frontend/express/public/javascripts/dom/drop/drop.min.js' ], dest: 'frontend/express/public/javascripts/min/countly.dom.concat.js' }, utils: { src: [ 'frontend/express/public/javascripts/utils/polyfills.js', 'frontend/express/public/javascripts/utils/underscore-min.js', 'frontend/express/public/javascripts/utils/lodash.merge.js', 'frontend/express/public/javascripts/utils/prefixfree.min.js', 'frontend/express/public/javascripts/utils/moment/moment-with-locales.min.js', 'frontend/express/public/javascripts/utils/handlebars.js', 'frontend/express/public/javascripts/utils/backbone-min.js', 'frontend/express/public/javascripts/utils/jquery.i18n.properties.js', 'frontend/express/public/javascripts/utils/jstz.min.js', 'frontend/express/public/javascripts/utils/store+json2.min.js', 'frontend/express/public/javascripts/utils/jquery.idle-timer.js', 'frontend/express/public/javascripts/utils/textcounter.min.js', 'frontend/express/public/javascripts/utils/initialAvatar.js', 'frontend/express/public/javascripts/utils/jquery.amaran.min.js', 'frontend/express/public/javascripts/utils/jquery.titlealert.js', 'frontend/express/public/javascripts/utils/jquery.hoverIntent.minified.js', 'frontend/express/public/javascripts/utils/tooltipster/tooltipster.bundle.min.js', 'frontend/express/public/javascripts/utils/highlight/highlight.pack.js', 'frontend/express/public/javascripts/utils/dropzone.js', 'frontend/express/public/javascripts/utils/webfont.js', 'frontend/express/public/javascripts/utils/selectize.min.js', 'frontend/express/public/javascripts/utils/polyfill/es6-promise.auto.min.js', 'frontend/express/public/javascripts/utils/polyfill/intersection-observer.js', 'frontend/express/public/javascripts/utils/vue/vue.min.js', 'frontend/express/public/javascripts/utils/vue/composition-api.min.js', 'frontend/express/public/javascripts/utils/vue/vuex.min.js', 'frontend/express/public/javascripts/utils/echarts.5.min.js', 'frontend/express/public/javascripts/utils/vue/vue-echarts.umd.min.js', 'frontend/express/public/javascripts/utils/vue/vue-color.min.js', 'frontend/express/public/javascripts/utils/vue/v-tooltip.min.js', 'frontend/express/public/javascripts/utils/vue/vee-validate.full.min.js', 'frontend/express/public/javascripts/utils/vue/vue-good-table.min.js', 'frontend/express/public/javascripts/utils/vue/vue2Dropzone.min.js', 'frontend/express/public/javascripts/utils/vue/element-ui.js', 
'frontend/express/public/javascripts/utils/vue/inViewportMixin.js', 'frontend/express/public/javascripts/utils/vue/vuescroll.min.js', 'frontend/express/public/javascripts/utils/jquery.xss.js', 'frontend/express/public/javascripts/countly/countly.common.js', 'frontend/express/public/javascripts/utils/simpleUpload.min.js', 'frontend/express/public/javascripts/utils/jsoneditor/codemirror.js', 'frontend/express/public/javascripts/utils/jsoneditor/javascript.js', 'frontend/express/public/javascripts/utils/jsoneditor/json2.js', 'frontend/express/public/javascripts/utils/jsoneditor/jsonlint.js', 'frontend/express/public/javascripts/utils/jsoneditor/minify.json.js', 'frontend/express/public/javascripts/utils/jsoneditor/jsoneditor.js', 'frontend/express/public/javascripts/utils/Sortable.min.js', 'frontend/express/public/javascripts/utils/vue/vuedraggable.umd.min.js', 'frontend/express/public/javascripts/utils/countly.checkbox.js' ], dest: 'frontend/express/public/javascripts/min/countly.utils.concat.js' }, visualization: { src: [ 'frontend/express/public/javascripts/visualization/jquery.peity.min.js', 'frontend/express/public/javascripts/visualization/jquery.sparkline.js', 'frontend/express/public/javascripts/visualization/flot/jquery.flot.js', 'frontend/express/public/javascripts/visualization/flot/jquery.flot.tickrotor.js', 'frontend/express/public/javascripts/visualization/flot/jquery.flot.pie.js', 'frontend/express/public/javascripts/visualization/flot/jquery.flot.resize.js', 'frontend/express/public/javascripts/visualization/flot/jquery.flot.stack.js', 'frontend/express/public/javascripts/visualization/flot/jquery.flot.spline.js', 'frontend/express/public/javascripts/visualization/flot/jquery.flot.crosshair.js', 'frontend/express/public/javascripts/visualization/flot/jquery.flot.orderBars.js', 'frontend/express/public/javascripts/visualization/flot/jquery.flot.navigate.js', 'frontend/express/public/javascripts/visualization/gauge.min.js', 'frontend/express/public/javascripts/visualization/d3/d3.min.js', 'frontend/express/public/javascripts/visualization/rickshaw/rickshaw.min.js', 'frontend/express/public/javascripts/visualization/rickshaw/rickshaw.x.axis.js' ], dest: 'frontend/express/public/javascripts/min/countly.visualization.concat.js' }, lib: { src: [ 'frontend/express/public/javascripts/countly/countly.auth.js', 'frontend/express/public/javascripts/countly/countly.helpers.js', 'frontend/express/public/javascripts/countly/countly.map.helper.js', 'frontend/express/public/javascripts/countly/countly.event.js', 'frontend/express/public/javascripts/countly/countly.session.js', 'frontend/express/public/javascripts/countly/countly.city.js', 'frontend/express/public/javascripts/countly/countly.location.js', 'frontend/express/public/javascripts/countly/countly.device.list.js', 'frontend/express/public/javascripts/countly/countly.device.osmapping.js', 'frontend/express/public/javascripts/countly/countly.device.js', 'frontend/express/public/javascripts/countly/countly.device.detail.js', 'frontend/express/public/javascripts/countly/countly.app.version.js', 'frontend/express/public/javascripts/countly/countly.carrier.js', 'frontend/express/public/javascripts/countly/countly.total.users.js', 'frontend/express/public/javascripts/countly/countly.task.manager.js', 'frontend/express/public/javascripts/countly/countly.app.users.js', 'frontend/express/public/javascripts/countly/countly.template.js', 'frontend/express/public/javascripts/countly/vue/core.js', 
'frontend/express/public/javascripts/countly/vue/container.js', 'frontend/express/public/javascripts/countly/vue/helpers.js', 'frontend/express/public/javascripts/countly/vue/data/vuex.js', 'frontend/express/public/javascripts/countly/vue/imports.js', 'frontend/express/public/javascripts/countly/vue/components/nav.js', 'frontend/express/public/javascripts/countly/vue/components/layout.js', 'frontend/express/public/javascripts/countly/vue/components/form.js', 'frontend/express/public/javascripts/countly/vue/components/date.js', 'frontend/express/public/javascripts/countly/vue/components/dropdown.js', 'frontend/express/public/javascripts/countly/vue/components/input.js', 'frontend/express/public/javascripts/countly/vue/datatable-legacy.js', 'frontend/express/public/javascripts/countly/vue/components/datatable.js', 'frontend/express/public/javascripts/countly/vue/components/drawer.js', 'frontend/express/public/javascripts/countly/vue/components/vis.js', 'frontend/express/public/javascripts/countly/vue/components/helpers.js', 'frontend/express/public/javascripts/countly/vue/components/static.js', 'frontend/express/public/javascripts/countly/vue/components/progress.js', 'frontend/express/public/javascripts/countly/vue/legacy.js', 'frontend/express/public/javascripts/countly/countly.vue.legacy.js', 'frontend/express/public/javascripts/countly/countly.token.manager.js', 'frontend/express/public/javascripts/countly/countly.version.history.js', 'frontend/express/public/javascripts/countly/countly.analytics.js', 'frontend/express/public/javascripts/countly/countly.views.js', 'frontend/express/public/core/user-activity/javascripts/countly.models.js', 'frontend/express/public/core/user-activity/javascripts/countly.views.js', 'frontend/express/public/core/session-overview/javascripts/countly.models.js', 'frontend/express/public/core/session-overview/javascripts/countly.views.js', 'frontend/express/public/core/session-durations/javascripts/countly.models.js', 'frontend/express/public/core/session-durations/javascripts/countly.views.js', 'frontend/express/public/core/session-frequency/javascripts/countly.models.js', 'frontend/express/public/core/session-frequency/javascripts/countly.views.js', 'frontend/express/public/core/events/javascripts/countly.models.js', 'frontend/express/public/core/events/javascripts/countly.views.js' ], dest: 'frontend/express/public/javascripts/min/countly.lib.concat.js' } }, uglify: { options: { banner: '/*! 
Countly <%= grunt.template.today("dd-mm-yyyy") %> */\n', mangle: { reserved: ["$super"] } }, dist: { files: { 'frontend/express/public/javascripts/min/countly.dom.js': 'frontend/express/public/javascripts/min/countly.dom.concat.js', 'frontend/express/public/javascripts/min/countly.utils.js': 'frontend/express/public/javascripts/min/countly.utils.concat.js', 'frontend/express/public/javascripts/min/countly.visualization.js': 'frontend/express/public/javascripts/min/countly.visualization.concat.js', 'frontend/express/public/javascripts/min/countly.lib.js': 'frontend/express/public/javascripts/min/countly.lib.concat.js' } } }, copy: {}, cssmin: { dist: { files: { 'frontend/express/public/stylesheets/main.min.css': [ 'frontend/express/public/stylesheets/main.css', 'frontend/express/public/stylesheets/vue/element-ui.css', 'frontend/express/public/stylesheets/vue/clyvue.css', 'frontend/express/public/stylesheets/amaranjs/amaran.min.css', 'frontend/express/public/stylesheets/selectize/selectize.css', 'frontend/express/public/stylesheets/jsoneditor/codemirror.css', 'frontend/express/public/stylesheets/countly-checkbox/countly.checkbox.css', 'frontend/express/public/javascripts/dom/tipsy/tipsy.css', 'frontend/express/public/javascripts/visualization/rickshaw/rickshaw.min.css', 'frontend/express/public/javascripts/dom/pace/pace-theme-flash.css', 'frontend/express/public/javascripts/dom/drop/drop-theme-countly.min.css', 'frontend/express/public/javascripts/utils/tooltipster/tooltipster.bundle.min.css', 'frontend/express/public/stylesheets/bulma/bulma-custom.css', 'frontend/express/public/stylesheets/styles/manifest.css', ] } } }, mochaTest: { test: { options: { reporter: 'spec', timeout: 50000 }, src: ['test/*/*.js'] } }, mocha_nyc: { coverage: { src: ['test/*/*.js'], // a folder works nicely options: { coverage: true, // this will make the grunt.event.on('coverage') event listener to be triggered mask: '*.js', excludes: ['bin/*', 'frontend/*', 'extend/*', 'Gruntfile.js', 'test/*'], mochaOptions: ['--harmony', '--async-only', '--reporter', 'spec', '--timeout', '50000', '--exit'], nycOptions: ['--harmony', '--clean', 'false'], //,'--include-all-sources' '--all' reportFormats: ['none'] } } }, istanbul_check_coverage: { default: { options: { coverageFolder: 'coverage*', // will check both coverage folders and merge the coverage results check: { lines: 80, statements: 80 } } } }, sass: { // Task dist: { // Target options: { // Target options style: 'compressed', update: true // only compile when scss file is newer than css file }, files: [ { src: 'frontend/express/public/stylesheets/vue/clyvue.scss', dest: 'frontend/express/public/stylesheets/vue/clyvue.css' }, { src: 'frontend/express/public/stylesheets/styles/manifest.scss', dest: 'frontend/express/public/stylesheets/styles/manifest.css' }, { expand: true, src: ['plugins/*/frontend/public/stylesheets/**/*.scss'], ext: '.css', extDot: 'first' } ] } }, watch: { scripts: { files: ['plugins/*/frontend/public/stylesheets/**/*.scss', "frontend/express/public/core/*/stylesheets/**/*.scss", "frontend/express/public/stylesheets/styles/**/*.scss" ], tasks: ['sass'] }, } }); //code coverage grunt.event.on('coverage', function(lcovFileContents, done) { // Check below on the section "The coverage event" done(); }); grunt.loadNpmTasks('grunt-mocha-nyc'); grunt.registerTask('coverage', ['mocha_nyc:coverage']); //-----------code coverage----------- grunt.loadNpmTasks('grunt-contrib-sass'); grunt.loadNpmTasks('grunt-contrib-concat'); 
grunt.loadNpmTasks('grunt-contrib-uglify'); grunt.loadNpmTasks('grunt-contrib-copy'); grunt.loadNpmTasks('grunt-contrib-cssmin'); grunt.loadNpmTasks('grunt-contrib-watch'); grunt.loadNpmTasks('grunt-eslint'); grunt.loadNpmTasks('grunt-mocha-test'); grunt.registerTask('default', ['eslint', 'mochaTest']); grunt.registerTask('dist', ['sass', 'concat', 'uglify', 'cssmin']); grunt.registerTask('plugins', 'Minify plugin JS / CSS files and copy images', function() { var plugins = require('./plugins/plugins.json'), js = [], css = [], img = [], fs = require('fs'), path = require('path'); console.log('Preparing production files for following plugins: %j', plugins); if (plugins.indexOf('push') !== -1) { if (plugins.indexOf('geo') !== -1) { plugins.splice(plugins.indexOf('geo'), 1); plugins.push('geo'); } if (plugins.indexOf('push_approver') !== -1) { plugins.splice(plugins.indexOf('push_approver'), 1); plugins.push('push_approver'); } } if (plugins.indexOf('drill') !== -1) { if (plugins.indexOf('cohorts') !== -1) { plugins.splice(plugins.indexOf('cohorts'), 1); plugins.push('cohorts'); } if (plugins.indexOf('funnels') !== -1) { plugins.splice(plugins.indexOf('funnels'), 1); plugins.push('funnels'); } if (plugins.indexOf('formulas') !== -1) { plugins.splice(plugins.indexOf('formulas'), 1); plugins.push('formulas'); } } plugins.forEach(function(plugin) { var files, pluginPath = path.join(__dirname, 'plugins', plugin), javascripts = path.join(pluginPath, 'frontend/public/javascripts'), stylesheets = path.join(pluginPath, 'frontend/public/stylesheets'), images = path.join(pluginPath, 'frontend/public/images', plugin); if (fs.existsSync(javascripts) && fs.statSync(javascripts).isDirectory()) { files = fs.readdirSync(javascripts); if (files.length) { // move models to the top, then all dependencies, then views for (var i = 0; i < files.length; i++) { if (files[i].indexOf('countly.models.js') !== -1 && i !== 0) { files.splice(0, 0, files.splice(i, 1)[0]); } else if (files[i].indexOf('countly.views.js') !== -1 && i !== files.length - 1) { files.splice(files.length - 1, 0, files.splice(i, 1)[0]); } } files.forEach(function(name) { var file = path.join(javascripts, name); if (fs.statSync(file).isFile() && name.indexOf('.') !== 0 && name.endsWith('.js')) { js.push('plugins/' + plugin + '/frontend/public/javascripts/' + name); } }); } } if (fs.existsSync(stylesheets) && fs.statSync(stylesheets).isDirectory()) { files = fs.readdirSync(stylesheets); files.forEach(function(name) { var file = path.join(stylesheets, name); if (fs.statSync(file).isFile() && name !== 'pre-login.css' && name.indexOf('.') !== 0 && name.endsWith('.css')) { css.push('plugins/' + plugin + '/frontend/public/stylesheets/' + name); } }); } try { if (fs.existsSync(images) && fs.statSync(images).isDirectory()) { img.push({ expand: true, cwd: 'plugins/' + plugin + '/frontend/public/images/' + plugin + '/', filter: 'isFile', src: '**', dest: 'frontend/express/public/images/' + plugin + '/' }); } } catch (err) { if (err.code !== 'ENOENT') { throw err; } } }); grunt.config('copy.plugins.files', img); grunt.config('concat.plugins.src', js); grunt.config('concat.plugins.dest', 'frontend/express/public/javascripts/min/countly.plugins.concat.js'); grunt.config('uglify.plugins.files.frontend/express/public/javascripts/min/countly\\.plugins\\.js', 'frontend/express/public/javascripts/min/countly.plugins.concat.js'); grunt.config('cssmin.plugins.files.frontend/express/public/stylesheets/plugins\\.min\\.css', css); // grunt.task.loadTasks(['copy:plugins', 
'concat:plugins', 'uglify:plugins']); // grunt.task.run(['concat', 'uglify']); grunt.task.run(['concat:plugins', 'uglify:plugins', 'copy:plugins', 'sass', 'cssmin:plugins']); console.log('Done preparing production files'); }); grunt.registerTask('locales', 'Concat all locale files into one', function() { var plugins = require('./plugins/plugins.json'), locales = {}, fs = require('fs'), path = require('path'); console.log('Preparing locale files for core & plugins: %j', plugins); var pushLocaleFile = function(name, path) { var lang = ''; name = name.replace('.properties', ''); if (name.indexOf('_') !== -1) { lang = name.split('_').pop(); if (lang.length > 3 || lang === "en") { lang = ''; } } if (!locales[lang]) { locales[lang] = []; } locales[lang].push(path); }; function processLocaleDir(dir) { if (!fs.existsSync(dir)) { return; } fs.readdirSync(dir).forEach(function(name) { var file = path.join(dir, name); if (fs.statSync(file).isFile() && name.indexOf('.') !== 0) { pushLocaleFile(name, dir + '/' + name); } }); } [path.join(__dirname, 'frontend/express/public/localization/dashboard'), path.join(__dirname, 'frontend/express/public/localization/help'), path.join(__dirname, 'frontend/express/public/localization/mail')].forEach(processLocaleDir); plugins.forEach(function(plugin) { var localization = path.join(__dirname, 'plugins', plugin, 'frontend/public/localization'); try { if (fs.statSync(localization).isDirectory()) { fs.readdirSync(localization).forEach(function(name) { var file = path.join(localization, name); if (fs.statSync(file).isFile() && name.indexOf('.') !== 0) { pushLocaleFile(name, 'plugins/' + plugin + '/frontend/public/localization/' + name); } }); } } catch (err) { if (err.code !== 'ENOENT') { throw err; } } }); processLocaleDir(path.join(__dirname, 'frontend/express/public/localization/custom')); for (var lang in locales) { grunt.config('concat.locales_' + lang + '.options.separator', '\n\n'); grunt.config('concat.locales_' + lang + '.src', locales[lang]); grunt.config('concat.locales_' + lang + '.dest', 'frontend/express/public/localization/min/locale' + (lang.length ? '_' + lang : '') + '.properties'); grunt.task.run('concat:locales_' + lang); } console.log('Done preparing locale files'); }); grunt.registerTask('dist-all', ['dist', 'plugins', 'locales']); };
1
14,045
what is the difference between `/core/events/javascripts/countly.views.js` and `/core/events/javascripts/countly.events.views.js`
Countly-countly-server
js
@@ -16,6 +16,10 @@ axe.a11yCheck = function (context, options, callback) { options = {}; } + if (!('iframes' in options)) { + options.iframes = true; + } + var audit = axe._audit; if (!audit) { throw new Error('No audit configured');
1
/** * Starts analysis on the current document and its subframes * * @param {Object} context The `Context` specification object @see Context * @param {Array} options Optional RuleOptions * @param {Function} callback The function to invoke when analysis is complete; receives an array of `RuleResult`s */ axe.a11yCheck = function (context, options, callback) { 'use strict'; if (typeof options === 'function') { callback = options; options = {}; } if (!options || typeof options !== 'object') { options = {}; } var audit = axe._audit; if (!audit) { throw new Error('No audit configured'); } options.reporter = options.reporter || audit.reporter || 'v2'; var reporter = axe.getReporter(options.reporter); axe._runRules(context, options, function (results) { var res = reporter(results, options, callback); if (res !== undefined) { callback(res); } }, axe.log); };
1
10,965
We should make sure this is also supported by `axe.run`. Perhaps moving it into run-rules would cover both API methods more easily?
dequelabs-axe-core
js
@@ -46,9 +46,9 @@ class MemoryBasedStorage(StorageBase): def set_record_timestamp(self, collection_id, parent_id, record, modified_field=DEFAULT_MODIFIED_FIELD, last_modified=None): - timestamp = self._bump_timestamp(collection_id, parent_id, record, - modified_field, - last_modified=last_modified) + timestamp = self._bump_and_store_timestamp(collection_id, parent_id, record, + modified_field, + last_modified=last_modified) record[modified_field] = timestamp return record
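The patch above only redirects set_record_timestamp to a _bump_and_store_timestamp helper; the bump rule itself is documented in _bump_timestamp further down in this file excerpt: timestamps never move backwards, and collisions within the same millisecond are resolved by adding one. A self-contained sketch of that rule (function and variable names here are illustrative, not the Kinto API):

import time


def msec_time():
    return int(time.time() * 1000)


def bump(collection_ts, requested=None):
    """Return (new_collection_ts, record_ts) following the memory backend's rule."""
    if requested is not None:
        if requested == collection_ts:      # same msec: slide into the future
            collection_ts += 1
            requested = collection_ts
        elif requested > collection_ts:     # future value becomes the new maximum
            collection_ts = requested
        # past values are kept on the record but do not move the collection
        return collection_ts, requested
    current = msec_time()
    if current <= collection_ts:            # burst of writes within one msec
        current = collection_ts + 1
    return current, current


# Example: two writes landing in the same millisecond still get distinct,
# increasing timestamps.
ts, first = bump(0)
ts, second = bump(ts, requested=first)
assert second == first + 1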
1
import re import operator from collections import defaultdict from collections import abc import numbers from kinto.core import utils from kinto.core.decorators import synchronized from kinto.core.storage import ( StorageBase, exceptions, DEFAULT_ID_FIELD, DEFAULT_MODIFIED_FIELD, DEFAULT_DELETED_FIELD, MISSING) from kinto.core.utils import (COMPARISON, find_nested_value) import ujson def tree(): return defaultdict(tree) class MemoryBasedStorage(StorageBase): """Abstract storage class, providing basic operations and methods for in-memory implementations of sorting and filtering. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def initialize_schema(self, dry_run=False): # Nothing to do. pass def strip_deleted_record(self, collection_id, parent_id, record, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD): """Strip the record of all its fields expect id and timestamp, and set the deletion field value (e.g deleted=True) """ deleted = {} deleted[id_field] = record[id_field] deleted[modified_field] = record[modified_field] deleted[deleted_field] = True return deleted def set_record_timestamp(self, collection_id, parent_id, record, modified_field=DEFAULT_MODIFIED_FIELD, last_modified=None): timestamp = self._bump_timestamp(collection_id, parent_id, record, modified_field, last_modified=last_modified) record[modified_field] = timestamp return record def extract_record_set(self, records, filters, sorting, id_field, deleted_field, pagination_rules=None, limit=None): """Take the list of records and handle filtering, sorting and pagination. """ return extract_record_set(records, filters=filters, sorting=sorting, id_field=id_field, deleted_field=deleted_field, pagination_rules=pagination_rules, limit=limit) class Storage(MemoryBasedStorage): """Storage backend implementation in memory. Useful for development or testing purposes, but records are lost after each server restart. Enable in configuration:: kinto.storage_backend = kinto.core.storage.memory Enable strict json validation before saving (instead of the more lenient ujson, see #1238):: kinto.storage_strict_json = true """ def __init__(self, *args, readonly=False, **kwargs): super().__init__(*args, **kwargs) self.readonly = readonly self.flush() def flush(self, auth=None): self._store = tree() self._cemetery = tree() self._timestamps = defaultdict(dict) @synchronized def collection_timestamp(self, collection_id, parent_id, auth=None): ts = self._timestamps[parent_id].get(collection_id) if ts is not None: return ts if self.readonly: error_msg = 'Cannot initialize empty collection timestamp when running in readonly.' raise exceptions.BackendError(message=error_msg) return self._bump_timestamp(collection_id, parent_id) def _bump_timestamp(self, collection_id, parent_id, record=None, modified_field=None, last_modified=None): """Timestamp are base on current millisecond. .. note :: Here it is assumed that if requests from the same user burst in, the time will slide into the future. It is not problematic since the timestamp notion is opaque, and behaves like a revision number. """ # XXX factorize code from memory and redis backends. is_specified = (record is not None and modified_field in record or last_modified is not None) collection_timestamp = self._timestamps[parent_id].get(collection_id, 0) if is_specified: # If there is a timestamp in the new record, try to use it. 
if last_modified is not None: current = last_modified else: current = record[modified_field] # If it is equal to current collection timestamp, bump it. if current == collection_timestamp: collection_timestamp += 1 current = collection_timestamp # If it is superior (future), use it as new collection timestamp. elif current > collection_timestamp: collection_timestamp = current # Else (past), do nothing. else: # Not specified, use a new one. current = utils.msec_time() # If two ops in the same msec, bump it. if current <= collection_timestamp: current = collection_timestamp + 1 collection_timestamp = current self._timestamps[parent_id][collection_id] = collection_timestamp return current @synchronized def create(self, collection_id, parent_id, record, id_generator=None, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): id_generator = id_generator or self.id_generator record = {**record} if id_field in record: # Raise unicity error if record with same id already exists. try: existing = self.get(collection_id, parent_id, record[id_field]) raise exceptions.UnicityError(id_field, existing) except exceptions.RecordNotFoundError: pass else: record[id_field] = id_generator() self.set_record_timestamp(collection_id, parent_id, record, modified_field=modified_field) _id = record[id_field] record = ujson.loads(self.json.dumps(record)) self._store[parent_id][collection_id][_id] = record self._cemetery[parent_id][collection_id].pop(_id, None) return record @synchronized def get(self, collection_id, parent_id, object_id, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): collection = self._store[parent_id][collection_id] if object_id not in collection: raise exceptions.RecordNotFoundError(object_id) return {**collection[object_id]} @synchronized def update(self, collection_id, parent_id, object_id, record, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): record = {**record} record[id_field] = object_id record = ujson.loads(self.json.dumps(record)) self.set_record_timestamp(collection_id, parent_id, record, modified_field=modified_field) self._store[parent_id][collection_id][object_id] = record self._cemetery[parent_id][collection_id].pop(object_id, None) return record @synchronized def delete(self, collection_id, parent_id, object_id, id_field=DEFAULT_ID_FIELD, with_deleted=True, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None, last_modified=None): existing = self.get(collection_id, parent_id, object_id) # Need to delete the last_modified field of the record. del existing[modified_field] self.set_record_timestamp(collection_id, parent_id, existing, modified_field=modified_field, last_modified=last_modified) existing = self.strip_deleted_record(collection_id, parent_id, existing) # Add to deleted items, remove from store. 
if with_deleted: deleted = {**existing} self._cemetery[parent_id][collection_id][object_id] = deleted self._store[parent_id][collection_id].pop(object_id) return existing @synchronized def purge_deleted(self, collection_id, parent_id, before=None, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, auth=None): parent_id_match = re.compile(parent_id.replace('*', '.*')) by_parent_id = {pid: collections for pid, collections in self._cemetery.items() if parent_id_match.match(pid)} num_deleted = 0 for pid, collections in by_parent_id.items(): if collection_id is not None: collections = {collection_id: collections[collection_id]} for collection, colrecords in collections.items(): if before is None: kept = {} else: kept = {key: value for key, value in colrecords.items() if value[modified_field] >= before} self._cemetery[pid][collection] = kept num_deleted += (len(colrecords) - len(kept)) return num_deleted @synchronized def get_all(self, collection_id, parent_id, filters=None, sorting=None, pagination_rules=None, limit=None, include_deleted=False, id_field=DEFAULT_ID_FIELD, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None): records = _get_objects_by_parent_id(self._store, parent_id, collection_id) records, count = self.extract_record_set(records=records, filters=filters, sorting=None, id_field=id_field, deleted_field=deleted_field) deleted = [] if include_deleted: deleted = _get_objects_by_parent_id(self._cemetery, parent_id, collection_id) records, count = self.extract_record_set(records=records + deleted, filters=filters, sorting=sorting, id_field=id_field, deleted_field=deleted_field, pagination_rules=pagination_rules, limit=limit) return records, count @synchronized def delete_all(self, collection_id, parent_id, filters=None, sorting=None, pagination_rules=None, limit=None, id_field=DEFAULT_ID_FIELD, with_deleted=True, modified_field=DEFAULT_MODIFIED_FIELD, deleted_field=DEFAULT_DELETED_FIELD, auth=None): records = _get_objects_by_parent_id(self._store, parent_id, collection_id, with_meta=True) records, count = self.extract_record_set(records=records, filters=filters, sorting=sorting, pagination_rules=pagination_rules, limit=limit, id_field=id_field, deleted_field=deleted_field) deleted = [self.delete(r.pop('__collection_id__'), r.pop('__parent_id__'), r[id_field], id_field=id_field, with_deleted=with_deleted, modified_field=modified_field, deleted_field=deleted_field) for r in records] return deleted def extract_record_set(records, filters, sorting, pagination_rules=None, limit=None, id_field=DEFAULT_ID_FIELD, deleted_field=DEFAULT_DELETED_FIELD): """Apply filters, sorting, limit, and pagination rules to the list of `records`. """ filtered = list(apply_filters(records, filters or [])) total_records = len(filtered) if pagination_rules: paginated = {} for rule in pagination_rules: values = apply_filters(filtered, rule) paginated.update(((x[id_field], x) for x in values)) paginated = paginated.values() else: paginated = filtered sorted_ = apply_sorting(paginated, sorting or []) filtered_deleted = len([r for r in sorted_ if r.get(deleted_field) is True]) if limit: sorted_ = list(sorted_)[:limit] return sorted_, total_records - filtered_deleted def apply_filters(records, filters): """Filter the specified records, using basic iteration. 
""" def contains_filtering(record_value, search_term): if record_value == MISSING: return False try: search_set = set(search_term) record_value_set = set(record_value) except TypeError: return False return record_value_set.intersection(search_set) == search_set def contains_any_filtering(record_value, search_term): if record_value == MISSING: return False try: return set(record_value).intersection(set(search_term)) except TypeError: return False operators = { COMPARISON.LT: operator.lt, COMPARISON.MAX: operator.le, COMPARISON.EQ: operator.eq, COMPARISON.NOT: operator.ne, COMPARISON.MIN: operator.ge, COMPARISON.GT: operator.gt, COMPARISON.IN: operator.contains, COMPARISON.EXCLUDE: lambda x, y: not operator.contains(x, y), COMPARISON.LIKE: lambda x, y: re.search(y, x, re.IGNORECASE), COMPARISON.CONTAINS: contains_filtering, COMPARISON.CONTAINS_ANY: contains_any_filtering, } for record in records: matches = True for f in filters: right = f.value if f.field == DEFAULT_ID_FIELD: if isinstance(right, int): right = str(right) left = find_nested_value(record, f.field, MISSING) if f.operator in (COMPARISON.IN, COMPARISON.EXCLUDE): right, left = left, right elif f.operator == COMPARISON.LIKE: # Add implicit start/end wildchars if none is specified. if '*' not in right: right = '*{}*'.format(right) right = '^{}$'.format(right.replace('*', '.*')) elif f.operator in (COMPARISON.LT, COMPARISON.MAX, COMPARISON.EQ, COMPARISON.NOT, COMPARISON.MIN, COMPARISON.GT): left = schwartzian_transform(left) right = schwartzian_transform(right) if f.operator == COMPARISON.HAS: matches = left != MISSING if f.value else left == MISSING else: matches = matches and operators[f.operator](left, right) if matches: yield record def schwartzian_transform(value): """Decorate a value with a tag that enforces the Postgres sort order. The sort order, per https://www.postgresql.org/docs/9.6/static/datatype-json.html, is: Object > Array > Boolean > Number > String > Null Note that there are more interesting rules for comparing objects and arrays but we probably don't need to be that compatible. MISSING represents what would be a SQL NULL, which is "bigger" than everything else. """ if value is None: return (0, value) if isinstance(value, str): return (1, value) if isinstance(value, bool): # This has to be before Number, because bools are a subclass # of int :( return (3, value) if isinstance(value, numbers.Number): return (2, value) if isinstance(value, abc.Sequence): return (4, value) if isinstance(value, abc.Mapping): return (5, value) if value is MISSING: return (6, value) raise ValueError('Unknown value: {}'.format(value)) # pragma: no cover def apply_sorting(records, sorting): """Sort the specified records, using cumulative python sorting. 
""" result = list(records) if not result: return result def column(record, name): return schwartzian_transform(find_nested_value(record, name, default=MISSING)) for sort in reversed(sorting): result = sorted(result, key=lambda r: column(r, sort.field), reverse=(sort.direction < 0)) return result def _get_objects_by_parent_id(store, parent_id, collection_id, with_meta=False): if parent_id is not None: parent_id_match = re.compile('^{}$'.format(parent_id.replace('*', '.*'))) by_parent_id = {pid: collections for pid, collections in store.items() if parent_id_match.match(pid)} else: by_parent_id = store[parent_id] objects = [] for pid, collections in by_parent_id.items(): if collection_id is not None: collections = {collection_id: collections[collection_id]} for collection, colobjects in collections.items(): for r in colobjects.values(): if with_meta: objects.append(dict(__collection_id__=collection, __parent_id__=pid, **r)) else: objects.append(r) return objects def load_from_config(config): settings = {**config.get_settings()} strict = settings.get('storage_strict_json', False) return Storage(strict_json=strict)
1
11,620
If `MemoryBasedStorage` relies on `self._bump_and_store_timestamp()`, then every child class will have to implement it. So it should not be prefixed with `_`, and the base-class method should raise `NotImplementedError` so a missing override fails loudly (see the sketch after this record). :)
Kinto-kinto
py
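To make the review comment above concrete, here is a minimal Python sketch, not Kinto's actual code: it assumes the renamed helper becomes part of the public backend contract as `bump_and_store_timestamp` (that name, its signature, and the toy `Storage` counter below are illustrative assumptions, not the real API). The abstract base declares the method and raises `NotImplementedError`, so a backend that forgets to override it fails immediately instead of silently inheriting a private helper.

```python
DEFAULT_MODIFIED_FIELD = 'last_modified'  # stand-in for kinto.core.storage's constant


class MemoryBasedStorage:
    """Abstract base providing shared helpers for in-memory style backends."""

    def bump_and_store_timestamp(self, collection_id, parent_id, record=None,
                                 modified_field=None, last_modified=None):
        # Public part of the backend contract: every concrete class must override it.
        raise NotImplementedError

    def set_record_timestamp(self, collection_id, parent_id, record,
                             modified_field=DEFAULT_MODIFIED_FIELD,
                             last_modified=None):
        # Shared behaviour stays in the base class and delegates to the contract method.
        timestamp = self.bump_and_store_timestamp(collection_id, parent_id, record,
                                                  modified_field,
                                                  last_modified=last_modified)
        record[modified_field] = timestamp
        return record


class Storage(MemoryBasedStorage):
    """Toy in-memory backend that overrides the contract method."""

    def __init__(self):
        self._timestamps = {}

    def bump_and_store_timestamp(self, collection_id, parent_id, record=None,
                                 modified_field=None, last_modified=None):
        # Monotonically increasing counter per (parent, collection) pair;
        # an explicit last_modified can push the timestamp forward.
        key = (parent_id, collection_id)
        current = max(self._timestamps.get(key, 0) + 1,
                      last_modified if last_modified is not None else 0)
        self._timestamps[key] = current
        return current
```

With this shape, calling `set_record_timestamp()` on a subclass that has not implemented `bump_and_store_timestamp()` raises `NotImplementedError` right away, which is the behaviour the review comment asks for.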
@@ -8,7 +8,6 @@ using System.Collections.Generic; namespace System.Diagnostics { - [MemoryDiagnoser] [BenchmarkCategory(Categories.Libraries)] public class Perf_Activity {
1
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // See the LICENSE file in the project root for more information. using BenchmarkDotNet.Attributes; using MicroBenchmarks; using System.Collections.Generic; namespace System.Diagnostics { [MemoryDiagnoser] [BenchmarkCategory(Categories.Libraries)] public class Perf_Activity { private static readonly ActivitySource s_ActivitySource; private static readonly ActivityListener s_ActivityListener; private static readonly Activity s_ActivitySmall; private static readonly Activity s_ActivityLarge; private static readonly ActivityLink s_ActivityLinkSmall; private static readonly ActivityLink s_ActivityLinkLarge; static Perf_Activity() { s_ActivitySource = new ActivitySource("TestActivitySource"); s_ActivityListener = new ActivityListener { ShouldListenTo = s => s.Name == "TestActivitySource", Sample = (ref ActivityCreationOptions<ActivityContext> o) => ActivitySamplingResult.AllDataAndRecorded }; Dictionary<string, object> LargeTagSet = new Dictionary<string, object>(); for (int i = 0; i < 1024; i++) { if (i % 2 == 0) LargeTagSet.Add($"Key{i}", i); else LargeTagSet.Add($"Key{i}", i.ToString()); } ActivitySource.AddActivityListener(s_ActivityListener); s_ActivitySmall = s_ActivitySource.StartActivity( "TestActivity", ActivityKind.Internal, parentContext: default, tags: new Dictionary<string, object> { ["tag1"] = "string1", ["tag2"] = 1, ["tag3"] = "string2", ["tag4"] = false, }, links: new ActivityLink[] { new ActivityLink(default), new ActivityLink(default), new ActivityLink(default), new ActivityLink(default), }); s_ActivitySmall.AddEvent(new ActivityEvent("TestEvent1")); s_ActivitySmall.AddEvent(new ActivityEvent("TestEvent2")); s_ActivitySmall.AddEvent(new ActivityEvent("TestEvent3")); s_ActivitySmall.AddEvent(new ActivityEvent("TestEvent4")); s_ActivitySmall.Stop(); ActivityLink[] LargeLinkSet = new ActivityLink[1024]; for (int i = 0; i < 1024; i++) { LargeLinkSet[i] = new ActivityLink(default); } s_ActivityLarge = s_ActivitySource.StartActivity( "TestActivity", ActivityKind.Internal, parentContext: default, tags: LargeTagSet, links: LargeLinkSet); for (int i = 0; i < 1024; i++) { s_ActivityLarge.AddEvent(new ActivityEvent($"TestEvent{i}")); } s_ActivityLarge.Stop(); s_ActivityLinkSmall = new ActivityLink( default, new ActivityTagsCollection( new Dictionary<string, object> { ["tag1"] = "string1", ["tag2"] = 1, ["tag3"] = "string2", ["tag4"] = false, })); s_ActivityLinkLarge = new ActivityLink( default, new ActivityTagsCollection(LargeTagSet)); } [Benchmark] public void EnumerateActivityTagsSmall() { foreach (var _ in s_ActivitySmall.Tags) { } } [Benchmark] public void EnumerateActivityTagsLarge() { foreach (var _ in s_ActivityLarge.Tags) { } } [Benchmark] public void EnumerateActivityTagObjectsSmall() { foreach (var _ in s_ActivitySmall.TagObjects) { } } [Benchmark] public void EnumerateActivityTagObjectsLarge() { foreach (var _ in s_ActivityLarge.TagObjects) { } } [Benchmark] public void EnumerateActivityLinksSmall() { foreach (var _ in s_ActivitySmall.Links) { } } [Benchmark] public void EnumerateActivityLinksLarge() { foreach (var _ in s_ActivityLarge.Links) { } } [Benchmark] public void EnumerateActivityEventsSmall() { foreach (var _ in s_ActivitySmall.Events) { } } [Benchmark] public void EnumerateActivityEventsLarge() { foreach (var _ in s_ActivityLarge.Events) { } } [Benchmark] public void EnumerateActivityLinkTagsSmall() { foreach (var _ in 
s_ActivityLinkSmall.Tags) { } } [Benchmark] public void EnumerateActivityLinkTagsLarge() { foreach (var _ in s_ActivityLinkLarge.Tags) { } } } }
1
12,054
Why was the `MemoryDiagnoser` attribute removed?
dotnet-performance
.cs
@@ -3939,7 +3939,16 @@ function getModelsMapForPopulate(model, docs, options) { foreignField = foreignField.call(doc); } - const ret = convertTo_id(utils.getValue(localField, doc)); + const localFieldPath = modelSchema.paths[localField]; + const localFieldGetters = localFieldPath ? localFieldPath.getters : []; + let ret; + + if (localFieldGetters.length) { + ret = localFieldGetters[0].call(doc, doc[localField]); + } else { + ret = convertTo_id(utils.getValue(localField, doc)); + } + const id = String(utils.getValue(foreignField, doc)); options._docs[id] = Array.isArray(ret) ? ret.slice() : ret;
1
'use strict'; /*! * Module dependencies. */ var Aggregate = require('./aggregate'); var ChangeStream = require('./cursor/ChangeStream'); var Document = require('./document'); var DocumentNotFoundError = require('./error').DocumentNotFoundError; var DivergentArrayError = require('./error').DivergentArrayError; var Error = require('./error'); var EventEmitter = require('events').EventEmitter; var MongooseMap = require('./types/map'); var OverwriteModelError = require('./error').OverwriteModelError; var PromiseProvider = require('./promise_provider'); var Query = require('./query'); var Schema = require('./schema'); var VersionError = require('./error').VersionError; var ParallelSaveError = require('./error').ParallelSaveError; var applyHooks = require('./helpers/model/applyHooks'); var applyMethods = require('./helpers/model/applyMethods'); var applyStatics = require('./helpers/model/applyStatics'); var cast = require('./cast'); var castUpdate = require('./helpers/query/castUpdate'); var discriminator = require('./helpers/model/discriminator'); var getDiscriminatorByValue = require('./queryhelpers').getDiscriminatorByValue; var internalToObjectOptions = require('./options').internalToObjectOptions; var isPathSelectedInclusive = require('./helpers/projection/isPathSelectedInclusive'); var get = require('lodash.get'); var getSchemaTypes = require('./helpers/populate/getSchemaTypes'); var getVirtual = require('./helpers/populate/getVirtual'); var modifiedPaths = require('./helpers/update/modifiedPaths'); var mpath = require('mpath'); var parallel = require('async/parallel'); var parallelLimit = require('async/parallelLimit'); var setDefaultsOnInsert = require('./helpers/setDefaultsOnInsert'); var utils = require('./utils'); const VERSION_WHERE = 1; const VERSION_INC = 2; const VERSION_ALL = VERSION_WHERE | VERSION_INC; /** * Model constructor * * Provides the interface to MongoDB collections as well as creates document instances. * * @param {Object} doc values with which to create the document * @inherits Document http://mongoosejs.com/docs/api.html#document-js * @event `error`: If listening to this event, 'error' is emitted when a document was saved without passing a callback and an `error` occurred. If not listening, the event bubbles to the connection used to create this Model. * @event `index`: Emitted after `Model#ensureIndexes` completes. If an error occurred it is passed with the event. * @event `index-single-start`: Emitted when an individual index starts within `Model#ensureIndexes`. The fields and options being used to build the index are also passed with the event. * @event `index-single-done`: Emitted when an individual index finishes within `Model#ensureIndexes`. If an error occurred it is passed with the event. The fields, options, and index name are also passed. * @api public */ function Model(doc, fields, skipId) { if (fields instanceof Schema) { throw new TypeError('2nd argument to `Model` must be a POJO or string, ' + '**not** a schema. Make sure you\'re calling `mongoose.model()`, not ' + '`mongoose.Model()`.'); } Document.call(this, doc, fields, skipId); } /*! * Inherits from Document. * * All Model.prototype features are available on * top level (non-sub) documents. */ Model.prototype.__proto__ = Document.prototype; Model.prototype.$isMongooseModelPrototype = true; /** * Connection the model uses. * * @api public * @property db * @memberOf Model * @instance */ Model.prototype.db; /** * Collection the model uses. 
* * @api public * @property collection * @memberOf Model * @instance */ Model.prototype.collection; /** * The name of the model * * @api public * @property modelName * @memberOf Model * @instance */ Model.prototype.modelName; /** * Additional properties to attach to the query when calling `save()` and * `isNew` is false. * * @api public * @property $where * @memberOf Model * @instance */ Model.prototype.$where; /** * If this is a discriminator model, `baseModelName` is the name of * the base model. * * @api public * @property baseModelName * @memberOf Model * @instance */ Model.prototype.baseModelName; /*! * ignore */ Model.prototype.$__handleSave = function(options, callback) { const _this = this; let i; let keys; let len; if (!options.safe && this.schema.options.safe) { options.safe = this.schema.options.safe; } if (typeof options.safe === 'boolean') { options.safe = null; } let safe = options.safe ? utils.clone(options.safe) : options.safe; const session = 'session' in options ? options.session : this.$session(); if (session != null) { safe = typeof safe === 'object' && safe != null ? safe : {}; safe.session = session; } if (this.isNew) { // send entire doc const obj = this.toObject(internalToObjectOptions); if ((obj || {})._id === void 0) { // documents must have an _id else mongoose won't know // what to update later if more changes are made. the user // wouldn't know what _id was generated by mongodb either // nor would the ObjectId generated my mongodb necessarily // match the schema definition. setTimeout(function() { callback(new Error('document must have an _id before saving')); }, 0); return; } this.$__version(true, obj); this.collection.insertOne(obj, safe, function(err, ret) { if (err) { _this.isNew = true; _this.emit('isNew', true); _this.constructor.emit('isNew', true); callback(err, null); return; } callback(null, ret); }); this.$__reset(); this.isNew = false; this.emit('isNew', false); this.constructor.emit('isNew', false); // Make it possible to retry the insert this.$__.inserting = true; } else { // Make sure we don't treat it as a new object on error, // since it already exists this.$__.inserting = false; const delta = this.$__delta(); if (delta) { if (delta instanceof Error) { callback(delta); return; } const where = this.$__where(delta[0]); if (where instanceof Error) { callback(where); return; } if (this.$where) { keys = Object.keys(this.$where); len = keys.length; for (i = 0; i < len; ++i) { where[keys[i]] = this.$where[keys[i]]; } } this.collection.updateOne(where, delta[1], safe, function(err, ret) { if (err) { callback(err); return; } ret.$where = where; callback(null, ret); }); } else { this.$__reset(); callback(); return; } this.emit('isNew', false); this.constructor.emit('isNew', false); } }; /*! 
* ignore */ Model.prototype.$__save = function(options, callback) { this.$__handleSave(options, (error, result) => { if (error) { return this.schema.s.hooks.execPost('save:error', this, [this], { error: error }, function(error) { callback(error); }); } // store the modified paths before the document is reset const modifiedPaths = this.modifiedPaths(); this.$__reset(); let numAffected = 0; if (get(options, 'safe.w') !== 0 && get(options, 'w') !== 0) { // Skip checking if write succeeded if writeConcern is set to // unacknowledged writes, because otherwise `numAffected` will always be 0 if (result) { if (Array.isArray(result)) { numAffected = result.length; } else if (result.result && result.result.n !== undefined) { numAffected = result.result.n; } else if (result.result && result.result.nModified !== undefined) { numAffected = result.result.nModified; } else { numAffected = result; } } // was this an update that required a version bump? if (this.$__.version && !this.$__.inserting) { let doIncrement = VERSION_INC === (VERSION_INC & this.$__.version); this.$__.version = undefined; let key = this.schema.options.versionKey; let version = this.getValue(key) || 0; if (numAffected <= 0) { // the update failed. pass an error back let err = new VersionError(this, version, modifiedPaths); return callback(err); } // increment version if was successful if (doIncrement) { this.setValue(key, version + 1); } } if (result != null && numAffected <= 0) { error = new DocumentNotFoundError(result.$where); return this.schema.s.hooks.execPost('save:error', this, [this], { error: error }, function(error) { callback(error); }); } } this.$__.saving = undefined; this.emit('save', this, numAffected); this.constructor.emit('save', this, numAffected); callback(null, this); }); }; /** * Saves this document. * * ####Example: * * product.sold = Date.now(); * product.save(function (err, product) { * if (err) .. * }) * * The callback will receive three parameters * * 1. `err` if an error occurred * 2. `product` which is the saved `product` * * As an extra measure of flow control, save will return a Promise. * ####Example: * product.save().then(function(product) { * ... * }); * * @param {Object} [options] options optional options * @param {Object} [options.safe] overrides [schema's safe option](http://mongoosejs.com//docs/guide.html#safe) * @param {Boolean} [options.validateBeforeSave] set to false to save without validating. * @param {Function} [fn] optional callback * @return {Promise|undefined} Returns undefined if used with callback or a Promise otherwise. 
* @api public * @see middleware http://mongoosejs.com/docs/middleware.html */ Model.prototype.save = function(options, fn) { let parallelSave; if (this.$__.saving) { parallelSave = new ParallelSaveError(this); parallelSave.stack = this.$__.saving; } else { this.$__.saving = new ParallelSaveError(this).stack; } if (typeof options === 'function') { fn = options; options = undefined; } if (options != null) { options = utils.clone(options); } else { options = {}; } if (fn) { fn = this.constructor.$wrapCallback(fn); } const originalStack = new Error().stack; return utils.promiseOrCallback(fn, cb => { if (parallelSave) { this.$__handleReject(parallelSave); return cb(parallelSave); } this.$__save(options, error => { this.$__.saving = undefined; if (error) { // gh-2633: since VersionError is very generic, take the // stack trace of the original save() function call rather // than the async trace if (error instanceof VersionError) { error.stack = originalStack; } this.$__handleReject(error); return cb(error); } cb(null, this); }); }); }; /*! * Determines whether versioning should be skipped for the given path * * @param {Document} self * @param {String} path * @return {Boolean} true if versioning should be skipped for the given path */ function shouldSkipVersioning(self, path) { var skipVersioning = self.schema.options.skipVersioning; if (!skipVersioning) return false; // Remove any array indexes from the path path = path.replace(/\.\d+\./, '.'); return skipVersioning[path]; } /*! * Apply the operation to the delta (update) clause as * well as track versioning for our where clause. * * @param {Document} self * @param {Object} where * @param {Object} delta * @param {Object} data * @param {Mixed} val * @param {String} [operation] */ function operand(self, where, delta, data, val, op) { // delta op || (op = '$set'); if (!delta[op]) delta[op] = {}; delta[op][data.path] = val; // disabled versioning? if (self.schema.options.versionKey === false) return; // path excluded from versioning? if (shouldSkipVersioning(self, data.path)) return; // already marked for versioning? if (VERSION_ALL === (VERSION_ALL & self.$__.version)) return; switch (op) { case '$set': case '$unset': case '$pop': case '$pull': case '$pullAll': case '$push': case '$addToSet': break; default: // nothing to do return; } // ensure updates sent with positional notation are // editing the correct array element. // only increment the version if an array position changes. // modifying elements of an array is ok if position does not change. if (op === '$push' || op === '$addToSet' || op === '$pullAll' || op === '$pull') { self.$__.version = VERSION_INC; } else if (/^\$p/.test(op)) { // potentially changing array positions self.increment(); } else if (Array.isArray(val)) { // $set an array self.increment(); } else if (/\.\d+\.|\.\d+$/.test(data.path)) { // now handling $set, $unset // subpath of array self.$__.version = VERSION_WHERE; } } /*! * Compiles an update and where clause for a `val` with _atomics. 
* * @param {Document} self * @param {Object} where * @param {Object} delta * @param {Object} data * @param {Array} value */ function handleAtomics(self, where, delta, data, value) { if (delta.$set && delta.$set[data.path]) { // $set has precedence over other atomics return; } if (typeof value.$__getAtomics === 'function') { value.$__getAtomics().forEach(function(atomic) { const op = atomic[0]; const val = atomic[1]; operand(self, where, delta, data, val, op); }); return; } // legacy support for plugins var atomics = value._atomics, ops = Object.keys(atomics), i = ops.length, val, op; if (i === 0) { // $set if (utils.isMongooseObject(value)) { value = value.toObject({depopulate: 1, _isNested: true}); } else if (value.valueOf) { value = value.valueOf(); } return operand(self, where, delta, data, value); } function iter(mem) { return utils.isMongooseObject(mem) ? mem.toObject({depopulate: 1, _isNested: true}) : mem; } while (i--) { op = ops[i]; val = atomics[op]; if (utils.isMongooseObject(val)) { val = val.toObject({depopulate: true, transform: false, _isNested: true}); } else if (Array.isArray(val)) { val = val.map(iter); } else if (val.valueOf) { val = val.valueOf(); } if (op === '$addToSet') { val = {$each: val}; } operand(self, where, delta, data, val, op); } } /** * Produces a special query document of the modified properties used in updates. * * @api private * @method $__delta * @memberOf Model * @instance */ Model.prototype.$__delta = function() { const dirty = this.$__dirty(); if (!dirty.length && VERSION_ALL !== this.$__.version) { return; } let where = {}; let delta = {}; const len = dirty.length; const divergent = []; let d = 0; where._id = this._doc._id; // If `_id` is an object, need to depopulate, but also need to be careful // because `_id` can technically be null (see gh-6406) if (get(where, '_id.$__', null) != null) { where._id = where._id.toObject({ transform: false, depopulate: true }); } for (; d < len; ++d) { const data = dirty[d]; let value = data.value; const match = checkDivergentArray(this, data.path, value); if (match) { divergent.push(match); continue; } const pop = this.populated(data.path, true); if (!pop && this.$__.selected) { // If any array was selected using an $elemMatch projection, we alter the path and where clause // NOTE: MongoDB only supports projected $elemMatch on top level array. 
const pathSplit = data.path.split('.'); const top = pathSplit[0]; if (this.$__.selected[top] && this.$__.selected[top].$elemMatch) { // If the selected array entry was modified if (pathSplit.length > 1 && pathSplit[1] == 0 && typeof where[top] === 'undefined') { where[top] = this.$__.selected[top]; pathSplit[1] = '$'; data.path = pathSplit.join('.'); } // if the selected array was modified in any other way throw an error else { divergent.push(data.path); continue; } } } if (divergent.length) continue; if (undefined === value) { operand(this, where, delta, data, 1, '$unset'); } else if (value === null) { operand(this, where, delta, data, null); } else if (value._path && value._atomics) { // arrays and other custom types (support plugins etc) handleAtomics(this, where, delta, data, value); } else if (value._path && Buffer.isBuffer(value)) { // MongooseBuffer value = value.toObject(); operand(this, where, delta, data, value); } else { value = utils.clone(value, { depopulate: true, transform: false, virtuals: false, _isNested: true }); operand(this, where, delta, data, value); } } if (divergent.length) { return new DivergentArrayError(divergent); } if (this.$__.version) { this.$__version(where, delta); } return [where, delta]; }; /*! * Determine if array was populated with some form of filter and is now * being updated in a manner which could overwrite data unintentionally. * * @see https://github.com/Automattic/mongoose/issues/1334 * @param {Document} doc * @param {String} path * @return {String|undefined} */ function checkDivergentArray(doc, path, array) { // see if we populated this path var pop = doc.populated(path, true); if (!pop && doc.$__.selected) { // If any array was selected using an $elemMatch projection, we deny the update. // NOTE: MongoDB only supports projected $elemMatch on top level array. var top = path.split('.')[0]; if (doc.$__.selected[top + '.$']) { return top; } } if (!(pop && array && array.isMongooseArray)) return; // If the array was populated using options that prevented all // documents from being returned (match, skip, limit) or they // deselected the _id field, $pop and $set of the array are // not safe operations. If _id was deselected, we do not know // how to remove elements. $pop will pop off the _id from the end // of the array in the db which is not guaranteed to be the // same as the last element we have here. $set of the entire array // would be similarily destructive as we never received all // elements of the array and potentially would overwrite data. var check = pop.options.match || pop.options.options && utils.object.hasOwnProperty(pop.options.options, 'limit') || // 0 is not permitted pop.options.options && pop.options.options.skip || // 0 is permitted pop.options.select && // deselected _id? (pop.options.select._id === 0 || /\s?-_id\s?/.test(pop.options.select)); if (check) { var atomics = array._atomics; if (Object.keys(atomics).length === 0 || atomics.$set || atomics.$pop) { return path; } } } /** * Appends versioning to the where and update clauses. * * @api private * @method $__version * @memberOf Model * @instance */ Model.prototype.$__version = function(where, delta) { var key = this.schema.options.versionKey; if (where === true) { // this is an insert if (key) this.setValue(key, delta[key] = 0); return; } // updates // only apply versioning if our versionKey was selected. else // there is no way to select the correct version. we could fail // fast here and force them to include the versionKey but // thats a bit intrusive. 
can we do this automatically? if (!this.isSelected(key)) { return; } // $push $addToSet don't need the where clause set if (VERSION_WHERE === (VERSION_WHERE & this.$__.version)) { var value = this.getValue(key); if (value != null) where[key] = value; } if (VERSION_INC === (VERSION_INC & this.$__.version)) { if (get(delta.$set, key, null) != null) { // Version key is getting set, means we'll increment the doc's version // after a successful save, so we should set the incremented version so // future saves don't fail (gh-5779) ++delta.$set[key]; } else { delta.$inc = delta.$inc || {}; delta.$inc[key] = 1; } } }; /** * Signal that we desire an increment of this documents version. * * ####Example: * * Model.findById(id, function (err, doc) { * doc.increment(); * doc.save(function (err) { .. }) * }) * * @see versionKeys http://mongoosejs.com/docs/guide.html#versionKey * @api public */ Model.prototype.increment = function increment() { this.$__.version = VERSION_ALL; return this; }; /** * Returns a query object * * @api private * @method $__where * @memberOf Model * @instance */ Model.prototype.$__where = function _where(where) { where || (where = {}); if (!where._id) { where._id = this._doc._id; } if (this._doc._id === void 0) { return new Error('No _id found on document!'); } return where; }; /** * Removes this document from the db. * * ####Example: * product.remove(function (err, product) { * if (err) return handleError(err); * Product.findById(product._id, function (err, product) { * console.log(product) // null * }) * }) * * * As an extra measure of flow control, remove will return a Promise (bound to `fn` if passed) so it could be chained, or hooked to recieve errors * * ####Example: * product.remove().then(function (product) { * ... * }).catch(function (err) { * assert.ok(err) * }) * * @param {function(err,product)} [fn] optional callback * @return {Promise} Promise * @api public */ Model.prototype.remove = function remove(options, fn) { if (typeof options === 'function') { fn = options; options = undefined; } if (!options) { options = {}; } if (fn) { fn = this.constructor.$wrapCallback(fn); } return utils.promiseOrCallback(fn, cb => { this.$__remove(options, cb); }); }; /*! * ignore */ Model.prototype.$__remove = function $__remove(options, cb) { if (this.$__.isDeleted) { return utils.immediate(() => cb(null, this)); } var where = this.$__where(); if (where instanceof Error) { return cb(where); } this.collection.remove(where, options, err => { if (!err) { this.$__.isDeleted = true; this.emit('remove', this); this.constructor.emit('remove', this); return cb(null, this); } this.$__.isDeleted = false; cb(err); }); }; /** * Returns another Model instance. * * ####Example: * * var doc = new Tank; * doc.model('User').findById(id, callback); * * @param {String} name model name * @api public */ Model.prototype.model = function model(name) { return this.db.model(name); }; /** * Adds a discriminator type. * * ####Example: * * function BaseSchema() { * Schema.apply(this, arguments); * * this.add({ * name: String, * createdAt: Date * }); * } * util.inherits(BaseSchema, Schema); * * var PersonSchema = new BaseSchema(); * var BossSchema = new BaseSchema({ department: String }); * * var Person = mongoose.model('Person', PersonSchema); * var Boss = Person.discriminator('Boss', BossSchema); * new Boss().__t; // "Boss". 
`__t` is the default `discriminatorKey` * * var employeeSchema = new Schema({ boss: ObjectId }); * var Employee = Person.discriminator('Employee', employeeSchema, 'staff'); * new Employee().__t; // "staff" because of 3rd argument above * * @param {String} name discriminator model name * @param {Schema} schema discriminator model schema * @param {String} value the string stored in the `discriminatorKey` property * @api public */ Model.discriminator = function(name, schema, value) { var model; if (typeof name === 'function') { model = name; name = utils.getFunctionName(model); if (!(model.prototype instanceof Model)) { throw new Error('The provided class ' + name + ' must extend Model'); } } schema = discriminator(this, name, schema, value); if (this.db.models[name]) { throw new OverwriteModelError(name); } schema.$isRootDiscriminator = true; model = this.db.model(model || name, schema, this.collection.name); this.discriminators[name] = model; var d = this.discriminators[name]; d.prototype.__proto__ = this.prototype; Object.defineProperty(d, 'baseModelName', { value: this.modelName, configurable: true, writable: false }); // apply methods and statics applyMethods(d, schema); applyStatics(d, schema); return d; }; // Model (class) features /*! * Give the constructor the ability to emit events. */ for (var i in EventEmitter.prototype) { Model[i] = EventEmitter.prototype[i]; } /** * Performs any async initialization of this model against MongoDB. Currently, * this function is only responsible for building [indexes](https://docs.mongodb.com/manual/indexes/), * unless [`autoIndex`](http://mongoosejs.com/docs/guide.html#autoIndex) is turned off. * * This function is called automatically, so you don't need to call it. * This function is also idempotent, so you may call it to get back a promise * that will resolve when your indexes are finished building as an alternative * to `MyModel.on('index')` * * ####Example: * * var eventSchema = new Schema({ thing: { type: 'string', unique: true }}) * // This calls `Event.init()` implicitly, so you don't need to call * // `Event.init()` on your own. * var Event = mongoose.model('Event', eventSchema); * * Event.init().then(function(Event) { * // You can also use `Event.on('index')` if you prefer event emitters * // over promises. * console.log('Indexes are done building!'); * }); * * @api public * @param {Function} [callback] * @returns {Promise} */ Model.init = function init(callback) { this.schema.emit('init', this); if (this.$init) { if (callback) { this.$init.then(() => callback(), err => callback(err)); return null; } return this.$init; } const Promise = PromiseProvider.get(); const autoIndex = this.schema.options.autoIndex; this.$init = new Promise((resolve, reject) => { if (autoIndex || (autoIndex == null && this.db.config.autoIndex)) { this.ensureIndexes({ _automatic: true }, function(error) { if (error) { return reject(error); } resolve(this); }); } else { resolve(this); } }); if (callback) { this.$init.then(() => callback(), err => callback(err)); this.$caught = true; return null; } else { const _catch = this.$init.catch; const _this = this; this.$init.catch = function() { this.$caught = true; return _catch.apply(_this.$init, arguments); }; } return this.$init; }; /** * Makes the indexes in MongoDB match the indexes defined in this model's * schema. This function will drop any indexes that are not defined in * the model's schema except the `_id` index, and build any indexes that * are in your schema but not in MongoDB. 
* * @param {Object} [options] options to pass to `ensureIndexes()` * @param {Function} [callback] optional callback * @return {Promise|undefined} Returns `undefined` if callback is specified, returns a promise if no callback. * @api public */ Model.syncIndexes = function syncIndexes(options, callback) { callback = this.$wrapCallback(callback); const dropNonSchemaIndexes = (cb) => { this.listIndexes((err, indexes) => { if (err != null) { return cb(err); } const schemaIndexes = this.schema.indexes(); let toDrop = []; for (const index of indexes) { let found = false; // Never try to drop `_id` index, MongoDB server doesn't allow it if (index.key._id) { continue; } for (const schemaIndex of schemaIndexes) { const key = schemaIndex[0]; const options = _decorateDiscriminatorIndexOptions(this, utils.clone(schemaIndex[1])); // If these options are different, need to rebuild the index const optionKeys = ['unique', 'partialFilterExpression', 'sparse']; const indexCopy = Object.assign({}, index); for (const key of optionKeys) { if (!(key in options) && !(key in indexCopy)) { continue; } indexCopy[key] = options[key]; } if (utils.deepEqual(key, index.key) && utils.deepEqual(index, indexCopy)) { found = true; break; } } if (!found) { toDrop.push(index.name); } } if (toDrop.length === 0) { return cb(null, []); } dropIndexes(toDrop, cb); }); }; const dropIndexes = (toDrop, cb) => { let remaining = toDrop.length; let error = false; toDrop.forEach(indexName => { this.collection.dropIndex(indexName, err => { if (err != null) { error = true; return cb(err); } if (!error) { --remaining || cb(null, toDrop); } }); }); }; return utils.promiseOrCallback(callback, cb => { dropNonSchemaIndexes((err, dropped) => { if (err != null) { return cb(err); } this.ensureIndexes(options, err => { if (err != null) { return cb(err); } cb(null, dropped); }); }); }); }; /** * Lists the indexes currently defined in MongoDB. This may or may not be * the same as the indexes defined in your schema depending on whether you * use the [`autoIndex` option](/docs/guide.html#autoIndex) and if you * build indexes manually. * * @param {Function} [cb] optional callback * @return {Promise|undefined} Returns `undefined` if callback is specified, returns a promise if no callback. * @api public */ Model.listIndexes = function init(callback) { callback = this.$wrapCallback(callback); const _listIndexes = cb => { this.collection.listIndexes().toArray(cb); }; return utils.promiseOrCallback(callback, cb => { // Buffering if (this.collection.buffer) { this.collection.addQueue(_listIndexes, [cb]); } else { _listIndexes(cb); } }); }; /** * Sends `createIndex` commands to mongo for each index declared in the schema. * The `createIndex` commands are sent in series. * * ####Example: * * Event.ensureIndexes(function (err) { * if (err) return handleError(err); * }); * * After completion, an `index` event is emitted on this `Model` passing an error if one occurred. * * ####Example: * * var eventSchema = new Schema({ thing: { type: 'string', unique: true }}) * var Event = mongoose.model('Event', eventSchema); * * Event.on('index', function (err) { * if (err) console.error(err); // error occurred during index creation * }) * * _NOTE: It is not recommended that you run this in production. Index creation may impact database performance depending on your load. 
Use with caution._ * * @param {Object} [options] internal options * @param {Function} [cb] optional callback * @return {Promise} * @api public */ Model.ensureIndexes = function ensureIndexes(options, callback) { if (typeof options === 'function') { callback = options; options = null; } if (callback) { callback = this.$wrapCallback(callback); } return utils.promiseOrCallback(callback, cb => { _ensureIndexes(this, options || {}, error => { if (error) { return cb(error); } cb(null); }); }); }; /** * Similar to `ensureIndexes()`, except for it uses the [`createIndex`](http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#createIndex) * function. The `ensureIndex()` function checks to see if an index with that * name already exists, and, if not, does not attempt to create the index. * `createIndex()` bypasses this check. * * @param {Object} [options] internal options * @param {Function} [cb] optional callback * @return {Promise} * @api public */ Model.createIndexes = function createIndexes(options, callback) { if (typeof options === 'function') { callback = options; options = {}; } options = options || {}; options.createIndex = true; return this.ensureIndexes(options, callback); }; function _ensureIndexes(model, options, callback) { const indexes = model.schema.indexes(); options = options || {}; const done = function(err) { if (err && !model.$caught) { model.emit('error', err); } model.emit('index', err); callback && callback(err); }; for (const index of indexes) { const keys = Object.keys(index[0]); if (keys.length === 1 && keys[0] === '_id') { console.warn('mongoose: Cannot specify a custom index on `_id` for ' + 'model name "' + model.modelName + '", ' + 'MongoDB does not allow overwriting the default `_id` index. See ' + 'http://bit.ly/mongodb-id-index'); } } if (!indexes.length) { utils.immediate(function() { done(); }); return; } // Indexes are created one-by-one to support how MongoDB < 2.4 deals // with background indexes. const indexSingleDone = function(err, fields, options, name) { model.emit('index-single-done', err, fields, options, name); }; const indexSingleStart = function(fields, options) { model.emit('index-single-start', fields, options); }; const create = function() { if (options._automatic) { if (model.schema.options.autoIndex === false || (model.schema.options.autoIndex == null && model.db.config.autoIndex === false)) { return done(); } } const index = indexes.shift(); if (!index) { return done(); } const indexFields = utils.clone(index[0]); const indexOptions = utils.clone(index[1]); _decorateDiscriminatorIndexOptions(model, indexOptions); _handleSafe(options); indexSingleStart(indexFields, options); const methodName = options.createIndex ? 'createIndex' : 'ensureIndex'; model.collection[methodName](indexFields, indexOptions, utils.tick(function(err, name) { indexSingleDone(err, indexFields, indexOptions, name); if (err) { return done(err); } create(); })); }; utils.immediate(function() { // If buffering is off, do this manually. if (options._automatic && !model.collection.collection) { model.collection.addQueue(create, []); } else { create(); } }); } function _decorateDiscriminatorIndexOptions(model, indexOptions) { // If the model is a discriminator and it has a unique index, add a // partialFilterExpression by default so the unique index will only apply // to that discriminator. 
if (model.baseModelName != null && indexOptions.unique && !('partialFilterExpression' in indexOptions) && !('sparse' in indexOptions)) { indexOptions.partialFilterExpression = { [model.schema.options.discriminatorKey]: model.modelName }; } return indexOptions; } function _handleSafe(options) { if (options.safe) { if (typeof options.safe === 'boolean') { options.w = options.safe; delete options.safe; } if (typeof options.safe === 'object') { options.w = options.safe.w; options.j = options.safe.j; options.wtimeout = options.safe.wtimeout; delete options.safe; } } } /** * Schema the model uses. * * @property schema * @receiver Model * @api public * @memberOf Model */ Model.schema; /*! * Connection instance the model uses. * * @property db * @api public * @memberOf Model */ Model.db; /*! * Collection the model uses. * * @property collection * @api public * @memberOf Model */ Model.collection; /** * Base Mongoose instance the model uses. * * @property base * @api public * @memberOf Model */ Model.base; /** * Registered discriminators for this model. * * @property discriminators * @api public * @memberOf Model */ Model.discriminators; /** * Translate any aliases fields/conditions so the final query or document object is pure * * ####Example: * * Character * .find(Character.translateAliases({ * '名': 'Eddard Stark' // Alias for 'name' * }) * .exec(function(err, characters) {}) * * ####Note: * Only translate arguments of object type anything else is returned raw * * @param {Object} raw fields/conditions that may contain aliased keys * @return {Object} the translated 'pure' fields/conditions */ Model.translateAliases = function translateAliases(fields) { var aliases = this.schema.aliases; if (typeof fields === 'object') { // Fields is an object (query conditions or document fields) for (var key in fields) { if (aliases[key]) { fields[aliases[key]] = fields[key]; delete fields[key]; } } return fields; } else { // Don't know typeof fields return fields; } }; /** * Removes all documents that match `conditions` from the collection. * To remove just the first document that matches `conditions`, set the `single` * option to true. * * ####Example: * * Character.remove({ name: 'Eddard Stark' }, function (err) {}); * * ####Note: * * This method sends a remove command directly to MongoDB, no Mongoose documents * are involved. Because no Mongoose documents are involved, _no middleware * (hooks) are executed_. * * @param {Object} conditions * @param {Function} [callback] * @return {Query} * @api public */ Model.remove = function remove(conditions, callback) { if (typeof conditions === 'function') { callback = conditions; conditions = {}; } // get the mongodb collection object var mq = new this.Query({}, {}, this, this.collection); callback = this.$wrapCallback(callback); return mq.remove(conditions, callback); }; /** * Deletes the first document that matches `conditions` from the collection. * Behaves like `remove()`, but deletes at most one document regardless of the * `single` option. * * ####Example: * * Character.deleteOne({ name: 'Eddard Stark' }, function (err) {}); * * ####Note: * * Like `Model.remove()`, this function does **not** trigger `pre('remove')` or `post('remove')` hooks. 
* * @param {Object} conditions * @param {Function} [callback] * @return {Query} * @api public */ Model.deleteOne = function deleteOne(conditions, callback) { if (typeof conditions === 'function') { callback = conditions; conditions = {}; } // get the mongodb collection object var mq = new this.Query(conditions, {}, this, this.collection); callback = this.$wrapCallback(callback); return mq.deleteOne(callback); }; /** * Deletes all of the documents that match `conditions` from the collection. * Behaves like `remove()`, but deletes all documents that match `conditions` * regardless of the `single` option. * * ####Example: * * Character.deleteMany({ name: /Stark/, age: { $gte: 18 } }, function (err) {}); * * ####Note: * * Like `Model.remove()`, this function does **not** trigger `pre('remove')` or `post('remove')` hooks. * * @param {Object} conditions * @param {Function} [callback] * @return {Query} * @api public */ Model.deleteMany = function deleteMany(conditions, callback) { if (typeof conditions === 'function') { callback = conditions; conditions = {}; } // get the mongodb collection object var mq = new this.Query(conditions, {}, this, this.collection); if (callback) { callback = this.$wrapCallback(callback); } return mq.deleteMany(callback); }; /** * Finds documents * * The `conditions` are cast to their respective SchemaTypes before the command is sent. * * ####Examples: * * // named john and at least 18 * MyModel.find({ name: 'john', age: { $gte: 18 }}); * * // executes immediately, passing results to callback * MyModel.find({ name: 'john', age: { $gte: 18 }}, function (err, docs) {}); * * // name LIKE john and only selecting the "name" and "friends" fields, executing immediately * MyModel.find({ name: /john/i }, 'name friends', function (err, docs) { }) * * // passing options * MyModel.find({ name: /john/i }, null, { skip: 10 }) * * // passing options and executing immediately * MyModel.find({ name: /john/i }, null, { skip: 10 }, function (err, docs) {}); * * // executing a query explicitly * var query = MyModel.find({ name: /john/i }, null, { skip: 10 }) * query.exec(function (err, docs) {}); * * // using the promise returned from executing a query * var query = MyModel.find({ name: /john/i }, null, { skip: 10 }); * var promise = query.exec(); * promise.addBack(function (err, docs) {}); * * @param {Object} conditions * @param {Object|String} [projection] optional fields to return, see [`Query.prototype.select()`](#query_Query-select) * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @see field selection #query_Query-select * @see promise #promise-js * @api public */ Model.find = function find(conditions, projection, options, callback) { if (typeof conditions === 'function') { callback = conditions; conditions = {}; projection = null; options = null; } else if (typeof projection === 'function') { callback = projection; projection = null; options = null; } else if (typeof options === 'function') { callback = options; options = null; } var mq = new this.Query({}, {}, this, this.collection); mq.select(projection); mq.setOptions(options); if (this.schema.discriminatorMapping && this.schema.discriminatorMapping.isRoot && mq.selectedInclusively()) { // Need to select discriminator key because original schema doesn't have it mq.select(this.schema.options.discriminatorKey); } if (callback) { callback = this.$wrapCallback(callback); } return mq.find(conditions, 
callback); }; /** * Finds a single document by its _id field. `findById(id)` is almost* * equivalent to `findOne({ _id: id })`. If you want to query by a document's * `_id`, use `findById()` instead of `findOne()`. * * The `id` is cast based on the Schema before sending the command. * * This function triggers the following middleware. * * - `findOne()` * * \* Except for how it treats `undefined`. If you use `findOne()`, you'll see * that `findOne(undefined)` and `findOne({ _id: undefined })` are equivalent * to `findOne({})` and return arbitrary documents. However, mongoose * translates `findById(undefined)` into `findOne({ _id: null })`. * * ####Example: * * // find adventure by id and execute immediately * Adventure.findById(id, function (err, adventure) {}); * * // same as above * Adventure.findById(id).exec(callback); * * // select only the adventures name and length * Adventure.findById(id, 'name length', function (err, adventure) {}); * * // same as above * Adventure.findById(id, 'name length').exec(callback); * * // include all properties except for `length` * Adventure.findById(id, '-length').exec(function (err, adventure) {}); * * // passing options (in this case return the raw js objects, not mongoose documents by passing `lean` * Adventure.findById(id, 'name', { lean: true }, function (err, doc) {}); * * // same as above * Adventure.findById(id, 'name').lean().exec(function (err, doc) {}); * * @param {Object|String|Number} id value of `_id` to query by * @param {Object|String} [projection] optional fields to return, see [`Query.prototype.select()`](#query_Query-select) * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @see field selection #query_Query-select * @see lean queries #query_Query-lean * @api public */ Model.findById = function findById(id, projection, options, callback) { if (typeof id === 'undefined') { id = null; } if (callback) { callback = this.$wrapCallback(callback); } return this.findOne({_id: id}, projection, options, callback); }; /** * Finds one document. * * The `conditions` are cast to their respective SchemaTypes before the command is sent. * * *Note:* `conditions` is optional, and if `conditions` is null or undefined, * mongoose will send an empty `findOne` command to MongoDB, which will return * an arbitrary document. If you're querying by `_id`, use `findById()` instead. * * ####Example: * * // find one iphone adventures - iphone adventures?? 
* Adventure.findOne({ type: 'iphone' }, function (err, adventure) {}); * * // same as above * Adventure.findOne({ type: 'iphone' }).exec(function (err, adventure) {}); * * // select only the adventures name * Adventure.findOne({ type: 'iphone' }, 'name', function (err, adventure) {}); * * // same as above * Adventure.findOne({ type: 'iphone' }, 'name').exec(function (err, adventure) {}); * * // specify options, in this case lean * Adventure.findOne({ type: 'iphone' }, 'name', { lean: true }, callback); * * // same as above * Adventure.findOne({ type: 'iphone' }, 'name', { lean: true }).exec(callback); * * // chaining findOne queries (same as above) * Adventure.findOne({ type: 'iphone' }).select('name').lean().exec(callback); * * @param {Object} [conditions] * @param {Object|String} [projection] optional fields to return, see [`Query.prototype.select()`](#query_Query-select) * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @see field selection #query_Query-select * @see lean queries #query_Query-lean * @api public */ Model.findOne = function findOne(conditions, projection, options, callback) { if (typeof options === 'function') { callback = options; options = null; } else if (typeof projection === 'function') { callback = projection; projection = null; options = null; } else if (typeof conditions === 'function') { callback = conditions; conditions = {}; projection = null; options = null; } // get the mongodb collection object var mq = new this.Query({}, {}, this, this.collection); mq.select(projection); mq.setOptions(options); if (this.schema.discriminatorMapping && this.schema.discriminatorMapping.isRoot && mq.selectedInclusively()) { mq.select(this.schema.options.discriminatorKey); } if (callback) { callback = this.$wrapCallback(callback); } return mq.findOne(conditions, callback); }; /** * Estimates the number of documents in the MongoDB collection. Faster than * using `countDocuments()` for large collections because * `estimatedDocumentCount()` uses collection metadata rather than scanning * the entire collection. * * ####Example: * * const numAdventures = Adventure.estimatedDocumentCount(); * * @param {Object} [options] * @param {Function} [callback] * @return {Query} * @api public */ Model.estimatedDocumentCount = function estimatedDocumentCount(options, callback) { // get the mongodb collection object const mq = new this.Query({}, {}, this, this.collection); callback = this.$wrapCallback(callback); return mq.estimatedDocumentCount(options, callback); }; /** * Counts number of matching documents in a database collection. * * ####Example: * * Adventure.countDocuments({ type: 'jungle' }, function (err, count) { * if (err) .. * console.log('there are %d jungle adventures', count); * }); * * @param {Object} conditions * @param {Function} [callback] * @return {Query} * @api public */ Model.countDocuments = function countDocuments(conditions, callback) { if (typeof conditions === 'function') { callback = conditions; conditions = {}; } // get the mongodb collection object const mq = new this.Query({}, {}, this, this.collection); callback = this.$wrapCallback(callback); return mq.countDocuments(conditions, callback); }; /** * Counts number of matching documents in a database collection. This method * is deprecated, use `countDocuments()` instead. * * ####Example: * * Adventure.count({ type: 'jungle' }, function (err, count) { * if (err) .. 
* console.log('there are %d jungle adventures', count); * }); * * @deprecated * @param {Object} conditions * @param {Function} [callback] * @return {Query} * @api public */ Model.count = function count(conditions, callback) { if (typeof conditions === 'function') { callback = conditions; conditions = {}; } // get the mongodb collection object var mq = new this.Query({}, {}, this, this.collection); if (callback) { callback = this.$wrapCallback(callback); } return mq.count(conditions, callback); }; /** * Creates a Query for a `distinct` operation. * * Passing a `callback` immediately executes the query. * * ####Example * * Link.distinct('url', { clicks: {$gt: 100}}, function (err, result) { * if (err) return handleError(err); * * assert(Array.isArray(result)); * console.log('unique urls with more than 100 clicks', result); * }) * * var query = Link.distinct('url'); * query.exec(callback); * * @param {String} field * @param {Object} [conditions] optional * @param {Function} [callback] * @return {Query} * @api public */ Model.distinct = function distinct(field, conditions, callback) { // get the mongodb collection object var mq = new this.Query({}, {}, this, this.collection); if (typeof conditions === 'function') { callback = conditions; conditions = {}; } if (callback) { callback = this.$wrapCallback(callback); } return mq.distinct(field, conditions, callback); }; /** * Creates a Query, applies the passed conditions, and returns the Query. * * For example, instead of writing: * * User.find({age: {$gte: 21, $lte: 65}}, callback); * * we can instead write: * * User.where('age').gte(21).lte(65).exec(callback); * * Since the Query class also supports `where` you can continue chaining * * User * .where('age').gte(21).lte(65) * .where('name', /^b/i) * ... etc * * @param {String} path * @param {Object} [val] optional value * @return {Query} * @api public */ Model.where = function where(path, val) { void val; // eslint // get the mongodb collection object var mq = new this.Query({}, {}, this, this.collection).find({}); return mq.where.apply(mq, arguments); }; /** * Creates a `Query` and specifies a `$where` condition. * * Sometimes you need to query for things in mongodb using a JavaScript expression. You can do so via `find({ $where: javascript })`, or you can use the mongoose shortcut method $where via a Query chain or from your mongoose Model. * * Blog.$where('this.username.indexOf("val") !== -1').exec(function (err, docs) {}); * * @param {String|Function} argument is a javascript string or anonymous function * @method $where * @memberOf Model * @return {Query} * @see Query.$where #query_Query-%24where * @api public */ Model.$where = function $where() { var mq = new this.Query({}, {}, this, this.collection).find({}); return mq.$where.apply(mq, arguments); }; /** * Issues a mongodb findAndModify update command. * * Finds a matching document, updates it according to the `update` arg, passing any `options`, and returns the found document (if any) to the callback. The query executes immediately if `callback` is passed else a Query object is returned. * * ####Options: * * - `new`: bool - if true, return the modified document rather than the original. defaults to false (changed in 4.0) * - `upsert`: bool - creates the object if it doesn't exist. defaults to false. * - `fields`: {Object|String} - Field selection. 
Equivalent to `.select(fields).findOneAndUpdate()` * - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0 * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `runValidators`: if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema. * - `setDefaultsOnInsert`: if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/). * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findOneAndUpdate(conditions, update, options, callback) // executes * A.findOneAndUpdate(conditions, update, options) // returns Query * A.findOneAndUpdate(conditions, update, callback) // executes * A.findOneAndUpdate(conditions, update) // returns Query * A.findOneAndUpdate() // returns Query * * ####Note: * * All top level update keys which are not `atomic` operation names are treated as set operations: * * ####Example: * * var query = { name: 'borne' }; * Model.findOneAndUpdate(query, { name: 'jason bourne' }, options, callback) * * // is sent as * Model.findOneAndUpdate(query, { $set: { name: 'jason bourne' }}, options, callback) * * This helps prevent accidentally overwriting your document with `{ name: 'jason bourne' }`. * * ####Note: * * Values are cast to their appropriate types when using the findAndModify helpers. * However, the below are not executed by default. * * - defaults. Use the `setDefaultsOnInsert` option to override. * * `findAndModify` helpers support limited validation. You can * enable these by setting the `runValidators` options, * respectively. * * If you need full-fledged validation, use the traditional approach of first * retrieving the document. * * Model.findById(id, function (err, doc) { * if (err) .. * doc.name = 'jason bourne'; * doc.save(callback); * }); * * @param {Object} [conditions] * @param {Object} [update] * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](http://mongoosejs.com/docs/api.html#query_Query-lean). 
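 *   For example, passing `{ new: true, lean: true }` yields the updated
 *   document as a plain object.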
* @param {Function} [callback] * @return {Query} * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command * @api public */ Model.findOneAndUpdate = function(conditions, update, options, callback) { if (typeof options === 'function') { callback = options; options = null; } else if (arguments.length === 1) { if (typeof conditions === 'function') { var msg = 'Model.findOneAndUpdate(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findOneAndUpdate(conditions, update, options, callback)\n' + ' ' + this.modelName + '.findOneAndUpdate(conditions, update, options)\n' + ' ' + this.modelName + '.findOneAndUpdate(conditions, update)\n' + ' ' + this.modelName + '.findOneAndUpdate(update)\n' + ' ' + this.modelName + '.findOneAndUpdate()\n'; throw new TypeError(msg); } update = conditions; conditions = undefined; } if (callback) { callback = this.$wrapCallback(callback); } var fields; if (options && options.fields) { fields = options.fields; } var retainKeyOrder = get(options, 'retainKeyOrder') || get(this, 'schema.options.retainKeyOrder') || false; update = utils.clone(update, { depopulate: true, _isNested: true, retainKeyOrder: retainKeyOrder }); if (this.schema.options.versionKey && options && options.upsert) { var updatedPaths = modifiedPaths(update); if (!updatedPaths[this.schema.options.versionKey]) { if (options.overwrite) { update[this.schema.options.versionKey] = 0; } else { if (!update.$setOnInsert) { update.$setOnInsert = {}; } update.$setOnInsert[this.schema.options.versionKey] = 0; } } } var mq = new this.Query({}, {}, this, this.collection); mq.select(fields); return mq.findOneAndUpdate(conditions, update, options, callback); }; /** * Issues a mongodb findAndModify update command by a document's _id field. * `findByIdAndUpdate(id, ...)` is equivalent to `findOneAndUpdate({ _id: id }, ...)`. * * Finds a matching document, updates it according to the `update` arg, * passing any `options`, and returns the found document (if any) to the * callback. The query executes immediately if `callback` is passed else a * Query object is returned. * * This function triggers the following middleware. * * - `findOneAndUpdate()` * * ####Options: * * - `new`: bool - true to return the modified document rather than the original. defaults to false * - `upsert`: bool - creates the object if it doesn't exist. defaults to false. * - `runValidators`: if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema. * - `setDefaultsOnInsert`: if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/). 
* - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `select`: sets the document fields to return * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findByIdAndUpdate(id, update, options, callback) // executes * A.findByIdAndUpdate(id, update, options) // returns Query * A.findByIdAndUpdate(id, update, callback) // executes * A.findByIdAndUpdate(id, update) // returns Query * A.findByIdAndUpdate() // returns Query * * ####Note: * * All top level update keys which are not `atomic` operation names are treated as set operations: * * ####Example: * * Model.findByIdAndUpdate(id, { name: 'jason bourne' }, options, callback) * * // is sent as * Model.findByIdAndUpdate(id, { $set: { name: 'jason bourne' }}, options, callback) * * This helps prevent accidentally overwriting your document with `{ name: 'jason bourne' }`. * * ####Note: * * Values are cast to their appropriate types when using the findAndModify helpers. * However, the below are not executed by default. * * - defaults. Use the `setDefaultsOnInsert` option to override. * * `findAndModify` helpers support limited validation. You can * enable these by setting the `runValidators` options, * respectively. * * If you need full-fledged validation, use the traditional approach of first * retrieving the document. * * Model.findById(id, function (err, doc) { * if (err) .. * doc.name = 'jason bourne'; * doc.save(callback); * }); * * @param {Object|Number|String} id value of `_id` to query by * @param {Object} [update] * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](http://mongoosejs.com/docs/api.html#query_Query-lean). * @param {Function} [callback] * @return {Query} * @see Model.findOneAndUpdate #model_Model.findOneAndUpdate * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command * @api public */ Model.findByIdAndUpdate = function(id, update, options, callback) { if (callback) { callback = this.$wrapCallback(callback); } if (arguments.length === 1) { if (typeof id === 'function') { var msg = 'Model.findByIdAndUpdate(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findByIdAndUpdate(id, callback)\n' + ' ' + this.modelName + '.findByIdAndUpdate(id)\n' + ' ' + this.modelName + '.findByIdAndUpdate()\n'; throw new TypeError(msg); } return this.findOneAndUpdate({_id: id}, undefined); } // if a model is passed in instead of an id if (id instanceof Document) { id = id._id; } return this.findOneAndUpdate.call(this, {_id: id}, update, options, callback); }; /** * Issue a MongoDB `findOneAndDelete()` command. * * Finds a matching document, removes it, and passes the found document * (if any) to the callback. * * Executes immediately if `callback` is passed else a Query object is returned. * * This function triggers the following middleware. 
* * - `findOneAndDelete()` * * This function differs slightly from `Model.findOneAndRemove()` in that * `findOneAndRemove()` becomes a [MongoDB `findAndModify()` command](https://docs.mongodb.com/manual/reference/method/db.collection.findAndModify/), * as opposed to a `findOneAndDelete()` command. For most mongoose use cases, * this distinction is purely pedantic. You should use `findOneAndDelete()` * unless you have a good reason not to. * * ####Options: * * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0 * - `select`: sets the document fields to return * - `projection`: like select, it determines which fields to return, ex. `{ projection: { _id: 0 } }` * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findOneAndDelete(conditions, options, callback) // executes * A.findOneAndDelete(conditions, options) // return Query * A.findOneAndDelete(conditions, callback) // executes * A.findOneAndDelete(conditions) // returns Query * A.findOneAndDelete() // returns Query * * Values are cast to their appropriate types when using the findAndModify helpers. * However, the below are not executed by default. * * - defaults. Use the `setDefaultsOnInsert` option to override. * * `findAndModify` helpers support limited validation. You can * enable these by setting the `runValidators` options, * respectively. * * If you need full-fledged validation, use the traditional approach of first * retrieving the document. * * Model.findById(id, function (err, doc) { * if (err) .. * doc.name = 'jason bourne'; * doc.save(callback); * }); * * @param {Object} conditions * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @api public */ Model.findOneAndDelete = function(conditions, options, callback) { if (arguments.length === 1 && typeof conditions === 'function') { var msg = 'Model.findOneAndDelete(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findOneAndDelete(conditions, callback)\n' + ' ' + this.modelName + '.findOneAndDelete(conditions)\n' + ' ' + this.modelName + '.findOneAndDelete()\n'; throw new TypeError(msg); } if (typeof options === 'function') { callback = options; options = undefined; } if (callback) { callback = this.$wrapCallback(callback); } var fields; if (options) { fields = options.select; options.select = undefined; } var mq = new this.Query({}, {}, this, this.collection); mq.select(fields); return mq.findOneAndDelete(conditions, options, callback); }; /** * Issue a MongoDB `findOneAndDelete()` command by a document's _id field. * In other words, `findByIdAndDelete(id)` is a shorthand for * `findOneAndDelete({ _id: id })`. * * This function triggers the following middleware. 
* * - `findOneAndDelete()` * * @param {Object|Number|String} id value of `_id` to query by * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @see Model.findOneAndRemove #model_Model.findOneAndRemove * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command */ Model.findByIdAndDelete = function(id, options, callback) { if (arguments.length === 1 && typeof id === 'function') { var msg = 'Model.findByIdAndDelete(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findByIdAndDelete(id, callback)\n' + ' ' + this.modelName + '.findByIdAndDelete(id)\n' + ' ' + this.modelName + '.findByIdAndDelete()\n'; throw new TypeError(msg); } if (callback) { callback = this.$wrapCallback(callback); } return this.findOneAndDelete({_id: id}, options, callback); }; /** * Issue a mongodb findAndModify remove command. * * Finds a matching document, removes it, passing the found document (if any) to the callback. * * Executes immediately if `callback` is passed else a Query object is returned. * * This function triggers the following middleware. * * - `findOneAndRemove()` * * ####Options: * * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `maxTimeMS`: puts a time limit on the query - requires mongodb >= 2.6.0 * - `select`: sets the document fields to return * - `projection`: like select, it determines which fields to return, ex. `{ projection: { _id: 0 } }` * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findOneAndRemove(conditions, options, callback) // executes * A.findOneAndRemove(conditions, options) // return Query * A.findOneAndRemove(conditions, callback) // executes * A.findOneAndRemove(conditions) // returns Query * A.findOneAndRemove() // returns Query * * Values are cast to their appropriate types when using the findAndModify helpers. * However, the below are not executed by default. * * - defaults. Use the `setDefaultsOnInsert` option to override. * * `findAndModify` helpers support limited validation. You can * enable these by setting the `runValidators` options, * respectively. * * If you need full-fledged validation, use the traditional approach of first * retrieving the document. * * Model.findById(id, function (err, doc) { * if (err) .. 
* doc.name = 'jason bourne'; * doc.save(callback); * }); * * @param {Object} conditions * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command * @api public */ Model.findOneAndRemove = function(conditions, options, callback) { if (arguments.length === 1 && typeof conditions === 'function') { var msg = 'Model.findOneAndRemove(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findOneAndRemove(conditions, callback)\n' + ' ' + this.modelName + '.findOneAndRemove(conditions)\n' + ' ' + this.modelName + '.findOneAndRemove()\n'; throw new TypeError(msg); } if (typeof options === 'function') { callback = options; options = undefined; } if (callback) { callback = this.$wrapCallback(callback); } var fields; if (options) { fields = options.select; options.select = undefined; } var mq = new this.Query({}, {}, this, this.collection); mq.select(fields); return mq.findOneAndRemove(conditions, options, callback); }; /** * Issue a mongodb findAndModify remove command by a document's _id field. `findByIdAndRemove(id, ...)` is equivalent to `findOneAndRemove({ _id: id }, ...)`. * * Finds a matching document, removes it, passing the found document (if any) to the callback. * * Executes immediately if `callback` is passed, else a `Query` object is returned. * * This function triggers the following middleware. * * - `findOneAndRemove()` * * ####Options: * * - `sort`: if multiple docs are found by the conditions, sets the sort order to choose which doc to update * - `select`: sets the document fields to return * - `rawResult`: if true, returns the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.0/api/Collection.html#findAndModify) * - `strict`: overwrites the schema's [strict mode option](http://mongoosejs.com/docs/guide.html#strict) for this update * * ####Examples: * * A.findByIdAndRemove(id, options, callback) // executes * A.findByIdAndRemove(id, options) // return Query * A.findByIdAndRemove(id, callback) // executes * A.findByIdAndRemove(id) // returns Query * A.findByIdAndRemove() // returns Query * * @param {Object|Number|String} id value of `_id` to query by * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @see Model.findOneAndRemove #model_Model.findOneAndRemove * @see mongodb http://www.mongodb.org/display/DOCS/findAndModify+Command */ Model.findByIdAndRemove = function(id, options, callback) { if (arguments.length === 1 && typeof id === 'function') { var msg = 'Model.findByIdAndRemove(): First argument must not be a function.\n\n' + ' ' + this.modelName + '.findByIdAndRemove(id, callback)\n' + ' ' + this.modelName + '.findByIdAndRemove(id)\n' + ' ' + this.modelName + '.findByIdAndRemove()\n'; throw new TypeError(msg); } if (callback) { callback = this.$wrapCallback(callback); } return this.findOneAndRemove({_id: id}, options, callback); }; /** * Shortcut for saving one or more documents to the database. * `MyModel.create(docs)` does `new MyModel(doc).save()` for every doc in * docs. * * This function triggers the following middleware. 
* * - `save()` * * ####Example: * * // pass a spread of docs and a callback * Candy.create({ type: 'jelly bean' }, { type: 'snickers' }, function (err, jellybean, snickers) { * if (err) // ... * }); * * // pass an array of docs * var array = [{ type: 'jelly bean' }, { type: 'snickers' }]; * Candy.create(array, function (err, candies) { * if (err) // ... * * var jellybean = candies[0]; * var snickers = candies[1]; * // ... * }); * * // callback is optional; use the returned promise if you like: * var promise = Candy.create({ type: 'jawbreaker' }); * promise.then(function (jawbreaker) { * // ... * }) * * @param {Array|Object} docs Documents to insert, as a spread or array * @param {Object} [options] Options passed down to `save()`. To specify `options`, `docs` **must** be an array, not a spread. * @param {Function} [callback] callback * @return {Promise} * @api public */ Model.create = function create(doc, options, callback) { let args; let cb; const discriminatorKey = this.schema.options.discriminatorKey; if (Array.isArray(doc)) { args = doc; cb = typeof options === 'function' ? options : callback; options = options != null && typeof options === 'object' ? options : {}; } else { let last = arguments[arguments.length - 1]; options = {}; // Handle falsy callbacks re: #5061 if (typeof last === 'function' || !last) { cb = last; args = utils.args(arguments, 0, arguments.length - 1); } else { args = utils.args(arguments); } } if (cb) { cb = this.$wrapCallback(cb); } return utils.promiseOrCallback(cb, cb => { if (args.length === 0) { return cb(null); } const toExecute = []; let firstError; args.forEach(doc => { toExecute.push(callback => { const Model = this.discriminators && doc[discriminatorKey] != null ? this.discriminators[doc[discriminatorKey]] || getDiscriminatorByValue(this, doc[discriminatorKey]) : this; if (Model == null) { throw new Error(`Discriminator "${doc[discriminatorKey]}" not ` + `found for model "${this.modelName}"`); } let toSave = doc; const callbackWrapper = (error, doc) => { if (error) { if (!firstError) { firstError = error; } return callback(null, { error: error }); } callback(null, { doc: doc }); }; if (!(toSave instanceof Model)) { try { toSave = new Model(toSave); } catch (error) { return callbackWrapper(error); } } toSave.save(options, callbackWrapper); }); }); parallel(toExecute, (error, res) => { const savedDocs = []; const len = res.length; for (let i = 0; i < len; ++i) { if (res[i].doc) { savedDocs.push(res[i].doc); } } if (firstError) { return cb(firstError, savedDocs); } if (doc instanceof Array) { cb(null, savedDocs); } else { cb.apply(this, [null].concat(savedDocs)); } }); }); }; /** * _Requires a replica set running MongoDB >= 3.6.0._ Watches the * underlying collection for changes using * [MongoDB change streams](https://docs.mongodb.com/manual/changeStreams/). * * This function does **not** trigger any middleware. In particular, it * does **not** trigger aggregate middleware. * * ####Example: * * const doc = await Person.create({ name: 'Ned Stark' }); * Person.watch().on('change', change => console.log(change)); * // Will print from the above `console.log()`: * // { _id: { _data: ... 
}, * // operationType: 'delete', * // ns: { db: 'mydb', coll: 'Person' }, * // documentKey: { _id: 5a51b125c5500f5aa094c7bd } } * await doc.remove(); * * @param {Array} [pipeline] * @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/3.0/api/Collection.html#watch) * @return {ChangeStream} mongoose-specific change stream wrapper, inherits from EventEmitter * @api public */ Model.watch = function(pipeline, options) { return new ChangeStream(this, pipeline, options); }; /** * _Requires MongoDB >= 3.6.0._ Starts a [MongoDB session](https://docs.mongodb.com/manual/release-notes/3.6/#client-sessions) * for benefits like causal consistency, [retryable writes](https://docs.mongodb.com/manual/core/retryable-writes/), * and [transactions](http://thecodebarbarian.com/a-node-js-perspective-on-mongodb-4-transactions.html). * * Calling `MyModel.startSession()` is equivalent to calling `MyModel.db.startSession()`. * * This function does not trigger any middleware. * * ####Example: * * const session = await Person.startSession(); * let doc = await Person.findOne({ name: 'Ned Stark' }, null, { session }); * await doc.remove(); * // `doc` will always be null, even if reading from a replica set * // secondary. Without causal consistency, it is possible to * // get a doc back from the below query if the query reads from a * // secondary that is experiencing replication lag. * doc = await Person.findOne({ name: 'Ned Stark' }, null, { session, readPreference: 'secondary' }); * * @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/3.0/api/MongoClient.html#startSession) * @param {Boolean} [options.causalConsistency=true] set to false to disable causal consistency * @param {Function} [callback] * @return {Promise<ClientSession>} promise that resolves to a MongoDB driver `ClientSession` * @api public */ Model.startSession = function() { return this.db.startSession.apply(this.db, arguments); }; /** * Shortcut for validating an array of documents and inserting them into * MongoDB if they're all valid. This function is faster than `.create()` * because it only sends one operation to the server, rather than one for each * document. * * Mongoose always validates each document **before** sending `insertMany` * to MongoDB. So if one document has a validation error, no documents will * be saved, unless you set * [the `ordered` option to false](https://docs.mongodb.com/manual/reference/method/db.collection.insertMany/#error-handling). * * This function does **not** trigger save middleware. * * This function triggers the following middleware. * * - `insertMany()` * * ####Example: * * var arr = [{ name: 'Star Wars' }, { name: 'The Empire Strikes Back' }]; * Movies.insertMany(arr, function(error, docs) {}); * * @param {Array|Object|*} doc(s) * @param {Object} [options] see the [mongodb driver options](http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#insertMany) * @param {Boolean} [options.ordered = true] if true, will fail fast on the first error encountered. If false, will insert all the documents it can and report errors later. An `insertMany()` with `ordered = false` is called an "unordered" `insertMany()`. * @param {Boolean} [options.rawResult = false] if false, the returned promise resolves to the documents that passed mongoose document validation. 
If `false`, will return the [raw result from the MongoDB driver](http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#~insertWriteOpCallback) with a `mongoose` property that contains `validationErrors` if this is an unordered `insertMany`. * @param {Function} [callback] callback * @return {Promise} * @api public */ Model.insertMany = function(arr, options, callback) { if (typeof options === 'function') { callback = options; options = null; } return utils.promiseOrCallback(callback, cb => { this.$__insertMany(arr, options, cb); }); }; /*! * ignore */ Model.$__insertMany = function(arr, options, callback) { var _this = this; if (typeof options === 'function') { callback = options; options = null; } if (callback) { callback = this.$wrapCallback(callback); } callback = callback || utils.noop; options = options || {}; var limit = get(options, 'limit', 1000); var rawResult = get(options, 'rawResult', false); var ordered = get(options, 'ordered', true); if (!Array.isArray(arr)) { arr = [arr]; } var toExecute = []; var validationErrors = []; arr.forEach(function(doc) { toExecute.push(function(callback) { if (!(doc instanceof _this)) { doc = new _this(doc); } doc.validate({ __noPromise: true }, function(error) { if (error) { // Option `ordered` signals that insert should be continued after reaching // a failing insert. Therefore we delegate "null", meaning the validation // failed. It's up to the next function to filter out all failed models if (ordered === false) { validationErrors.push(error); return callback(null, null); } return callback(error); } callback(null, doc); }); }); }); parallelLimit(toExecute, limit, function(error, docs) { if (error) { callback(error, null); return; } // We filter all failed pre-validations by removing nulls var docAttributes = docs.filter(function(doc) { return doc != null; }); // Quickly escape while there aren't any valid docAttributes if (docAttributes.length < 1) { callback(null, []); return; } var docObjects = docAttributes.map(function(doc) { if (doc.schema.options.versionKey) { doc[doc.schema.options.versionKey] = 0; } if (doc.initializeTimestamps) { return doc.initializeTimestamps().toObject(internalToObjectOptions); } return doc.toObject(internalToObjectOptions); }); _this.collection.insertMany(docObjects, options, function(error, res) { if (error) { callback(error, null); return; } for (var i = 0; i < docAttributes.length; ++i) { docAttributes[i].isNew = false; docAttributes[i].emit('isNew', false); docAttributes[i].constructor.emit('isNew', false); } if (rawResult) { if (ordered === false) { // Decorate with mongoose validation errors in case of unordered, // because then still do `insertMany()` res.mongoose = { validationErrors: validationErrors }; } return callback(null, res); } callback(null, docAttributes); }); }); }; /** * Sends multiple `insertOne`, `updateOne`, `updateMany`, `replaceOne`, * `deleteOne`, and/or `deleteMany` operations to the MongoDB server in one * command. This is faster than sending multiple independent operations (like) * if you use `create()`) because with `bulkWrite()` there is only one round * trip to MongoDB. * * Mongoose will perform casting on all operations you provide. * * This function does **not** trigger any middleware, not `save()` nor `update()`. * If you need to trigger * `save()` middleware for every document use [`create()`](http://mongoosejs.com/docs/api.html#model_Model.create) instead. 
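 *
 * ####Note:
 *
 * The returned promise resolves to the driver's `BulkWriteOpResult`, so you
 * can inspect write counts on it. A minimal sketch (using the `Character`
 * model from the example below; the result fields are the MongoDB driver's):
 *
 *     Character.bulkWrite([
 *       { insertOne: { document: { name: 'Jon Snow' } } }
 *     ]).then(function (res) {
 *       console.log(res.insertedCount); // 1
 *     });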
* * ####Example: * * Character.bulkWrite([ * { * insertOne: { * document: { * name: 'Eddard Stark', * title: 'Warden of the North' * } * } * }, * { * updateOne: { * filter: { name: 'Eddard Stark' }, * // If you were using the MongoDB driver directly, you'd need to do * // `update: { $set: { title: ... } }` but mongoose adds $set for * // you. * update: { title: 'Hand of the King' } * } * }, * { * deleteOne: { * { * filter: { name: 'Eddard Stark' } * } * } * } * ]).then(handleResult); * * @param {Array} ops * @param {Object} [options] * @param {Function} [callback] callback `function(error, bulkWriteOpResult) {}` * @return {Promise} resolves to a `BulkWriteOpResult` if the operation succeeds * @see writeOpResult http://mongodb.github.io/node-mongodb-native/2.2/api/Collection.html#~BulkWriteOpResult * @api public */ Model.bulkWrite = function(ops, options, callback) { if (typeof options === 'function') { callback = options; options = null; } if (callback) { callback = this.$wrapCallback(callback); } options = options || {}; var validations = ops.map((op) => { if (op['insertOne']) { return (callback) => { op['insertOne']['document'] = new this(op['insertOne']['document']); op['insertOne']['document'].validate({ __noPromise: true }, function(error) { if (error) { return callback(error, null); } callback(null); }); }; } else if (op['updateOne']) { op = op['updateOne']; return (callback) => { try { op['filter'] = cast(this.schema, op['filter']); op['update'] = castUpdate(this.schema, op['update'], this.schema.options.strict); if (op.setDefaultsOnInsert) { setDefaultsOnInsert(op['filter'], this.schema, op['update'], { setDefaultsOnInsert: true, upsert: op.upsert }); } } catch (error) { return callback(error, null); } callback(null); }; } else if (op['updateMany']) { op = op['updateMany']; return (callback) => { try { op['filter'] = cast(this.schema, op['filter']); op['update'] = castUpdate(this.schema, op['update'], { strict: this.schema.options.strict, overwrite: false }); if (op.setDefaultsOnInsert) { setDefaultsOnInsert(op['filter'], this.schema, op['update'], { setDefaultsOnInsert: true, upsert: op.upsert }); } } catch (error) { return callback(error, null); } callback(null); }; } else if (op['replaceOne']) { return (callback) => { try { op['replaceOne']['filter'] = cast(this.schema, op['replaceOne']['filter']); } catch (error) { return callback(error, null); } // set `skipId`, otherwise we get "_id field cannot be changed" op['replaceOne']['replacement'] = new this(op['replaceOne']['replacement'], null, true); op['replaceOne']['replacement'].validate({ __noPromise: true }, function(error) { if (error) { return callback(error, null); } callback(null); }); }; } else if (op['deleteOne']) { return (callback) => { try { op['deleteOne']['filter'] = cast(this.schema, op['deleteOne']['filter']); } catch (error) { return callback(error, null); } callback(null); }; } else if (op['deleteMany']) { return (callback) => { try { op['deleteMany']['filter'] = cast(this.schema, op['deleteMany']['filter']); } catch (error) { return callback(error, null); } callback(null); }; } else { return (callback) => { callback(new Error('Invalid op passed to `bulkWrite()`'), null); }; } }); return utils.promiseOrCallback(callback, cb => { parallel(validations, error => { if (error) { return cb(error); } this.collection.bulkWrite(ops, options, (error, res) => { if (error) { return cb(error); } cb(null, res); }); }); }); }; /** * Shortcut for creating a new Document from existing raw data, pre-saved in the DB. 
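 * Unlike `new MyModel(obj)`, the hydrated document is treated as one that
 * already exists in the database, so a subsequent `save()` issues an update
 * rather than an insert.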
* The document returned has no paths marked as modified initially. * * ####Example: * * // hydrate previous data into a Mongoose document * var mongooseCandy = Candy.hydrate({ _id: '54108337212ffb6d459f854c', type: 'jelly bean' }); * * @param {Object} obj * @return {Model} document instance * @api public */ Model.hydrate = function(obj) { var model = require('./queryhelpers').createModel(this, obj); model.init(obj); return model; }; /** * Updates one document in the database without returning it. * * This function triggers the following middleware. * * - `update()` * * ####Examples: * * MyModel.update({ age: { $gt: 18 } }, { oldEnough: true }, fn); * MyModel.update({ name: 'Tobi' }, { ferret: true }, { multi: true }, function (err, raw) { * if (err) return handleError(err); * console.log('The raw response from Mongo was ', raw); * }); * * ####Valid options: * * - `safe` (boolean) safe mode (defaults to value set in schema (true)) * - `upsert` (boolean) whether to create the doc if it doesn't match (false) * - `multi` (boolean) whether multiple documents should be updated (false) * - `runValidators`: if true, runs [update validators](/docs/validation.html#update-validators) on this command. Update validators validate the update operation against the model's schema. * - `setDefaultsOnInsert`: if this and `upsert` are true, mongoose will apply the [defaults](http://mongoosejs.com/docs/defaults.html) specified in the model's schema if a new document is created. This option only works on MongoDB >= 2.4 because it relies on [MongoDB's `$setOnInsert` operator](https://docs.mongodb.org/v2.4/reference/operator/update/setOnInsert/). * - `strict` (boolean) overrides the `strict` option for this update * - `overwrite` (boolean) disables update-only mode, allowing you to overwrite the doc (false) * * All `update` values are cast to their appropriate SchemaTypes before being sent. * * The `callback` function receives `(err, rawResponse)`. * * - `err` is the error if any occurred * - `rawResponse` is the full response from Mongo * * ####Note: * * All top level keys which are not `atomic` operation names are treated as set operations: * * ####Example: * * var query = { name: 'borne' }; * Model.update(query, { name: 'jason bourne' }, options, callback) * * // is sent as * Model.update(query, { $set: { name: 'jason bourne' }}, options, callback) * // if overwrite option is false. If overwrite is true, sent without the $set wrapper. * * This helps prevent accidentally overwriting all documents in your collection with `{ name: 'jason bourne' }`. * * ####Note: * * Be careful to not use an existing model instance for the update clause (this won't work and can cause weird behavior like infinite loops). Also, ensure that the update clause does not have an _id property, which causes Mongo to return a "Mod on _id not allowed" error. * * ####Note: * * To update documents without waiting for a response from MongoDB, do not pass a `callback`, then call `exec` on the returned [Query](#query-js): * * Comment.update({ _id: id }, { $set: { text: 'changed' }}).exec(); * * ####Note: * * Although values are casted to their appropriate types when using update, the following are *not* applied: * * - defaults * - setters * - validators * - middleware * * If you need those features, use the traditional approach of first retrieving the document. * * Model.findOne({ name: 'borne' }, function (err, doc) { * if (err) .. 
* doc.name = 'jason bourne'; * doc.save(callback); * }) * * @see strict http://mongoosejs.com/docs/guide.html#strict * @see response http://docs.mongodb.org/v2.6/reference/command/update/#output * @param {Object} conditions * @param {Object} doc * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @api public */ Model.update = function update(conditions, doc, options, callback) { return _update(this, 'update', conditions, doc, options, callback); }; /** * Same as `update()`, except MongoDB will update _all_ documents that match * `criteria` (as opposed to just the first one) regardless of the value of * the `multi` option. * * **Note** updateMany will _not_ fire update middleware. Use `pre('updateMany')` * and `post('updateMany')` instead. * * This function triggers the following middleware. * * - `updateMany()` * * @param {Object} conditions * @param {Object} doc * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @api public */ Model.updateMany = function updateMany(conditions, doc, options, callback) { return _update(this, 'updateMany', conditions, doc, options, callback); }; /** * Same as `update()`, except MongoDB will update _only_ the first document that * matches `criteria` regardless of the value of the `multi` option. * * This function triggers the following middleware. * * - `updateOne()` * * @param {Object} conditions * @param {Object} doc * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @api public */ Model.updateOne = function updateOne(conditions, doc, options, callback) { return _update(this, 'updateOne', conditions, doc, options, callback); }; /** * Same as `update()`, except MongoDB replace the existing document with the * given document (no atomic operators like `$set`). * * This function triggers the following middleware. * * - `replaceOne()` * * @param {Object} conditions * @param {Object} doc * @param {Object} [options] optional see [`Query.prototype.setOptions()`](http://mongoosejs.com/docs/api.html#query_Query-setOptions) * @param {Function} [callback] * @return {Query} * @api public */ Model.replaceOne = function replaceOne(conditions, doc, options, callback) { return _update(this, 'replaceOne', conditions, doc, options, callback); }; /*! * ignore */ function _update(model, op, conditions, doc, options, callback) { var mq = new model.Query({}, {}, model, model.collection); if (callback) { callback = model.$wrapCallback(callback); } // gh-2406 // make local deep copy of conditions if (conditions instanceof Document) { conditions = conditions.toObject(); } else { conditions = utils.clone(conditions); } options = typeof options === 'function' ? options : utils.clone(options); var versionKey = get(model, 'schema.options.versionKey', null); if (versionKey && options && options.upsert) { if (options.overwrite) { doc[versionKey] = 0; } else { if (!doc.$setOnInsert) { doc.$setOnInsert = {}; } doc.$setOnInsert[versionKey] = 0; } } return mq[op](conditions, doc, options, callback); } /** * Executes a mapReduce command. * * `o` is an object specifying all mapReduce options as well as the map and reduce functions. All options are delegated to the driver implementation. 
See [node-mongodb-native mapReduce() documentation](http://mongodb.github.io/node-mongodb-native/api-generated/collection.html#mapreduce) for more detail about options. * * This function does not trigger any middleware. * * ####Example: * * var o = {}; * // `map()` and `reduce()` are run on the MongoDB server, not Node.js, * // these functions are converted to strings * o.map = function () { emit(this.name, 1) }; * o.reduce = function (k, vals) { return vals.length }; * User.mapReduce(o, function (err, results) { * console.log(results) * }) * * ####Other options: * * - `query` {Object} query filter object. * - `sort` {Object} sort input objects using this key * - `limit` {Number} max number of documents * - `keeptemp` {Boolean, default:false} keep temporary data * - `finalize` {Function} finalize function * - `scope` {Object} scope variables exposed to map/reduce/finalize during execution * - `jsMode` {Boolean, default:false} it is possible to make the execution stay in JS. Provided in MongoDB > 2.0.X * - `verbose` {Boolean, default:false} provide statistics on job execution time. * - `readPreference` {String} * - `out*` {Object, default: {inline:1}} sets the output target for the map reduce job. * * ####* out options: * * - `{inline:1}` the results are returned in an array * - `{replace: 'collectionName'}` add the results to collectionName: the results replace the collection * - `{reduce: 'collectionName'}` add the results to collectionName: if dups are detected, uses the reducer / finalize functions * - `{merge: 'collectionName'}` add the results to collectionName: if dups exist the new docs overwrite the old * * If `options.out` is set to `replace`, `merge`, or `reduce`, a Model instance is returned that can be used for further querying. Queries run against this model are all executed with the `lean` option; meaning only the js object is returned and no Mongoose magic is applied (getters, setters, etc). * * ####Example: * * var o = {}; * // You can also define `map()` and `reduce()` as strings if your * // linter complains about `emit()` not being defined * o.map = 'function () { emit(this.name, 1) }'; * o.reduce = 'function (k, vals) { return vals.length }'; * o.out = { replace: 'createdCollectionNameForResults' } * o.verbose = true; * * User.mapReduce(o, function (err, model, stats) { * console.log('map reduce took %d ms', stats.processtime) * model.find().where('value').gt(10).exec(function (err, docs) { * console.log(docs); * }); * }) * * // `mapReduce()` returns a promise. 
However, ES6 promises can only * // resolve to exactly one value, * o.resolveToObject = true; * var promise = User.mapReduce(o); * promise.then(function (res) { * var model = res.model; * var stats = res.stats; * console.log('map reduce took %d ms', stats.processtime) * return model.find().where('value').gt(10).exec(); * }).then(function (docs) { * console.log(docs); * }).then(null, handleError).end() * * @param {Object} o an object specifying map-reduce options * @param {Function} [callback] optional callback * @see http://www.mongodb.org/display/DOCS/MapReduce * @return {Promise} * @api public */ Model.mapReduce = function mapReduce(o, callback) { if (callback) { callback = this.$wrapCallback(callback); } return utils.promiseOrCallback(callback, cb => { if (!Model.mapReduce.schema) { var opts = {noId: true, noVirtualId: true, strict: false}; Model.mapReduce.schema = new Schema({}, opts); } if (!o.out) o.out = {inline: 1}; if (o.verbose !== false) o.verbose = true; o.map = String(o.map); o.reduce = String(o.reduce); if (o.query) { var q = new this.Query(o.query); q.cast(this); o.query = q._conditions; q = undefined; } this.collection.mapReduce(null, null, o, (err, res) => { if (err) { return cb(err); } if (res.collection) { // returned a collection, convert to Model var model = Model.compile('_mapreduce_' + res.collection.collectionName, Model.mapReduce.schema, res.collection.collectionName, this.db, this.base); model._mapreduce = true; res.model = model; return cb(null, res); } cb(null, res); }); }); }; /** * Performs [aggregations](http://docs.mongodb.org/manual/applications/aggregation/) on the models collection. * * If a `callback` is passed, the `aggregate` is executed and a `Promise` is returned. If a callback is not passed, the `aggregate` itself is returned. * * This function does not trigger any middleware. * * ####Example: * * // Find the max balance of all accounts * Users.aggregate([ * { $group: { _id: null, maxBalance: { $max: '$balance' }}}, * { $project: { _id: 0, maxBalance: 1 }} * ]). * then(function (res) { * console.log(res); // [ { maxBalance: 98000 } ] * }); * * // Or use the aggregation pipeline builder. * Users.aggregate(). * group({ _id: null, maxBalance: { $max: '$balance' } }). * project('-id maxBalance'). * exec(function (err, res) { * if (err) return handleError(err); * console.log(res); // [ { maxBalance: 98 } ] * }); * * ####NOTE: * * - Arguments are not cast to the model's schema because `$project` operators allow redefining the "shape" of the documents at any stage of the pipeline, which may leave documents in an incompatible format. * - The documents returned are plain javascript objects, not mongoose documents (since any shape of document can be returned). * - Requires MongoDB >= 2.1 * * @see Aggregate #aggregate_Aggregate * @see MongoDB http://docs.mongodb.org/manual/applications/aggregation/ * @param {Array} [pipeline] aggregation pipeline as an array of objects * @param {Function} [callback] * @return {Aggregate} * @api public */ Model.aggregate = function aggregate(pipeline, callback) { if (arguments.length > 2) { throw new Error('Mongoose 5.x disallows passing a spread of operators ' + 'to `Model.aggregate()`. 
Instead of ' + '`Model.aggregate({ $match }, { $skip })`, do ' + '`Model.aggregate([{ $match }, { $skip }])`'); } if (typeof pipeline === 'function') { callback = pipeline; pipeline = []; } var aggregate = new Aggregate(pipeline || []); aggregate.model(this); if (typeof callback === 'undefined') { return aggregate; } if (callback) { callback = this.$wrapCallback(callback); } aggregate.exec(callback); return aggregate; }; /** * Implements `$geoSearch` functionality for Mongoose * * This function does not trigger any middleware * * ####Example: * * var options = { near: [10, 10], maxDistance: 5 }; * Locations.geoSearch({ type : "house" }, options, function(err, res) { * console.log(res); * }); * * ####Options: * - `near` {Array} x,y point to search for * - `maxDistance` {Number} the maximum distance from the point near that a result can be * - `limit` {Number} The maximum number of results to return * - `lean` {Boolean} return the raw object instead of the Mongoose Model * * @param {Object} conditions an object that specifies the match condition (required) * @param {Object} options for the geoSearch, some (near, maxDistance) are required * @param {Object} [options.lean] if truthy, mongoose will return the document as a plain JavaScript object rather than a mongoose document. See [`Query.lean()`](http://mongoosejs.com/docs/api.html#query_Query-lean). * @param {Function} [callback] optional callback * @return {Promise} * @see http://docs.mongodb.org/manual/reference/command/geoSearch/ * @see http://docs.mongodb.org/manual/core/geohaystack/ * @api public */ Model.geoSearch = function(conditions, options, callback) { if (typeof options === 'function') { callback = options; options = {}; } if (callback) { callback = this.$wrapCallback(callback); } return utils.promiseOrCallback(callback, cb => { var error; if (conditions === undefined || !utils.isObject(conditions)) { error = new Error('Must pass conditions to geoSearch'); } else if (!options.near) { error = new Error('Must specify the near option in geoSearch'); } else if (!Array.isArray(options.near)) { error = new Error('near option must be an array [x, y]'); } if (error) { return cb(error); } // send the conditions in the options object options.search = conditions; this.collection.geoHaystackSearch(options.near[0], options.near[1], options, (err, res) => { if (err) { return cb(err); } var count = res.results.length; if (options.lean || count === 0) { return cb(null, res.results); } var errSeen = false; function init(err) { if (err && !errSeen) { return cb(err); } if (!--count && !errSeen) { cb(null, res.results); } } for (var i = 0; i < res.results.length; ++i) { var temp = res.results[i]; res.results[i] = new this(); res.results[i].init(temp, {}, init); } }); }); }; /** * Populates document references. 
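 *
 * Replaces the ids stored at the given path(s) on the passed document(s)
 * with the documents they refer to from the referenced collection(s).
 * A minimal sketch, assuming a `Book` model whose `author` path is an
 * `ObjectId` ref to a `Person` model (the model names are illustrative;
 * see the fuller examples below):
 *
 *     Book.findOne({ title: 'Casino Royale' }, function (err, book) {
 *       Book.populate(book, { path: 'author' }, function (err, book) {
 *         console.log(book.author.name);
 *       });
 *     });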
* * ####Available options: * * - path: space delimited path(s) to populate * - select: optional fields to select * - match: optional query conditions to match * - model: optional name of the model to use for population * - options: optional query options like sort, limit, etc * * ####Examples: * * // populates a single object * User.findById(id, function (err, user) { * var opts = [ * { path: 'company', match: { x: 1 }, select: 'name' } * , { path: 'notes', options: { limit: 10 }, model: 'override' } * ] * * User.populate(user, opts, function (err, user) { * console.log(user); * }); * }); * * // populates an array of objects * User.find(match, function (err, users) { * var opts = [{ path: 'company', match: { x: 1 }, select: 'name' }] * * var promise = User.populate(users, opts); * promise.then(console.log).end(); * }) * * // imagine a Weapon model exists with two saved documents: * // { _id: 389, name: 'whip' } * // { _id: 8921, name: 'boomerang' } * // and this schema: * // new Schema({ * // name: String, * // weapon: { type: ObjectId, ref: 'Weapon' } * // }); * * var user = { name: 'Indiana Jones', weapon: 389 } * Weapon.populate(user, { path: 'weapon', model: 'Weapon' }, function (err, user) { * console.log(user.weapon.name) // whip * }) * * // populate many plain objects * var users = [{ name: 'Indiana Jones', weapon: 389 }] * users.push({ name: 'Batman', weapon: 8921 }) * Weapon.populate(users, { path: 'weapon' }, function (err, users) { * users.forEach(function (user) { * console.log('%s uses a %s', users.name, user.weapon.name) * // Indiana Jones uses a whip * // Batman uses a boomerang * }); * }); * // Note that we didn't need to specify the Weapon model because * // it is in the schema's ref * * @param {Document|Array} docs Either a single document or array of documents to populate. * @param {Object} options A hash of key/val (path, options) used for population. * @param {Boolean} [options.retainNullValues=false] by default, mongoose removes null and undefined values from populated arrays. Use this option to make `populate()` retain `null` and `undefined` array entries. * @param {Function} [callback(err,doc)] Optional callback, executed upon completion. Receives `err` and the `doc(s)`. * @return {Promise} * @api public */ Model.populate = function(docs, paths, callback) { var _this = this; if (callback) { callback = this.$wrapCallback(callback); } // normalized paths paths = utils.populate(paths); // data that should persist across subPopulate calls var cache = {}; return utils.promiseOrCallback(callback, cb => { _populate(_this, docs, paths, cache, cb); }); }; /*! * Populate helper * * @param {Model} model the model to use * @param {Document|Array} docs Either a single document or array of documents to populate. * @param {Object} paths * @param {Function} [cb(err,doc)] Optional callback, executed upon completion. Receives `err` and the `doc(s)`. * @return {Function} * @api private */ function _populate(model, docs, paths, cache, callback) { var pending = paths.length; if (pending === 0) { return callback(null, docs); } // each path has its own query options and must be executed separately var i = pending; var path; while (i--) { path = paths[i]; populate(model, docs, path, next); } function next(err) { if (err) { return callback(err, null); } if (--pending) { return; } callback(null, docs); } } /*! 
* Populates `docs` */ const excludeIdReg = /\s?-_id\s?/; const excludeIdRegGlobal = /\s?-_id\s?/g; function populate(model, docs, options, callback) { // normalize single / multiple docs passed if (!Array.isArray(docs)) { docs = [docs]; } if (docs.length === 0 || docs.every(utils.isNullOrUndefined)) { return callback(); } const modelsMap = getModelsMapForPopulate(model, docs, options); if (modelsMap instanceof Error) { return utils.immediate(function() { callback(modelsMap); }); } const len = modelsMap.length; let mod; let match; let select; let vals = []; function flatten(item) { // no need to include undefined values in our query return undefined !== item; } let _remaining = len; let hasOne = false; for (let i = 0; i < len; ++i) { mod = modelsMap[i]; select = mod.options.select; if (mod.options.match) { match = utils.object.shallowCopy(mod.options.match); } else { match = {}; } let ids = utils.array.flatten(mod.ids, flatten); ids = utils.array.unique(ids); if (ids.length === 0 || ids.every(utils.isNullOrUndefined)) { --_remaining; continue; } hasOne = true; if (mod.foreignField !== '_id' || !match['_id']) { match[mod.foreignField] = { $in: ids }; } const assignmentOpts = {}; assignmentOpts.sort = get(mod, 'options.options.sort', void 0); assignmentOpts.excludeId = excludeIdReg.test(select) || (select && select._id === 0); if (assignmentOpts.excludeId) { // override the exclusion from the query so we can use the _id // for document matching during assignment. we'll delete the // _id back off before returning the result. if (typeof select === 'string') { select = select.replace(excludeIdRegGlobal, ' '); } else { // preserve original select conditions by copying select = utils.object.shallowCopy(select); delete select._id; } } if (mod.options.options && mod.options.options.limit) { assignmentOpts.originalLimit = mod.options.options.limit; mod.options.options.limit = mod.options.options.limit * ids.length; } const subPopulate = utils.clone(mod.options.populate); const query = mod.Model.find(match, select, mod.options.options); // If we're doing virtual populate and projection is inclusive and foreign // field is not selected, automatically select it because mongoose needs it. // If projection is exclusive and client explicitly unselected the foreign // field, that's the client's fault. if (mod.foreignField !== '_id' && query.selectedInclusively() && !isPathSelectedInclusive(query._fields, mod.foreignField)) { query.select(mod.foreignField); } // If we need to sub-populate, call populate recursively if (subPopulate) { query.populate(subPopulate); } query.exec(next.bind(this, mod, assignmentOpts)); } if (!hasOne) { return callback(); } function next(options, assignmentOpts, err, valsFromDb) { if (mod.options.options && mod.options.options.limit) { mod.options.options.limit = assignmentOpts.originalLimit; } if (err) return callback(err, null); vals = vals.concat(valsFromDb); _assign(null, vals, options, assignmentOpts); if (--_remaining === 0) { callback(); } } function _assign(err, vals, mod, assignmentOpts) { if (err) return callback(err, null); var options = mod.options; var isVirtual = mod.isVirtual; var justOne = mod.justOne; var _val; var lean = options.options && options.options.lean; var len = vals.length; var rawOrder = {}; var rawDocs = {}; var key; var val; // Clone because `assignRawDocsToIdStructure` will mutate the array var allIds = utils.clone(mod.allIds); // optimization: // record the document positions as returned by // the query result. 
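    // The loop below builds two lookup tables keyed by the stringified
    // foreign-field value of each result doc: `rawDocs` holds the matching
    // doc (or an array of docs when several results share a key) and
    // `rawOrder` holds the corresponding result positions, which
    // `assignRawDocsToIdStructure()` later uses to honor a user-specified sort.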
for (var i = 0; i < len; i++) { val = vals[i]; if (val) { _val = utils.getValue(mod.foreignField, val); if (Array.isArray(_val)) { var _valLength = _val.length; for (var j = 0; j < _valLength; ++j) { var __val = _val[j]; if (__val instanceof Document) { __val = __val._id; } key = String(__val); if (rawDocs[key]) { if (Array.isArray(rawDocs[key])) { rawDocs[key].push(val); rawOrder[key].push(i); } else { rawDocs[key] = [rawDocs[key], val]; rawOrder[key] = [rawOrder[key], i]; } } else { if (isVirtual && !justOne) { rawDocs[key] = [val]; rawOrder[key] = [i]; } else { rawDocs[key] = val; rawOrder[key] = i; } } } } else { if (_val instanceof Document) { _val = _val._id; } key = String(_val); if (rawDocs[key]) { if (Array.isArray(rawDocs[key])) { rawDocs[key].push(val); rawOrder[key].push(i); } else { rawDocs[key] = [rawDocs[key], val]; rawOrder[key] = [rawOrder[key], i]; } } else { rawDocs[key] = val; rawOrder[key] = i; } } // flag each as result of population if (!lean) { val.$__.wasPopulated = true; } } } assignVals({ originalModel: model, // If virtual, make sure to not mutate original field rawIds: mod.isVirtual ? allIds : mod.allIds, allIds: allIds, localField: mod.localField, foreignField: mod.foreignField, rawDocs: rawDocs, rawOrder: rawOrder, docs: mod.docs, path: options.path, options: assignmentOpts, justOne: mod.justOne, isVirtual: mod.isVirtual, allOptions: mod }); } } /*! * Assigns documents returned from a population query back * to the original document path. */ function assignVals(o) { // Glob all options together because `populateOptions` is confusing const retainNullValues = get(o, 'allOptions.options.options.retainNullValues', false); const populateOptions = Object.assign({}, o.options, { justOne: o.justOne, retainNullValues: retainNullValues }); // replace the original ids in our intermediate _ids structure // with the documents found by query assignRawDocsToIdStructure(o.rawIds, o.rawDocs, o.rawOrder, populateOptions, o.localField, o.foreignField); // now update the original documents being populated using the // result structure that contains real documents. const docs = o.docs; const rawIds = o.rawIds; const options = o.options; function setValue(val) { return valueFilter(val, options, populateOptions); } for (let i = 0; i < docs.length; ++i) { const existingVal = utils.getValue(o.path, docs[i]); if (existingVal == null && !getVirtual(o.originalModel.schema, o.path)) { continue; } if (o.isVirtual && !o.justOne && !Array.isArray(rawIds[i])) { if (rawIds[i] == null) { rawIds[i] = []; } else { rawIds[i] = [rawIds[i]]; } } else if (o.isVirtual && o.justOne && Array.isArray(rawIds[i])) { rawIds[i] = rawIds[i][0]; } // If we're populating a map, the existing value will be an object, so // we need to transform again const originalSchema = o.originalModel.schema; let isMap = isModel(docs[i]) ? existingVal instanceof Map : utils.isPOJO(existingVal); // If we pass the first check, also make sure the local field's schematype // is map (re: gh-6460) isMap = isMap && get(originalSchema._getSchema(o.path), '$isSchemaMap'); if (!o.isVirtual && isMap) { const _keys = existingVal instanceof Map ? 
Array.from(existingVal.keys()) : Object.keys(existingVal); rawIds[i] = rawIds[i].reduce((cur, v, i) => { // Avoid casting because that causes infinite recursion cur.$init(_keys[i], v); return cur; }, new MongooseMap({}, docs[i])); } if (o.isVirtual && docs[i].constructor.name === 'model') { // If virtual populate and doc is already init-ed, need to walk through // the actual doc to set rather than setting `_doc` directly mpath.set(o.path, rawIds[i], docs[i], setValue); } else { var parts = o.path.split('.'); var cur = docs[i]; for (var j = 0; j < parts.length - 1; ++j) { if (cur[parts[j]] == null) { cur[parts[j]] = {}; } cur = cur[parts[j]]; } if (docs[i].$__) { docs[i].populated(o.path, o.allIds[i], o.allOptions); } utils.setValue(o.path, rawIds[i], docs[i], setValue, false); } } } /*! * */ function isModel(obj) { return get(obj, '$__') != null; } /*! * Assign `vals` returned by mongo query to the `rawIds` * structure returned from utils.getVals() honoring * query sort order if specified by user. * * This can be optimized. * * Rules: * * if the value of the path is not an array, use findOne rules, else find. * for findOne the results are assigned directly to doc path (including null results). * for find, if user specified sort order, results are assigned directly * else documents are put back in original order of array if found in results * * @param {Array} rawIds * @param {Array} vals * @param {Boolean} sort * @api private */ function assignRawDocsToIdStructure(rawIds, resultDocs, resultOrder, options, localFields, foreignFields, recursed) { // honor user specified sort order var newOrder = []; var sorting = options.sort && rawIds.length > 1; var doc; var sid; var id; for (var i = 0; i < rawIds.length; ++i) { id = rawIds[i]; if (Array.isArray(id)) { // handle [ [id0, id2], [id3] ] assignRawDocsToIdStructure(id, resultDocs, resultOrder, options, localFields, foreignFields, true); newOrder.push(id); continue; } if (id === null && !sorting) { // keep nulls for findOne unless sorting, which always // removes them (backward compat) newOrder.push(id); continue; } sid = String(id); if (recursed) { // apply find behavior // assign matching documents in original order unless sorting doc = resultDocs[sid]; if (doc) { if (sorting) { newOrder[resultOrder[sid]] = doc; } else { newOrder.push(doc); } } else { newOrder.push(id); } } else { // apply findOne behavior - if document in results, assign, else assign null newOrder[i] = doc = resultDocs[sid] || null; } } rawIds.length = 0; if (newOrder.length) { // reassign the documents based on corrected order // forEach skips over sparse entries in arrays so we // can safely use this to our advantage dealing with sorted // result sets too. 
newOrder.forEach(function(doc, i) { rawIds[i] = doc; }); } } function getModelsMapForPopulate(model, docs, options) { let i; let doc; let len = docs.length; let available = {}; let map = []; let modelNameFromQuery = options.model && options.model.modelName || options.model; let schema; let refPath; let Model; let currentOptions; let modelNames; let modelName; let discriminatorKey; let modelForFindSchema; var originalModel = options.model; var isVirtual = false; var isRefPathArray = false; var modelSchema = model.schema; for (i = 0; i < len; i++) { doc = docs[i]; schema = getSchemaTypes(modelSchema, doc, options.path); const isUnderneathDocArray = schema && schema.$isUnderneathDocArray; if (isUnderneathDocArray && get(options, 'options.sort') != null) { return new Error('Cannot populate with `sort` on path ' + options.path + ' because it is a subproperty of a document array'); } modelNames = null; if (Array.isArray(schema)) { for (let j = 0; j < schema.length; ++j) { var _modelNames = _getModelNames(doc, schema[j]); if (!_modelNames) { continue; } modelNames = modelNames || []; for (let x = 0; x < _modelNames.length; ++x) { if (modelNames.indexOf(_modelNames[x]) === -1) { modelNames.push(_modelNames[x]); } } } } else { modelNames = _getModelNames(doc, schema); if (!modelNames) { continue; } } let virtual = getVirtual(model.schema, options.path); let localField; if (virtual && virtual.options) { let virtualPrefix = virtual.$nestedSchemaPath ? virtual.$nestedSchemaPath + '.' : ''; if (typeof virtual.options.localField === 'function') { localField = virtualPrefix + virtual.options.localField.call(doc, doc); } else { localField = virtualPrefix + virtual.options.localField; } } else { localField = options.path; } let foreignField = virtual && virtual.options ? virtual.options.foreignField : '_id'; let justOne = true; if (virtual && virtual.options && virtual.options.ref) { let normalizedRef; if (typeof virtual.options.ref === 'function') { normalizedRef = virtual.options.ref.call(doc, doc); } else { normalizedRef = virtual.options.ref; } justOne = virtual.options.justOne; isVirtual = true; if (!modelNames) { modelNames = [].concat(normalizedRef); } } else if (schema) { justOne = !schema.$isMongooseArray; } if (!modelNames) { continue; } if (virtual && (!localField || !foreignField)) { throw new Error('If you are populating a virtual, you must set the ' + 'localField and foreignField options'); } options.isVirtual = isVirtual; if (typeof localField === 'function') { localField = localField.call(doc, doc); } if (typeof foreignField === 'function') { foreignField = foreignField.call(doc); } const ret = convertTo_id(utils.getValue(localField, doc)); const id = String(utils.getValue(foreignField, doc)); options._docs[id] = Array.isArray(ret) ? ret.slice() : ret; let k = modelNames.length; while (k--) { modelName = modelNames[k]; if (modelName == null) { continue; } var _doc = Array.isArray(doc) && isRefPathArray ? doc[k] : doc; var _ret = Array.isArray(ret) && isRefPathArray && ret[k] ? ret[k] : ret; try { Model = originalModel && originalModel.modelName ? 
originalModel : model.db.model(modelName); } catch (error) { return error; } if (!available[modelName]) { currentOptions = { model: Model }; if (isVirtual && virtual.options && virtual.options.options) { currentOptions.options = utils.clone(virtual.options.options); } utils.merge(currentOptions, options); if (schema && !discriminatorKey) { currentOptions.model = Model; } options.model = Model; available[modelName] = { Model: Model, options: currentOptions, docs: [_doc], ids: [_ret], allIds: [ret], // Assume only 1 localField + foreignField localField: localField, foreignField: foreignField, justOne: justOne, isVirtual: isVirtual }; map.push(available[modelName]); } else { available[modelName].docs.push(_doc); available[modelName].ids.push(_ret); available[modelName].allIds.push(ret); } } } function _getModelNames(doc, schema) { var modelNames; var discriminatorKey; if (schema && schema.caster) { schema = schema.caster; } if (schema && schema.$isSchemaMap) { schema = schema.$__schemaType; } if (!schema && model.discriminators) { discriminatorKey = model.schema.discriminatorMapping.key; } refPath = schema && schema.options && schema.options.refPath; let normalizedRefPath; if (refPath && typeof refPath === 'function') { normalizedRefPath = refPath.call(doc, doc, options.path); } else { normalizedRefPath = refPath; } if (normalizedRefPath) { modelNames = utils.getValue(normalizedRefPath, doc); isRefPathArray = false; if (Array.isArray(modelNames)) { isRefPathArray = true; modelNames = utils.array.flatten(modelNames); } } else { if (!modelNameFromQuery) { var modelForCurrentDoc = model; var schemaForCurrentDoc; if (!schema && discriminatorKey) { modelForFindSchema = utils.getValue(discriminatorKey, doc); if (modelForFindSchema) { try { modelForCurrentDoc = model.db.model(modelForFindSchema); } catch (error) { return error; } schemaForCurrentDoc = modelForCurrentDoc.schema._getSchema(options.path); if (schemaForCurrentDoc && schemaForCurrentDoc.caster) { schemaForCurrentDoc = schemaForCurrentDoc.caster; } } } else { schemaForCurrentDoc = schema; } var virtual = getVirtual(modelForCurrentDoc.schema, options.path); var ref; if ((ref = get(schemaForCurrentDoc, 'options.ref')) != null) { modelNames = [ref]; } else if ((ref = get(virtual, 'options.ref')) != null) { if (typeof ref === 'function') { ref = ref.call(doc, doc); } // When referencing nested arrays, the ref should be an Array // of modelNames. if (Array.isArray(ref)) { modelNames = ref; } else { modelNames = [ref]; } isVirtual = true; } else { // We may have a discriminator, in which case we don't want to // populate using the base model by default modelNames = discriminatorKey ? null : [model.modelName]; } } else { modelNames = [modelNameFromQuery]; // query options } } if (!modelNames) { return; } if (!Array.isArray(modelNames)) { modelNames = [modelNames]; } return modelNames; } return map; } /*! * Retrieve the _id of `val` if a Document or Array of Documents. 
* * @param {Array|Document|Any} val * @return {Array|Document|Any} */ function convertTo_id(val) { if (val instanceof Model) return val._id; if (Array.isArray(val)) { for (let i = 0; i < val.length; ++i) { if (val[i] instanceof Model) { val[i] = val[i]._id; } } if (val.isMongooseArray && val._schema) { return val._schema.cast(val, val._parent); } return [].concat(val); } // `populate('map')` may be an object if populating on a doc that hasn't // been hydrated yet if (val != null && val.constructor.name === 'Object') { const ret = []; for (const key of Object.keys(val)) { ret.push(val[key]); } return ret; } // If doc has already been hydrated, e.g. `doc.populate('map').execPopulate()` // then `val` will already be a map if (val instanceof Map) { return Array.from(val.values()); } return val; } /*! * 1) Apply backwards compatible find/findOne behavior to sub documents * * find logic: * a) filter out non-documents * b) remove _id from sub docs when user specified * * findOne * a) if no doc found, set to null * b) remove _id from sub docs when user specified * * 2) Remove _ids when specified by users query. * * background: * _ids are left in the query even when user excludes them so * that population mapping can occur. */ function valueFilter(val, assignmentOpts, populateOptions) { if (Array.isArray(val)) { // find logic const ret = []; const numValues = val.length; for (let i = 0; i < numValues; ++i) { var subdoc = val[i]; if (!isDoc(subdoc) && (!populateOptions.retainNullValues || subdoc != null)) { continue; } maybeRemoveId(subdoc, assignmentOpts); ret.push(subdoc); if (assignmentOpts.originalLimit && ret.length >= assignmentOpts.originalLimit) { break; } } // Since we don't want to have to create a new mongoosearray, make sure to // modify the array in place while (val.length > ret.length) { Array.prototype.pop.apply(val, []); } for (let i = 0; i < ret.length; ++i) { val[i] = ret[i]; } return val; } // findOne if (isDoc(val)) { maybeRemoveId(val, assignmentOpts); return val; } return populateOptions.justOne ? (val == null ? val : null) : []; } /*! * Remove _id from `subdoc` if user specified "lean" query option */ function maybeRemoveId(subdoc, assignmentOpts) { if (assignmentOpts.excludeId) { if (typeof subdoc.setValue === 'function') { delete subdoc._doc._id; } else { delete subdoc._id; } } } /*! * Determine if `doc` is a document returned * by a populate query. */ function isDoc(doc) { if (doc == null) { return false; } var type = typeof doc; if (type === 'string') { return false; } if (type === 'number') { return false; } if (Buffer.isBuffer(doc)) { return false; } if (doc.constructor.name === 'ObjectID') { return false; } // only docs return true; } /*! * Compiler utility. 
* * @param {String|Function} name model name or class extending Model * @param {Schema} schema * @param {String} collectionName * @param {Connection} connection * @param {Mongoose} base mongoose instance */ Model.compile = function compile(name, schema, collectionName, connection, base) { var versioningEnabled = schema.options.versionKey !== false; if (versioningEnabled && !schema.paths[schema.options.versionKey]) { // add versioning to top level documents only var o = {}; o[schema.options.versionKey] = Number; schema.add(o); } var model; if (typeof name === 'function' && name.prototype instanceof Model) { model = name; name = model.name; schema.loadClass(model, false); model.prototype.$isMongooseModelPrototype = true; } else { // generate new class model = function model(doc, fields, skipId) { model.hooks.execPreSync('createModel', doc); if (!(this instanceof model)) { return new model(doc, fields, skipId); } Model.call(this, doc, fields, skipId); }; } model.hooks = schema.s.hooks.clone(); model.base = base; model.modelName = name; if (!(model.prototype instanceof Model)) { model.__proto__ = Model; model.prototype.__proto__ = Model.prototype; } model.model = Model.prototype.model; model.db = model.prototype.db = connection; model.discriminators = model.prototype.discriminators = undefined; model.prototype.$__setSchema(schema); var _userProvidedOptions = schema._userProvidedOptions || {}; // `bufferCommands` is true by default... var bufferCommands = true; // First, take the global option if (connection.base.get('bufferCommands') != null) { bufferCommands = connection.base.get('bufferCommands'); } // Connection-specific overrides the global option if (connection.config.bufferCommands != null) { bufferCommands = connection.config.bufferCommands; } // And schema options override global and connection if (_userProvidedOptions.bufferCommands != null) { bufferCommands = _userProvidedOptions.bufferCommands; } var collectionOptions = { bufferCommands: bufferCommands, capped: schema.options.capped }; model.prototype.collection = connection.collection( collectionName, collectionOptions ); // apply methods and statics applyMethods(model, schema); applyStatics(model, schema); applyHooks(model, schema); model.schema = model.prototype.schema; model.collection = model.prototype.collection; // Create custom query constructor model.Query = function() { Query.apply(this, arguments); }; model.Query.prototype = Object.create(Query.prototype); model.Query.base = Query.base; applyQueryMiddleware(model.Query, model); applyQueryMethods(model, schema.query); var kareemOptions = { useErrorHandlers: true, numCallbackParams: 1 }; model.$__insertMany = model.hooks.createWrapper('insertMany', model.$__insertMany, model, kareemOptions); return model; }; /*! * Register custom query methods for this model * * @param {Model} model * @param {Schema} schema */ function applyQueryMethods(model, methods) { for (var i in methods) { model.Query.prototype[i] = methods[i]; } } /*! 
* Apply query middleware * * @param {Model} model */ function applyQueryMiddleware(Query, model) { const kareemOptions = { useErrorHandlers: true, numCallbackParams: 1, nullResultByDefault: true }; // `update()` thunk has a different name because `_update` was already taken Query.prototype._execUpdate = model.hooks.createWrapper('update', Query.prototype._execUpdate, null, kareemOptions); [ 'count', 'countDocuments', 'estimatedDocumentCount', 'find', 'findOne', 'findOneAndDelete', 'findOneAndRemove', 'findOneAndUpdate', 'replaceOne', 'updateMany', 'updateOne' ].forEach(fn => { Query.prototype[`_${fn}`] = model.hooks.createWrapper(fn, Query.prototype[`_${fn}`], null, kareemOptions); }); } /*! * Subclass this model with `conn`, `schema`, and `collection` settings. * * @param {Connection} conn * @param {Schema} [schema] * @param {String} [collection] * @return {Model} */ Model.__subclass = function subclass(conn, schema, collection) { // subclass model using this connection and collection name var _this = this; var Model = function Model(doc, fields, skipId) { if (!(this instanceof Model)) { return new Model(doc, fields, skipId); } _this.call(this, doc, fields, skipId); }; Model.__proto__ = _this; Model.prototype.__proto__ = _this.prototype; Model.db = Model.prototype.db = conn; var s = schema && typeof schema !== 'string' ? schema : _this.prototype.schema; var options = s.options || {}; var _userProvidedOptions = s._userProvidedOptions || {}; if (!collection) { collection = _this.prototype.schema.get('collection') || utils.toCollectionName(_this.modelName, this.base.pluralize()); } var bufferCommands = true; if (s) { if (conn.config.bufferCommands != null) { bufferCommands = conn.config.bufferCommands; } if (_userProvidedOptions.bufferCommands != null) { bufferCommands = _userProvidedOptions.bufferCommands; } } var collectionOptions = { bufferCommands: bufferCommands, capped: s && options.capped }; Model.prototype.collection = conn.collection(collection, collectionOptions); Model.collection = Model.prototype.collection; // Errors handled internally, so ignore Model.init(() => {}); return Model; }; Model.$wrapCallback = function(callback) { if (callback == null) { return callback; } var _this = this; return function() { try { callback.apply(null, arguments); } catch (error) { _this.emit('error', error); } }; }; /*! * Module exports. */ module.exports = exports = Model;
1
13,822
Why do this instead of `localFieldPath.applyGetters(doc[localField], doc)`?
Automattic-mongoose
js
@@ -186,15 +186,6 @@ SUBGRAPH_ISOMORPHISM_BADARG_TEST("Throws if match count is negative") { invalid_argument); } -SUBGRAPH_ISOMORPHISM_BADARG_TEST("Throws if match count is positive") { - REQUIRE_THROWS_AS( - (this->check_subgraph_isomorphism<double_triangle_target_type, double_triangle_target_type>( - false, - isomorphism_kind::induced, - 1)), - unimplemented); -} - // SUBGRAPH_ISOMORPHISM_BADARG_TEST("Throws if semantic match is true") { // REQUIRE_THROWS_AS( // (this->check_subgraph_isomorphism<double_triangle_target_type, double_triangle_target_type>(
1
/******************************************************************************* * Copyright 2021 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #include <initializer_list> #include "oneapi/dal/algo/subgraph_isomorphism/graph_matching.hpp" #include "oneapi/dal/graph/undirected_adjacency_vector_graph.hpp" #include "oneapi/dal/graph/service_functions.hpp" #include "oneapi/dal/table/common.hpp" #include "oneapi/dal/exceptions.hpp" #include "oneapi/dal/test/engine/common.hpp" namespace oneapi::dal::algo::subgraph_isomorphism::test { typedef dal::preview::subgraph_isomorphism::kind isomorphism_kind; class graph_base_data { public: graph_base_data() = default; std::int64_t get_vertex_count() const { return vertex_count; } std::int64_t get_edge_count() const { return edge_count; } std::int64_t get_cols_count() const { return cols_count; } std::int64_t get_rows_count() const { return rows_count; } protected: std::int64_t vertex_count; std::int64_t edge_count; std::int64_t cols_count; std::int64_t rows_count; }; class double_triangle_target_type : public graph_base_data { public: double_triangle_target_type() { vertex_count = 9; edge_count = 16; cols_count = 32; rows_count = 10; } std::array<std::int32_t, 9> degrees = { 2, 4, 3, 4, 4, 5, 3, 5, 2 }; std::array<std::int32_t, 32> cols = { 1, 3, 0, 2, 5, 3, 4, 1, 3, 7, 0, 2, 1, 2, 6, 5, 7, 1, 8, 7, 4, 6, 4, 7, 5, 3, 5, 6, 4, 8, 5, 7 }; std::array<std::int64_t, 10> rows = { 0, 2, 6, 9, 13, 17, 22, 25, 30, 32 }; std::array<std::int32_t, 9> labels = { 1, 0, 1, 0, 0, 1, 0, 1, 0 }; }; class empty_graph_type : public graph_base_data { public: empty_graph_type() { vertex_count = 0; edge_count = 0; cols_count = 0; rows_count = 1; } std::array<std::int32_t, 0> degrees = {}; std::array<std::int32_t, 0> cols = {}; std::array<std::int64_t, 1> rows = { 0 }; std::array<std::int32_t, 0> labels = {}; }; class subgraph_isomorphism_badarg_test { public: using my_graph_type = dal::preview::undirected_adjacency_vector_graph<std::int32_t>; template <typename GraphType> auto create_graph() { GraphType graph_data; my_graph_type my_graph; auto &graph_impl = oneapi::dal::detail::get_impl(my_graph); auto &vertex_allocator = graph_impl._vertex_allocator; auto &edge_allocator = graph_impl._edge_allocator; const std::int64_t vertex_count = graph_data.get_vertex_count(); const std::int64_t edge_count = graph_data.get_edge_count(); const std::int64_t cols_count = graph_data.get_cols_count(); const std::int64_t rows_count = graph_data.get_rows_count(); typedef std::allocator_traits<std::allocator<char>>::rebind_traits<std::int32_t> int32_traits_t; typedef std::allocator_traits<std::allocator<char>>::rebind_traits<std::int64_t> int64_traits_t; std::int32_t *degrees = int32_traits_t::allocate(vertex_allocator, vertex_count); std::int32_t *cols = int32_traits_t::allocate(vertex_allocator, cols_count); std::int64_t *rows = int64_traits_t::allocate(edge_allocator, rows_count); std::int32_t *rows_vertex = 
int32_traits_t::allocate(vertex_allocator, rows_count); for (int i = 0; i < vertex_count; i++) { degrees[i] = graph_data.degrees[i]; } for (int i = 0; i < cols_count; i++) { cols[i] = graph_data.cols[i]; } for (int i = 0; i < rows_count; i++) { rows[i] = graph_data.rows[i]; rows_vertex[i] = graph_data.rows[i]; } graph_impl.set_topology(vertex_count, edge_count, rows, cols, cols_count, degrees); graph_impl.get_topology()._rows_vertex = oneapi::dal::preview::detail::container<std::int32_t>::wrap(rows_vertex, rows_count); return my_graph; } template <typename TargetGraphType, typename PatternGraphType> void check_subgraph_isomorphism(bool semantic_match, isomorphism_kind kind, std::int64_t max_match_count) { TargetGraphType target_graph_data; PatternGraphType pattern_graph_data; const auto target_graph = create_graph<TargetGraphType>(); const auto pattern_graph = create_graph<PatternGraphType>(); std::allocator<char> alloc; const auto subgraph_isomorphism_desc = dal::preview::subgraph_isomorphism::descriptor<>(alloc) .set_kind(kind) .set_semantic_match(semantic_match) .set_max_match_count(max_match_count); const auto result = dal::preview::graph_matching(subgraph_isomorphism_desc, target_graph, pattern_graph); } }; #define SUBGRAPH_ISOMORPHISM_BADARG_TEST(name) \ TEST_M(subgraph_isomorphism_badarg_test, name, "[subgraph_isomorphism][badarg]") SUBGRAPH_ISOMORPHISM_BADARG_TEST("Positive check") { REQUIRE_NOTHROW( this->check_subgraph_isomorphism<double_triangle_target_type, double_triangle_target_type>( false, isomorphism_kind::induced, 0)); } SUBGRAPH_ISOMORPHISM_BADARG_TEST("Empty target graph") { REQUIRE_THROWS_AS( (this->check_subgraph_isomorphism<empty_graph_type, double_triangle_target_type>( false, isomorphism_kind::induced, 0)), invalid_argument); } SUBGRAPH_ISOMORPHISM_BADARG_TEST("Empty pattern graph") { REQUIRE_THROWS_AS( (this->check_subgraph_isomorphism<double_triangle_target_type, empty_graph_type>( false, isomorphism_kind::induced, 0)), invalid_argument); } SUBGRAPH_ISOMORPHISM_BADARG_TEST("Throws if match count is negative") { REQUIRE_THROWS_AS( (this->check_subgraph_isomorphism<double_triangle_target_type, double_triangle_target_type>( false, isomorphism_kind::induced, -1)), invalid_argument); } SUBGRAPH_ISOMORPHISM_BADARG_TEST("Throws if match count is positive") { REQUIRE_THROWS_AS( (this->check_subgraph_isomorphism<double_triangle_target_type, double_triangle_target_type>( false, isomorphism_kind::induced, 1)), unimplemented); } // SUBGRAPH_ISOMORPHISM_BADARG_TEST("Throws if semantic match is true") { // REQUIRE_THROWS_AS( // (this->check_subgraph_isomorphism<double_triangle_target_type, double_triangle_target_type>( // true, // isomorphism_kind::induced, // 0)), // invalid_argument); // } } // namespace oneapi::dal::algo::subgraph_isomorphism::test
1
31,175
Just checking: do you really want to delete this?
oneapi-src-oneDAL
cpp
@@ -10,8 +10,10 @@ import ( "context" "os" + "github.com/golang/protobuf/proto" "github.com/iotexproject/iotex-core/actpool" "github.com/iotexproject/iotex-core/blockchain" + "github.com/iotexproject/iotex-core/blockchain/action" "github.com/iotexproject/iotex-core/blocksync" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/consensus"
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package itx import ( "context" "os" "github.com/iotexproject/iotex-core/actpool" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/blocksync" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/consensus" "github.com/iotexproject/iotex-core/dispatch" "github.com/iotexproject/iotex-core/dispatch/dispatcher" "github.com/iotexproject/iotex-core/explorer" "github.com/iotexproject/iotex-core/logger" "github.com/iotexproject/iotex-core/network" "github.com/pkg/errors" ) // Server is the iotex server instance containing all components. type Server struct { cfg *config.Config chain blockchain.Blockchain actPool actpool.ActPool p2p network.Overlay consensus consensus.Consensus blocksync blocksync.BlockSync dispatcher dispatcher.Dispatcher explorer *explorer.Server } // NewServer creates a new server func NewServer(cfg *config.Config) *Server { // create Blockchain chain := blockchain.NewBlockchain(cfg, blockchain.DefaultStateFactoryOption(), blockchain.BoltDBDaoOption()) if chain == nil && cfg.Chain.EnableFallBackToFreshDB { logger.Warn().Msg("Chain db and trie db are falling back to fresh ones") if err := os.Rename(cfg.Chain.ChainDBPath, cfg.Chain.ChainDBPath+".old"); err != nil { logger.Error().Err(err).Msg("Failed to rename old chain db") return nil } if err := os.Rename(cfg.Chain.TrieDBPath, cfg.Chain.TrieDBPath+".old"); err != nil { logger.Error().Err(err).Msg("Failed to rename old trie db") return nil } chain = blockchain.NewBlockchain(cfg, blockchain.DefaultStateFactoryOption(), blockchain.BoltDBDaoOption()) } return newServer(cfg, chain) } // NewInMemTestServer creates a test server in memory func NewInMemTestServer(cfg *config.Config) *Server { chain := blockchain.NewBlockchain(cfg, blockchain.InMemStateFactoryOption(), blockchain.InMemDaoOption()) return newServer(cfg, chain) } // Start starts the server func (s *Server) Start(ctx context.Context) error { if err := s.chain.Start(ctx); err != nil { return errors.Wrap(err, "error when starting blockchain") } if err := s.dispatcher.Start(ctx); err != nil { return errors.Wrap(err, "error when starting dispatcher") } if err := s.consensus.Start(ctx); err != nil { return errors.Wrap(err, "error when starting consensus") } if err := s.blocksync.Start(ctx); err != nil { return errors.Wrap(err, "error when starting blocksync") } if err := s.p2p.Start(ctx); err != nil { return errors.Wrap(err, "error when starting P2P networks") } if err := s.explorer.Start(ctx); err != nil { return errors.Wrap(err, "error when starting explorer") } return nil } // Stop stops the server func (s *Server) Stop(ctx context.Context) error { if err := s.explorer.Stop(ctx); err != nil { return errors.Wrap(err, "error when stopping explorer") } if err := s.p2p.Stop(ctx); err != nil { return errors.Wrap(err, "error when stopping P2P networks") } if err := s.consensus.Stop(ctx); err != nil { return errors.Wrap(err, "error when stopping consensus") } if err := s.blocksync.Stop(ctx); err != nil { return errors.Wrap(err, "error when stopping blocksync") } if err := s.dispatcher.Stop(ctx); err 
!= nil { return errors.Wrap(err, "error when stopping dispatcher") } if err := s.chain.Stop(ctx); err != nil { return errors.Wrap(err, "error when stopping blockchain") } return nil } // Blockchain returns the Blockchain func (s *Server) Blockchain() blockchain.Blockchain { return s.chain } // ActionPool returns the Action pool func (s *Server) ActionPool() actpool.ActPool { return s.actPool } // P2P returns the P2P network func (s *Server) P2P() network.Overlay { return s.p2p } // Dispatcher returns the Dispatcher func (s *Server) Dispatcher() dispatcher.Dispatcher { return s.dispatcher } // Consensus returns the consensus instance func (s *Server) Consensus() consensus.Consensus { return s.consensus } // BlockSync returns the block syncer func (s *Server) BlockSync() blocksync.BlockSync { return s.blocksync } // Explorer returns the explorer instance func (s *Server) Explorer() *explorer.Server { return s.explorer } func newServer(cfg *config.Config, chain blockchain.Blockchain) *Server { // create P2P network and BlockSync p2p := network.NewOverlay(&cfg.Network) // Create ActPool actPool, err := actpool.NewActPool(chain, cfg.ActPool) if err != nil { logger.Fatal().Err(err).Msg("Fail to create actpool") } bs, err := blocksync.NewBlockSyncer(cfg, chain, actPool, p2p) if err != nil { logger.Fatal().Err(err).Msg("Fail to create blockSyncer") } consensus := consensus.NewConsensus(cfg, chain, actPool, p2p) if consensus == nil { logger.Fatal().Msg("Failed to create Consensus") } // create dispatcher instance dispatcher, err := dispatch.NewDispatcher(cfg, actPool, bs, consensus) if err != nil { logger.Fatal().Err(err).Msg("Fail to create dispatcher") } p2p.AttachDispatcher(dispatcher) var exp *explorer.Server if cfg.Explorer.IsTest || os.Getenv("APP_ENV") == "development" { logger.Warn().Msg("Using test server with fake data...") exp = explorer.NewTestSever(cfg.Explorer) } else { exp = explorer.NewServer(cfg.Explorer, chain, consensus, dispatcher, actPool, p2p) } return &Server{ cfg: cfg, chain: chain, actPool: actPool, p2p: p2p, consensus: consensus, blocksync: bs, dispatcher: dispatcher, explorer: exp, } }
1
12,056
The `github.com/golang/protobuf/proto` import should go in the next import group rather than in this one; a grouping sketch follows this record.
iotexproject-iotex-core
go
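A minimal sketch (not part of the reviewed change) of one grouping that would satisfy the comment above: standard library imports first, then project-internal packages, then other third-party dependencies such as proto, each in its own blank-line-separated block. The exact grouping convention this repo enforces is an assumption; the dummy references at the bottom only keep the fragment compilable despite Go's unused-import check.

package itx

import (
	"context"
	"os"

	"github.com/iotexproject/iotex-core/actpool"
	"github.com/iotexproject/iotex-core/blockchain"

	"github.com/golang/protobuf/proto"
	"github.com/pkg/errors"
)

// Dummy references so this grouping sketch compiles on its own.
var (
	_ context.Context
	_ = os.Getenv
	_ actpool.ActPool
	_ blockchain.Blockchain
	_ = proto.Marshal
	_ = errors.New
)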
@@ -64,6 +64,7 @@ var rootSubcmdsDaemon = map[string]*cmds.Command{ "mining": miningCmd, "mpool": mpoolCmd, "orderbook": orderbookCmd, + "paych": paymentChannelCmd, "ping": pingCmd, "show": showCmd, "swarm": swarmCmd,
1
package commands import ( "context" "net" "os" "gx/ipfs/QmUf5GFfV2Be3UtSAPKDVkoRd1TwEBTmx9TSSCFGGjNgdQ/go-ipfs-cmds" cmdhttp "gx/ipfs/QmUf5GFfV2Be3UtSAPKDVkoRd1TwEBTmx9TSSCFGGjNgdQ/go-ipfs-cmds/http" "gx/ipfs/QmceUdzxkimdYsgtX733uNgzf1DLHyBKN6ehGSp85ayppM/go-ipfs-cmdkit" ) const ( // OptionAPI is the name of the option for specifying the api port. OptionAPI = "cmdapiaddr" // OptionRepoDir is the name of the option for specifying the directory of the repo. OptionRepoDir = "repodir" // APIPrefix is the prefix for the http version of the api. APIPrefix = "/api" // OfflineMode tells us if we should try to connect this Filecoin node to the network OfflineMode = "offline" // SwarmListen is the multiaddr for this Filecoin node SwarmListen = "swarmlisten" ) func defaultAPIAddr() string { // Until we have a config file, we need an easy way to influence the API // address for testing if envapi := os.Getenv("FIL_API"); envapi != "" { return envapi } return ":3453" } var rootCmd = &cmds.Command{ Helptext: cmdkit.HelpText{ Tagline: "A decentralized storage network", }, Options: []cmdkit.Option{ cmdkit.StringOption(OptionAPI, "set the api port to use").WithDefault(defaultAPIAddr()), cmdkit.StringOption(OptionRepoDir, "set the directory of the reop, defaults to ~/.filecoin"), cmds.OptionEncodingType, cmdkit.BoolOption("help", "Show the full command help text."), cmdkit.BoolOption("h", "Show a short version of the command help text."), }, Subcommands: make(map[string]*cmds.Command), } // all top level commands. set during init() to avoid configuration loops. var rootSubcmdsDaemon = map[string]*cmds.Command{ "actor": actorCmd, "address": addrsCmd, "bootstrap": bootstrapCmd, "chain": chainCmd, "client": clientCmd, "daemon": daemonCmd, "dag": dagCmd, "id": idCmd, "init": initCmd, "log": logCmd, "message": msgCmd, "miner": minerCmd, "mining": miningCmd, "mpool": mpoolCmd, "orderbook": orderbookCmd, "ping": pingCmd, "show": showCmd, "swarm": swarmCmd, "version": versionCmd, "wallet": walletCmd, } func init() { for k, v := range rootSubcmdsDaemon { rootCmd.Subcommands[k] = v } } // Run processes the arguments and stdin func Run(args []string, stdin, stdout, stderr *os.File) (int, error) { return CliRun(context.Background(), rootCmd, args, stdin, stdout, stderr, buildEnv, makeExecutor) } func buildEnv(ctx context.Context, req *cmds.Request) (cmds.Environment, error) { return &Env{ctx: ctx}, nil } type executor struct { api string running bool exec cmds.Executor } func (e *executor) Execute(req *cmds.Request, re cmds.ResponseEmitter, env cmds.Environment) error { if !e.running { return e.exec.Execute(req, re, env) } client := cmdhttp.NewClient(e.api, cmdhttp.ClientWithAPIPrefix(APIPrefix)) res, err := client.Send(req) if err != nil { return err } // send request to server wait := make(chan struct{}) // copy received result into cli emitter go func() { err := cmds.Copy(re, res) if err != nil { re.SetError(err, cmdkit.ErrNormal|cmdkit.ErrFatal) } close(wait) }() <-wait return nil } func makeExecutor(req *cmds.Request, env interface{}) (cmds.Executor, error) { api := req.Options[OptionAPI].(string) isDaemonRunning, err := daemonRunning(api) if err != nil { return nil, err } if isDaemonRunning && req.Command == daemonCmd { return nil, ErrAlreadyRunning } if !isDaemonRunning && requiresDaemon(req) { return nil, ErrMissingDaemon } return &executor{ api: api, exec: cmds.NewExecutor(rootCmd), running: isDaemonRunning, }, nil } func requiresDaemon(req *cmds.Request) bool { if req.Command == daemonCmd { return false } if 
req.Command == initCmd { return false } return true } func daemonRunning(api string) (bool, error) { // TODO: use lockfile once implemented // for now we just check if the port is available ln, err := net.Listen("tcp", api) if err != nil { return true, nil } if err := ln.Close(); err != nil { return false, err } return false, nil }
1
12,155
Nit: what about just `pay`? Is there some other subcommand it would conflict with? (A quick check against the existing names follows this record.)
filecoin-project-venus
go
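The conflict question in the comment above can be answered mechanically; the stand-alone sketch below checks the candidate name against the subcommand names visible in rootSubcmdsDaemon. It is only an illustration, under the assumption that no other table registers daemon subcommands.

package main

import "fmt"

// Names copied from rootSubcmdsDaemon in the file above; paymentChannelCmd
// itself is whatever the patch registers under "paych".
func main() {
	existing := []string{
		"actor", "address", "bootstrap", "chain", "client", "daemon", "dag",
		"id", "init", "log", "message", "miner", "mining", "mpool",
		"orderbook", "ping", "show", "swarm", "version", "wallet",
	}
	candidate := "pay"
	for _, name := range existing {
		if name == candidate {
			fmt.Printf("%q is already taken\n", candidate)
			return
		}
	}
	fmt.Printf("%q does not conflict with any existing daemon subcommand\n", candidate)
}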
@@ -55,8 +55,13 @@ public class Scalars { if (!(input instanceof StringValue)) { throw new CoercingParseLiteralException("Value is not any Address : '" + input + "'"); } + String inputValue = ((StringValue) input).getValue(); + if (!inputValue.startsWith("0x")) { + throw new CoercingParseLiteralException( + "Address value '" + inputValue + "' is not prefixed with 0x"); + } try { - return Address.fromHexStringStrict(((StringValue) input).getValue()); + return Address.fromHexStringStrict(inputValue); } catch (final IllegalArgumentException e) { throw new CoercingParseLiteralException("Value is not any Address : '" + input + "'"); }
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.api.graphql.internal; import org.hyperledger.besu.ethereum.core.Address; import org.hyperledger.besu.ethereum.core.Hash; import graphql.language.IntValue; import graphql.language.StringValue; import graphql.schema.Coercing; import graphql.schema.CoercingParseLiteralException; import graphql.schema.CoercingParseValueException; import graphql.schema.CoercingSerializeException; import graphql.schema.GraphQLScalarType; import org.apache.tuweni.bytes.Bytes; import org.apache.tuweni.bytes.Bytes32; import org.apache.tuweni.units.bigints.UInt256; import org.apache.tuweni.units.bigints.UInt256Value; public class Scalars { private static final Coercing<Object, Object> ADDRESS_COERCING = new Coercing<Object, Object>() { @Override public String serialize(final Object input) throws CoercingSerializeException { if (input instanceof Address) { return input.toString(); } throw new CoercingSerializeException("Unable to serialize " + input + " as an Address"); } @Override public String parseValue(final Object input) throws CoercingParseValueException { if (input instanceof Address) { return input.toString(); } throw new CoercingParseValueException( "Unable to parse variable value " + input + " as an Address"); } @Override public Address parseLiteral(final Object input) throws CoercingParseLiteralException { if (!(input instanceof StringValue)) { throw new CoercingParseLiteralException("Value is not any Address : '" + input + "'"); } try { return Address.fromHexStringStrict(((StringValue) input).getValue()); } catch (final IllegalArgumentException e) { throw new CoercingParseLiteralException("Value is not any Address : '" + input + "'"); } } }; private static final Coercing<Object, Object> BIG_INT_COERCING = new Coercing<Object, Object>() { @Override public String serialize(final Object input) throws CoercingSerializeException { if (input instanceof UInt256Value) { return ((UInt256Value) input).toShortHexString(); } throw new CoercingSerializeException("Unable to serialize " + input + " as an BigInt"); } @Override public String parseValue(final Object input) throws CoercingParseValueException { if (input instanceof UInt256Value) { return ((UInt256Value) input).toShortHexString(); } throw new CoercingParseValueException( "Unable to parse variable value " + input + " as an BigInt"); } @Override public UInt256 parseLiteral(final Object input) throws CoercingParseLiteralException { try { if (input instanceof StringValue) { return UInt256.fromHexString(((StringValue) input).getValue()); } else if (input instanceof IntValue) { return UInt256.valueOf(((IntValue) input).getValue()); } } catch (final IllegalArgumentException e) { // fall through } throw new CoercingParseLiteralException("Value is not any BigInt : '" + input + "'"); } }; private static final Coercing<Object, Object> BYTES_COERCING = new Coercing<Object, Object>() { @Override public String serialize(final Object input) 
throws CoercingSerializeException { if (input instanceof Bytes) { return input.toString(); } throw new CoercingSerializeException("Unable to serialize " + input + " as an Bytes"); } @Override public String parseValue(final Object input) throws CoercingParseValueException { if (input instanceof Bytes) { return input.toString(); } throw new CoercingParseValueException( "Unable to parse variable value " + input + " as an Bytes"); } @Override public Bytes parseLiteral(final Object input) throws CoercingParseLiteralException { if (!(input instanceof StringValue)) { throw new CoercingParseLiteralException("Value is not any Bytes : '" + input + "'"); } try { return Bytes.fromHexStringLenient(((StringValue) input).getValue()); } catch (final IllegalArgumentException e) { throw new CoercingParseLiteralException("Value is not any Bytes : '" + input + "'"); } } }; private static final Coercing<Object, Object> BYTES32_COERCING = new Coercing<Object, Object>() { @Override public String serialize(final Object input) throws CoercingSerializeException { if (input instanceof Hash) { return ((Hash) input).toString(); } if (input instanceof Bytes32) { return input.toString(); } throw new CoercingSerializeException("Unable to serialize " + input + " as an Bytes32"); } @Override public String parseValue(final Object input) throws CoercingParseValueException { if (input instanceof Bytes32) { return input.toString(); } throw new CoercingParseValueException( "Unable to parse variable value " + input + " as an Bytes32"); } @Override public Bytes32 parseLiteral(final Object input) throws CoercingParseLiteralException { if (!(input instanceof StringValue)) { throw new CoercingParseLiteralException("Value is not any Bytes32 : '" + input + "'"); } try { return Bytes32.fromHexStringLenient(((StringValue) input).getValue()); } catch (final IllegalArgumentException e) { throw new CoercingParseLiteralException("Value is not any Bytes32 : '" + input + "'"); } } }; private static final Coercing<Object, Object> LONG_COERCING = new Coercing<Object, Object>() { @Override public Number serialize(final Object input) throws CoercingSerializeException { if (input instanceof Number) { return (Number) input; } else if (input instanceof String) { final String value = ((String) input).toLowerCase(); if (value.startsWith("0x")) { return Bytes.fromHexStringLenient(value).toLong(); } else { return Long.parseLong(value); } } throw new CoercingSerializeException("Unable to serialize " + input + " as an Long"); } @Override public Number parseValue(final Object input) throws CoercingParseValueException { if (input instanceof Number) { return (Number) input; } else if (input instanceof String) { final String value = ((String) input).toLowerCase(); if (value.startsWith("0x")) { return Bytes.fromHexStringLenient(value).toLong(); } else { return Long.parseLong(value); } } throw new CoercingParseValueException( "Unable to parse variable value " + input + " as an Long"); } @Override public Object parseLiteral(final Object input) throws CoercingParseLiteralException { try { if (input instanceof IntValue) { return ((IntValue) input).getValue().longValue(); } else if (input instanceof StringValue) { final String value = ((StringValue) input).getValue().toLowerCase(); if (value.startsWith("0x")) { return Bytes.fromHexStringLenient(value).toLong(); } else { return Long.parseLong(value); } } } catch (final NumberFormatException e) { // fall through } throw new CoercingParseLiteralException("Value is not any Long : '" + input + "'"); } }; public static 
GraphQLScalarType addressScalar() { return GraphQLScalarType.newScalar() .name("Address") .description("Address scalar") .coercing(ADDRESS_COERCING) .build(); } public static GraphQLScalarType bigIntScalar() { return GraphQLScalarType.newScalar() .name("BigInt") .description("A BigInt (UInt256) scalar") .coercing(BIG_INT_COERCING) .build(); } public static GraphQLScalarType bytesScalar() { return GraphQLScalarType.newScalar() .name("Bytes") .description("A Bytes scalar") .coercing(BYTES_COERCING) .build(); } public static GraphQLScalarType bytes32Scalar() { return GraphQLScalarType.newScalar() .name("Bytes32") .description("A Bytes32 scalar") .coercing(BYTES32_COERCING) .build(); } public static GraphQLScalarType longScalar() { return GraphQLScalarType.newScalar() .name("Long") .description("A Long (UInt64) scalar") .coercing(LONG_COERCING) .build(); } }
1
22,834
Just for the sake of keeping things logically co-located, I'd like to see this functionality in `Quantity.java`, which has a lot of utility methods related to this. Maybe a `static` method like `Quantity.isValid(String string)`?
hyperledger-besu
java
@@ -72,7 +72,7 @@ public abstract class FlinkTestBase extends AbstractTestBase { return tEnv; } - List<Object[]> sql(String query, Object... args) { + public List<Object[]> sql(String query, Object... args) { TableResult tableResult = getTableEnv().executeSql(String.format(query, args)); tableResult.getJobClient().ifPresent(c -> { try {
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.flink; import java.util.List; import java.util.concurrent.ExecutionException; import java.util.stream.IntStream; import org.apache.flink.table.api.EnvironmentSettings; import org.apache.flink.table.api.TableEnvironment; import org.apache.flink.table.api.TableResult; import org.apache.flink.test.util.AbstractTestBase; import org.apache.flink.types.Row; import org.apache.flink.util.CloseableIterator; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.iceberg.hive.HiveCatalog; import org.apache.iceberg.hive.TestHiveMetastore; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.junit.AfterClass; import org.junit.BeforeClass; public abstract class FlinkTestBase extends AbstractTestBase { private static TestHiveMetastore metastore = null; protected static HiveConf hiveConf = null; protected static HiveCatalog catalog = null; private volatile TableEnvironment tEnv = null; @BeforeClass public static void startMetastore() { FlinkTestBase.metastore = new TestHiveMetastore(); metastore.start(); FlinkTestBase.hiveConf = metastore.hiveConf(); FlinkTestBase.catalog = new HiveCatalog(metastore.hiveConf()); } @AfterClass public static void stopMetastore() { metastore.stop(); catalog.close(); FlinkTestBase.catalog = null; } protected TableEnvironment getTableEnv() { if (tEnv == null) { synchronized (this) { if (tEnv == null) { this.tEnv = TableEnvironment.create(EnvironmentSettings .newInstance() .useBlinkPlanner() .inBatchMode().build()); } } } return tEnv; } List<Object[]> sql(String query, Object... args) { TableResult tableResult = getTableEnv().executeSql(String.format(query, args)); tableResult.getJobClient().ifPresent(c -> { try { c.getJobExecutionResult(Thread.currentThread().getContextClassLoader()).get(); } catch (InterruptedException | ExecutionException e) { throw new RuntimeException(e); } }); CloseableIterator<Row> iter = tableResult.collect(); List<Object[]> results = Lists.newArrayList(); while (iter.hasNext()) { Row row = iter.next(); results.add(IntStream.range(0, row.getArity()).mapToObj(row::getField).toArray(Object[]::new)); } return results; } }
1
26,653
How about using `protected`?
apache-iceberg
java
@@ -32,6 +32,12 @@ func (ih *IntermediateHashes) WillUnloadBranchNode(prefixAsNibbles []byte, nodeH if len(prefixAsNibbles) == 0 || len(prefixAsNibbles)%2 == 1 { return } + + // special case. Store Account.Root for long time + if len(prefixAsNibbles) == common.HashLength*2 { + return + } + InsertCounter.Inc(1) buf := pool.GetBuffer(keyBufferSize)
1
package state import ( "github.com/ledgerwatch/turbo-geth/common" "github.com/ledgerwatch/turbo-geth/common/dbutils" "github.com/ledgerwatch/turbo-geth/common/pool" "github.com/ledgerwatch/turbo-geth/ethdb" "github.com/ledgerwatch/turbo-geth/log" "github.com/ledgerwatch/turbo-geth/metrics" "github.com/ledgerwatch/turbo-geth/trie" ) var ( InsertCounter = metrics.NewRegisteredCounter("db/ih/insert", nil) DeleteCounter = metrics.NewRegisteredCounter("db/ih/delete", nil) ) const keyBufferSize = 64 type IntermediateHashes struct { trie.NoopObserver // make sure that we don't need to subscribe to unnecessary methods putter ethdb.Putter deleter ethdb.Deleter } func NewIntermediateHashes(putter ethdb.Putter, deleter ethdb.Deleter) *IntermediateHashes { return &IntermediateHashes{putter: putter, deleter: deleter} } func (ih *IntermediateHashes) WillUnloadBranchNode(prefixAsNibbles []byte, nodeHash common.Hash, incarnation uint64) { // only put to bucket prefixes with even number of nibbles if len(prefixAsNibbles) == 0 || len(prefixAsNibbles)%2 == 1 { return } InsertCounter.Inc(1) buf := pool.GetBuffer(keyBufferSize) defer pool.PutBuffer(buf) trie.CompressNibbles(prefixAsNibbles, &buf.B) var key []byte if len(buf.B) > common.HashLength { if incarnation == 0 { panic("0 incarnation") } key = dbutils.GenerateCompositeStoragePrefix(buf.B[:common.HashLength], incarnation, buf.B[common.HashLength:]) } else { key = common.CopyBytes(buf.B) } if err := ih.putter.Put(dbutils.IntermediateTrieHashBucket, key, common.CopyBytes(nodeHash[:])); err != nil { log.Warn("could not put intermediate trie hash", "err", err) } } func (ih *IntermediateHashes) BranchNodeLoaded(prefixAsNibbles []byte, incarnation uint64) { // only put to bucket prefixes with even number of nibbles if len(prefixAsNibbles) == 0 || len(prefixAsNibbles)%2 == 1 { return } DeleteCounter.Inc(1) buf := pool.GetBuffer(keyBufferSize) defer pool.PutBuffer(buf) trie.CompressNibbles(prefixAsNibbles, &buf.B) var key []byte if len(buf.B) > common.HashLength { if incarnation == 0 { panic("0 incarnation") } key = dbutils.GenerateCompositeStoragePrefix(buf.B[:common.HashLength], incarnation, buf.B[common.HashLength:]) } else { key = common.CopyBytes(buf.B) } if err := ih.deleter.Delete(dbutils.IntermediateTrieHashBucket, key); err != nil { log.Warn("could not delete intermediate trie hash", "err", err) return } }
1
21,470
Should it not be `common.HashLength*2 + common.IncarnationLength`? (Both variants are sketched after this record.)
ledgerwatch-erigon
go
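A minimal sketch contrasting the length check added in the patch with the variant the reviewer asks about; it assumes the `common.HashLength` and `common.IncarnationLength` constants the reviewer references and does not assert which condition is the intended special case.

package state

import "github.com/ledgerwatch/turbo-geth/common"

// prefixAsNibbles stores one nibble per element, so an account hash alone is
// common.HashLength*2 nibbles long. The patch special-cases exactly that
// length; the reviewer asks whether the incarnation suffix should be counted
// as well. Both variants are shown side by side; neither is asserted here.
func isPatchSpecialCase(prefixAsNibbles []byte) bool {
	return len(prefixAsNibbles) == common.HashLength*2
}

func isReviewerVariant(prefixAsNibbles []byte) bool {
	return len(prefixAsNibbles) == common.HashLength*2+common.IncarnationLength
}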
@@ -422,7 +422,8 @@ public class FlowRunner extends EventHandler implements Runnable { // If a job is seen as failed or killed due to failing SLA, then we set the parent flow to // FAILED_FINISHING - if (node.getStatus() == Status.FAILED || (node.getStatus() == Status.KILLED && node.isKilledBySLA())) { + if (node.getStatus() == Status.FAILED || (node.getStatus() == Status.KILLED && node + .isKilledBySLA())) { // The job cannot be retried or has run out of retry attempts. We will // fail the job and its flow now. if (!retryJobIfPossible(node)) {
1
/* * Copyright 2013 LinkedIn Corp * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.execapp; import azkaban.ServiceProvider; import azkaban.event.Event; import azkaban.event.Event.Type; import azkaban.event.EventData; import azkaban.event.EventHandler; import azkaban.event.EventListener; import azkaban.execapp.event.FlowWatcher; import azkaban.execapp.event.JobCallbackManager; import azkaban.execapp.jmx.JmxJobMBeanManager; import azkaban.execapp.metric.NumFailedJobMetric; import azkaban.execapp.metric.NumRunningJobMetric; import azkaban.executor.ExecutableFlow; import azkaban.executor.ExecutableFlowBase; import azkaban.executor.ExecutableNode; import azkaban.executor.ExecutionOptions; import azkaban.executor.ExecutionOptions.FailureAction; import azkaban.executor.ExecutorLoader; import azkaban.executor.ExecutorManagerException; import azkaban.executor.Status; import azkaban.flow.FlowProps; import azkaban.jobExecutor.ProcessJob; import azkaban.jobtype.JobTypeManager; import azkaban.metric.MetricReportManager; import azkaban.project.ProjectLoader; import azkaban.project.ProjectManagerException; import azkaban.sla.SlaOption; import azkaban.utils.Props; import azkaban.utils.PropsUtils; import azkaban.utils.SwapQueue; import com.google.common.collect.ImmutableSet; import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.RejectedExecutionException; import org.apache.log4j.Appender; import org.apache.log4j.FileAppender; import org.apache.log4j.Layout; import org.apache.log4j.Logger; import org.apache.log4j.PatternLayout; /** * Class that handles the running of a ExecutableFlow DAG */ public class FlowRunner extends EventHandler implements Runnable { private static final Layout DEFAULT_LAYOUT = new PatternLayout( "%d{dd-MM-yyyy HH:mm:ss z} %c{1} %p - %m\n"); // We check update every 5 minutes, just in case things get stuck. But for the // most part, we'll be idling. 
private static final long CHECK_WAIT_MS = 5 * 60 * 1000; private final ExecutableFlow flow; // Sync object for queuing private final Object mainSyncObj = new Object(); private final JobTypeManager jobtypeManager; private final Layout loggerLayout = DEFAULT_LAYOUT; private final ExecutorLoader executorLoader; private final ProjectLoader projectLoader; private final int execId; private final File execDir; private final ExecutionOptions.FailureAction failureAction; // Properties map private final Props azkabanProps; private final Map<String, Props> sharedProps = new HashMap<>(); private final JobRunnerEventListener listener = new JobRunnerEventListener(); private final Set<JobRunner> activeJobRunners = Collections .newSetFromMap(new ConcurrentHashMap<JobRunner, Boolean>()); // Thread safe swap queue for finishedExecutions. private final SwapQueue<ExecutableNode> finishedNodes; private Logger logger; private Appender flowAppender; private File logFile; private ExecutorService executorService; private Thread flowRunnerThread; private int numJobThreads = 10; // Used for pipelining private Integer pipelineLevel = null; private Integer pipelineExecId = null; // Watches external flows for execution. private FlowWatcher watcher = null; private Set<String> proxyUsers = null; private boolean validateUserProxy; private String jobLogFileSize = "5MB"; private int jobLogNumFiles = 4; private boolean flowPaused = false; private boolean flowFailed = false; private boolean flowFinished = false; private boolean flowKilled = false; // The following is state that will trigger a retry of all failed jobs private boolean retryFailedJobs = false; /** * Constructor. This will create its own ExecutorService for thread pools */ public FlowRunner(final ExecutableFlow flow, final ExecutorLoader executorLoader, final ProjectLoader projectLoader, final JobTypeManager jobtypeManager, final Props azkabanProps) throws ExecutorManagerException { this(flow, executorLoader, projectLoader, jobtypeManager, null, azkabanProps); } /** * Constructor. If executorService is null, then it will create it's own for thread pools. 
*/ public FlowRunner(final ExecutableFlow flow, final ExecutorLoader executorLoader, final ProjectLoader projectLoader, final JobTypeManager jobtypeManager, final ExecutorService executorService, final Props azkabanProps) throws ExecutorManagerException { this.execId = flow.getExecutionId(); this.flow = flow; this.executorLoader = executorLoader; this.projectLoader = projectLoader; this.execDir = new File(flow.getExecutionPath()); this.jobtypeManager = jobtypeManager; final ExecutionOptions options = flow.getExecutionOptions(); this.pipelineLevel = options.getPipelineLevel(); this.pipelineExecId = options.getPipelineExecutionId(); this.failureAction = options.getFailureAction(); this.proxyUsers = flow.getProxyUsers(); this.executorService = executorService; this.finishedNodes = new SwapQueue<>(); this.azkabanProps = azkabanProps; // Create logger and execution dir in flowRunner initialization instead of flow runtime to avoid NPE // where the uninitialized logger is used in flow preparing state createLogger(this.flow.getFlowId()); } public FlowRunner setFlowWatcher(final FlowWatcher watcher) { this.watcher = watcher; return this; } public FlowRunner setNumJobThreads(final int jobs) { this.numJobThreads = jobs; return this; } public FlowRunner setJobLogSettings(final String jobLogFileSize, final int jobLogNumFiles) { this.jobLogFileSize = jobLogFileSize; this.jobLogNumFiles = jobLogNumFiles; return this; } public FlowRunner setValidateProxyUser(final boolean validateUserProxy) { this.validateUserProxy = validateUserProxy; return this; } public File getExecutionDir() { return this.execDir; } @Override public void run() { try { if (this.executorService == null) { this.executorService = Executors.newFixedThreadPool(this.numJobThreads); } setupFlowExecution(); this.flow.setStartTime(System.currentTimeMillis()); this.logger.info("Updating initial flow directory."); updateFlow(); this.logger.info("Fetching job and shared properties."); loadAllProperties(); this.fireEventListeners( Event.create(this, Type.FLOW_STARTED, new EventData(this.getExecutableFlow()))); runFlow(); } catch (final Throwable t) { if (this.logger != null) { this.logger .error( "An error has occurred during the running of the flow. Quiting.", t); } this.flow.setStatus(Status.FAILED); } finally { if (this.watcher != null) { this.logger.info("Watcher is attached. Stopping watcher."); this.watcher.stopWatcher(); this.logger .info("Watcher cancelled status is " + this.watcher.isWatchCancelled()); } this.flow.setEndTime(System.currentTimeMillis()); this.logger.info("Setting end time for flow " + this.execId + " to " + System.currentTimeMillis()); closeLogger(); updateFlow(); this.fireEventListeners(Event.create(this, Type.FLOW_FINISHED, new EventData(this.flow))); } } private void setupFlowExecution() { final int projectId = this.flow.getProjectId(); final int version = this.flow.getVersion(); final String flowId = this.flow.getFlowId(); // Add a bunch of common azkaban properties Props commonFlowProps = PropsUtils.addCommonFlowProperties(null, this.flow); if (this.flow.getJobSource() != null) { final String source = this.flow.getJobSource(); final Props flowProps = this.sharedProps.get(source); flowProps.setParent(commonFlowProps); commonFlowProps = flowProps; } // If there are flow overrides, we apply them now. 
final Map<String, String> flowParam = this.flow.getExecutionOptions().getFlowParameters(); if (flowParam != null && !flowParam.isEmpty()) { commonFlowProps = new Props(commonFlowProps, flowParam); } this.flow.setInputProps(commonFlowProps); if (this.watcher != null) { this.watcher.setLogger(this.logger); } // Avoid NPE in unit tests when the static app instance is not set if (AzkabanExecutorServer.getApp() != null) { this.logger .info("Assigned executor : " + AzkabanExecutorServer.getApp().getExecutorHostPort()); } this.logger.info("Running execid:" + this.execId + " flow:" + flowId + " project:" + projectId + " version:" + version); if (this.pipelineExecId != null) { this.logger.info("Running simulateously with " + this.pipelineExecId + ". Pipelining level " + this.pipelineLevel); } // The current thread is used for interrupting blocks this.flowRunnerThread = Thread.currentThread(); this.flowRunnerThread.setName("FlowRunner-exec-" + this.flow.getExecutionId()); } private void updateFlow() { updateFlow(System.currentTimeMillis()); } private synchronized void updateFlow(final long time) { try { this.flow.setUpdateTime(time); this.executorLoader.updateExecutableFlow(this.flow); } catch (final ExecutorManagerException e) { this.logger.error("Error updating flow.", e); } } /** * setup logger and execution dir for the flowId */ private void createLogger(final String flowId) { // Create logger final String loggerName = this.execId + "." + flowId; this.logger = Logger.getLogger(loggerName); // Create file appender final String logName = "_flow." + loggerName + ".log"; this.logFile = new File(this.execDir, logName); final String absolutePath = this.logFile.getAbsolutePath(); this.flowAppender = null; try { this.flowAppender = new FileAppender(this.loggerLayout, absolutePath, false); this.logger.addAppender(this.flowAppender); } catch (final IOException e) { this.logger.error("Could not open log file in " + this.execDir, e); } } private void closeLogger() { if (this.logger != null) { this.logger.removeAppender(this.flowAppender); this.flowAppender.close(); try { this.executorLoader.uploadLogFile(this.execId, "", 0, this.logFile); } catch (final ExecutorManagerException e) { e.printStackTrace(); } } } private void loadAllProperties() throws IOException { // First load all the properties for (final FlowProps fprops : this.flow.getFlowProps()) { final String source = fprops.getSource(); final File propsPath = new File(this.execDir, source); final Props props = new Props(null, propsPath); this.sharedProps.put(source, props); } // Resolve parents for (final FlowProps fprops : this.flow.getFlowProps()) { if (fprops.getInheritedSource() != null) { final String source = fprops.getSource(); final String inherit = fprops.getInheritedSource(); final Props props = this.sharedProps.get(source); final Props inherits = this.sharedProps.get(inherit); props.setParent(inherits); } } } /** * Main method that executes the jobs. */ private void runFlow() throws Exception { this.logger.info("Starting flows"); runReadyJob(this.flow); updateFlow(); while (!this.flowFinished) { synchronized (this.mainSyncObj) { if (this.flowPaused) { try { this.mainSyncObj.wait(CHECK_WAIT_MS); } catch (final InterruptedException e) { } continue; } else { if (this.retryFailedJobs) { retryAllFailures(); } else if (!progressGraph()) { try { this.mainSyncObj.wait(CHECK_WAIT_MS); } catch (final InterruptedException e) { } } } } } this.logger.info("Finishing up flow. 
Awaiting Termination"); this.executorService.shutdown(); updateFlow(); this.logger.info("Finished Flow"); } private void retryAllFailures() throws IOException { this.logger.info("Restarting all failed jobs"); this.retryFailedJobs = false; this.flowKilled = false; this.flowFailed = false; this.flow.setStatus(Status.RUNNING); final ArrayList<ExecutableNode> retryJobs = new ArrayList<>(); resetFailedState(this.flow, retryJobs); for (final ExecutableNode node : retryJobs) { if (node.getStatus() == Status.READY || node.getStatus() == Status.DISABLED) { runReadyJob(node); } else if (node.getStatus() == Status.SUCCEEDED) { for (final String outNodeId : node.getOutNodes()) { final ExecutableFlowBase base = node.getParentFlow(); runReadyJob(base.getExecutableNode(outNodeId)); } } runReadyJob(node); } updateFlow(); } private boolean progressGraph() throws IOException { this.finishedNodes.swap(); // The following nodes are finished, so we'll collect a list of outnodes // that are candidates for running next. final HashSet<ExecutableNode> nodesToCheck = new HashSet<>(); for (final ExecutableNode node : this.finishedNodes) { Set<String> outNodeIds = node.getOutNodes(); ExecutableFlowBase parentFlow = node.getParentFlow(); // If a job is seen as failed or killed due to failing SLA, then we set the parent flow to // FAILED_FINISHING if (node.getStatus() == Status.FAILED || (node.getStatus() == Status.KILLED && node.isKilledBySLA())) { // The job cannot be retried or has run out of retry attempts. We will // fail the job and its flow now. if (!retryJobIfPossible(node)) { propagateStatus(node.getParentFlow(), Status.FAILED_FINISHING); if (this.failureAction == FailureAction.CANCEL_ALL) { this.kill(); } this.flowFailed = true; } else { nodesToCheck.add(node); continue; } } if (outNodeIds.isEmpty()) { // There's no outnodes means it's the end of a flow, so we finalize // and fire an event. finalizeFlow(parentFlow); finishExecutableNode(parentFlow); // If the parent has a parent, then we process if (!(parentFlow instanceof ExecutableFlow)) { outNodeIds = parentFlow.getOutNodes(); parentFlow = parentFlow.getParentFlow(); } } // Add all out nodes from the finished job. We'll check against this set // to // see if any are candidates for running. for (final String nodeId : outNodeIds) { final ExecutableNode outNode = parentFlow.getExecutableNode(nodeId); nodesToCheck.add(outNode); } } // Runs candidate jobs. The code will check to see if they are ready to run // before // Instant kill or skip if necessary. boolean jobsRun = false; for (final ExecutableNode node : nodesToCheck) { if (Status.isStatusFinished(node.getStatus()) || Status.isStatusRunning(node.getStatus())) { // Really shouldn't get in here. 
continue; } jobsRun |= runReadyJob(node); } if (jobsRun || this.finishedNodes.getSize() > 0) { updateFlow(); return true; } return false; } private boolean runReadyJob(final ExecutableNode node) throws IOException { if (Status.isStatusFinished(node.getStatus()) || Status.isStatusRunning(node.getStatus())) { return false; } final Status nextNodeStatus = getImpliedStatus(node); if (nextNodeStatus == null) { return false; } if (nextNodeStatus == Status.CANCELLED) { this.logger.info("Cancelling '" + node.getNestedId() + "' due to prior errors."); node.cancelNode(System.currentTimeMillis()); finishExecutableNode(node); } else if (nextNodeStatus == Status.SKIPPED) { this.logger.info("Skipping disabled job '" + node.getId() + "'."); node.skipNode(System.currentTimeMillis()); finishExecutableNode(node); } else if (nextNodeStatus == Status.READY) { if (node instanceof ExecutableFlowBase) { final ExecutableFlowBase flow = ((ExecutableFlowBase) node); this.logger.info("Running flow '" + flow.getNestedId() + "'."); flow.setStatus(Status.RUNNING); flow.setStartTime(System.currentTimeMillis()); prepareJobProperties(flow); for (final String startNodeId : ((ExecutableFlowBase) node).getStartNodes()) { final ExecutableNode startNode = flow.getExecutableNode(startNodeId); runReadyJob(startNode); } } else { runExecutableNode(node); } } return true; } private boolean retryJobIfPossible(final ExecutableNode node) { if (node instanceof ExecutableFlowBase) { return false; } if (node.getRetries() > node.getAttempt()) { this.logger.info("Job '" + node.getId() + "' will be retried. Attempt " + node.getAttempt() + " of " + node.getRetries()); node.setDelayedExecution(node.getRetryBackoff()); node.resetForRetry(); return true; } else { if (node.getRetries() > 0) { this.logger.info("Job '" + node.getId() + "' has run out of retry attempts"); // Setting delayed execution to 0 in case this is manually re-tried. node.setDelayedExecution(0); } return false; } } private void propagateStatus(final ExecutableFlowBase base, final Status status) { if (!Status.isStatusFinished(base.getStatus())) { this.logger.info("Setting " + base.getNestedId() + " to " + status); base.setStatus(status); if (base.getParentFlow() != null) { propagateStatus(base.getParentFlow(), status); } } } private void finishExecutableNode(final ExecutableNode node) { this.finishedNodes.add(node); final EventData eventData = new EventData(node.getStatus(), node.getNestedId()); fireEventListeners(Event.create(this, Type.JOB_FINISHED, eventData)); } private void finalizeFlow(final ExecutableFlowBase flow) { final String id = flow == this.flow ? "" : flow.getNestedId(); // If it's not the starting flow, we'll create set of output props // for the finished flow. 
boolean succeeded = true; Props previousOutput = null; for (final String end : flow.getEndNodes()) { final ExecutableNode node = flow.getExecutableNode(end); if (node.getStatus() == Status.KILLED || node.getStatus() == Status.FAILED || node.getStatus() == Status.CANCELLED) { succeeded = false; } Props output = node.getOutputProps(); if (output != null) { output = Props.clone(output); output.setParent(previousOutput); previousOutput = output; } } flow.setOutputProps(previousOutput); if (!succeeded && (flow.getStatus() == Status.RUNNING)) { flow.setStatus(Status.KILLED); } flow.setEndTime(System.currentTimeMillis()); flow.setUpdateTime(System.currentTimeMillis()); final long durationSec = (flow.getEndTime() - flow.getStartTime()) / 1000; switch (flow.getStatus()) { case FAILED_FINISHING: this.logger.info("Setting flow '" + id + "' status to FAILED in " + durationSec + " seconds"); flow.setStatus(Status.FAILED); break; case FAILED: case KILLED: case CANCELLED: case FAILED_SUCCEEDED: this.logger.info("Flow '" + id + "' is set to " + flow.getStatus().toString() + " in " + durationSec + " seconds"); break; default: flow.setStatus(Status.SUCCEEDED); this.logger.info("Flow '" + id + "' is set to " + flow.getStatus().toString() + " in " + durationSec + " seconds"); } // If the finalized flow is actually the top level flow, than we finish // the main loop. if (flow instanceof ExecutableFlow) { this.flowFinished = true; } } private void prepareJobProperties(final ExecutableNode node) throws IOException { if (node instanceof ExecutableFlow) { return; } Props props = null; // 1. Shared properties (i.e. *.properties) for the jobs only. This takes // the // least precedence if (!(node instanceof ExecutableFlowBase)) { final String sharedProps = node.getPropsSource(); if (sharedProps != null) { props = this.sharedProps.get(sharedProps); } } // The following is the hiearchical ordering of dependency resolution // 2. Parent Flow Properties final ExecutableFlowBase parentFlow = node.getParentFlow(); if (parentFlow != null) { final Props flowProps = Props.clone(parentFlow.getInputProps()); flowProps.setEarliestAncestor(props); props = flowProps; } // 3. Output Properties. The call creates a clone, so we can overwrite it. final Props outputProps = collectOutputProps(node); if (outputProps != null) { outputProps.setEarliestAncestor(props); props = outputProps; } // 4. The job source. final Props jobSource = loadJobProps(node); if (jobSource != null) { jobSource.setParent(props); props = jobSource; } node.setInputProps(props); } /** * @param props This method is to put in any job properties customization before feeding to the * job. 
*/ private void customizeJobProperties(final Props props) { final boolean memoryCheck = this.flow.getExecutionOptions().getMemoryCheck(); props.put(ProcessJob.AZKABAN_MEMORY_CHECK, Boolean.toString(memoryCheck)); } private Props loadJobProps(final ExecutableNode node) throws IOException { Props props = null; final String source = node.getJobSource(); if (source == null) { return null; } // load the override props if any try { props = this.projectLoader.fetchProjectProperty(this.flow.getProjectId(), this.flow.getVersion(), node.getId() + ".jor"); } catch (final ProjectManagerException e) { e.printStackTrace(); this.logger.error("Error loading job override property for job " + node.getId()); } final File path = new File(this.execDir, source); if (props == null) { // if no override prop, load the original one on disk try { props = new Props(null, path); } catch (final IOException e) { e.printStackTrace(); this.logger.error("Error loading job file " + source + " for job " + node.getId()); } } // setting this fake source as this will be used to determine the location // of log files. if (path.getPath() != null) { props.setSource(path.getPath()); } customizeJobProperties(props); return props; } @SuppressWarnings("FutureReturnValueIgnored") private void runExecutableNode(final ExecutableNode node) throws IOException { // Collect output props from the job's dependencies. prepareJobProperties(node); node.setStatus(Status.QUEUED); final JobRunner runner = createJobRunner(node); this.logger.info("Submitting job '" + node.getNestedId() + "' to run."); try { this.executorService.submit(runner); this.activeJobRunners.add(runner); } catch (final RejectedExecutionException e) { this.logger.error(e); } } /** * Determines what the state of the next node should be. Returns null if the node should not be * run. */ public Status getImpliedStatus(final ExecutableNode node) { // If it's running or finished with 'SUCCEEDED', than don't even // bother starting this job. if (Status.isStatusRunning(node.getStatus()) || node.getStatus() == Status.SUCCEEDED) { return null; } // Go through the node's dependencies. If all of the previous job's // statuses is finished and not FAILED or KILLED, than we can safely // run this job. final ExecutableFlowBase flow = node.getParentFlow(); boolean shouldKill = false; for (final String dependency : node.getInNodes()) { final ExecutableNode dependencyNode = flow.getExecutableNode(dependency); final Status depStatus = dependencyNode.getStatus(); if (!Status.isStatusFinished(depStatus)) { return null; } else if (depStatus == Status.FAILED || depStatus == Status.CANCELLED || depStatus == Status.KILLED) { // We propagate failures as KILLED states. shouldKill = true; } } // If it's disabled but ready to run, we want to make sure it continues // being disabled. if (node.getStatus() == Status.DISABLED || node.getStatus() == Status.SKIPPED) { return Status.SKIPPED; } // If the flow has failed, and we want to finish only the currently running // jobs, we just // kill everything else. We also kill, if the flow has been cancelled. if (this.flowFailed && this.failureAction == ExecutionOptions.FailureAction.FINISH_CURRENTLY_RUNNING) { return Status.CANCELLED; } else if (shouldKill || isKilled()) { return Status.CANCELLED; } // All good to go, ready to run. 
return Status.READY; } private Props collectOutputProps(final ExecutableNode node) { Props previousOutput = null; // Iterate the in nodes again and create the dependencies for (final String dependency : node.getInNodes()) { Props output = node.getParentFlow().getExecutableNode(dependency).getOutputProps(); if (output != null) { output = Props.clone(output); output.setParent(previousOutput); previousOutput = output; } } return previousOutput; } private JobRunner createJobRunner(final ExecutableNode node) { // Load job file. final File path = new File(this.execDir, node.getJobSource()); final JobRunner jobRunner = new JobRunner(node, path.getParentFile(), this.executorLoader, this.jobtypeManager, this.azkabanProps); if (this.watcher != null) { jobRunner.setPipeline(this.watcher, this.pipelineLevel); } if (this.validateUserProxy) { jobRunner.setValidatedProxyUsers(this.proxyUsers); } jobRunner.setDelayStart(node.getDelayedExecution()); jobRunner.setLogSettings(this.logger, this.jobLogFileSize, this.jobLogNumFiles); jobRunner.addListener(this.listener); if (JobCallbackManager.isInitialized()) { jobRunner.addListener(JobCallbackManager.getInstance()); } configureJobLevelMetrics(jobRunner); return jobRunner; } /** * Configure Azkaban metrics tracking for a new jobRunner instance */ private void configureJobLevelMetrics(final JobRunner jobRunner) { this.logger.info("Configuring Azkaban metrics tracking for jobrunner object"); if (MetricReportManager.isAvailable()) { final MetricReportManager metricManager = MetricReportManager.getInstance(); // Adding NumRunningJobMetric listener jobRunner.addListener((NumRunningJobMetric) metricManager .getMetricFromName(NumRunningJobMetric.NUM_RUNNING_JOB_METRIC_NAME)); // Adding NumFailedJobMetric listener jobRunner.addListener((NumFailedJobMetric) metricManager .getMetricFromName(NumFailedJobMetric.NUM_FAILED_JOB_METRIC_NAME)); } jobRunner.addListener(JmxJobMBeanManager.getInstance()); } public void pause(final String user) { synchronized (this.mainSyncObj) { if (!this.flowFinished) { this.logger.info("Flow paused by " + user); this.flowPaused = true; this.flow.setStatus(Status.PAUSED); updateFlow(); } else { this.logger.info("Cannot pause finished flow. Called by user " + user); } } interrupt(); } public void resume(final String user) { synchronized (this.mainSyncObj) { if (!this.flowPaused) { this.logger.info("Cannot resume flow that isn't paused"); } else { this.logger.info("Flow resumed by " + user); this.flowPaused = false; if (this.flowFailed) { this.flow.setStatus(Status.FAILED_FINISHING); } else if (this.flowKilled) { this.flow.setStatus(Status.KILLED); } else { this.flow.setStatus(Status.RUNNING); } updateFlow(); } } interrupt(); } public void kill(final String user) { this.logger.info("Flow killed by " + user); kill(); } public void kill() { synchronized (this.mainSyncObj) { if (this.flowKilled) { return; } this.logger.info("Kill has been called on flow " + this.execId); this.flow.setStatus(Status.KILLED); // If the flow is paused, then we'll also unpause this.flowPaused = false; this.flowKilled = true; if (this.watcher != null) { this.logger.info("Watcher is attached. 
Stopping watcher."); this.watcher.stopWatcher(); this.logger .info("Watcher cancelled status is " + this.watcher.isWatchCancelled()); } this.logger.info("Killing " + this.activeJobRunners.size() + " jobs."); for (final JobRunner runner : this.activeJobRunners) { runner.kill(); } updateFlow(); } interrupt(); } public void retryFailures(final String user) { synchronized (this.mainSyncObj) { this.logger.info("Retrying failures invoked by " + user); this.retryFailedJobs = true; interrupt(); } } private void resetFailedState(final ExecutableFlowBase flow, final List<ExecutableNode> nodesToRetry) { // bottom up final LinkedList<ExecutableNode> queue = new LinkedList<>(); for (final String id : flow.getEndNodes()) { final ExecutableNode node = flow.getExecutableNode(id); queue.add(node); } long maxStartTime = -1; while (!queue.isEmpty()) { final ExecutableNode node = queue.poll(); final Status oldStatus = node.getStatus(); maxStartTime = Math.max(node.getStartTime(), maxStartTime); final long currentTime = System.currentTimeMillis(); if (node.getStatus() == Status.SUCCEEDED) { // This is a candidate parent for restart nodesToRetry.add(node); continue; } else if (node.getStatus() == Status.RUNNING) { continue; } else if (node.getStatus() == Status.SKIPPED) { node.setStatus(Status.DISABLED); node.setEndTime(-1); node.setStartTime(-1); node.setUpdateTime(currentTime); } else if (node instanceof ExecutableFlowBase) { final ExecutableFlowBase base = (ExecutableFlowBase) node; switch (base.getStatus()) { case CANCELLED: node.setStatus(Status.READY); node.setEndTime(-1); node.setStartTime(-1); node.setUpdateTime(currentTime); // Break out of the switch. We'll reset the flow just like a normal // node break; case KILLED: case FAILED: case FAILED_FINISHING: resetFailedState(base, nodesToRetry); continue; default: // Continue the while loop. If the job is in a finished state that's // not // a failure, we don't want to reset the job. continue; } } else if (node.getStatus() == Status.CANCELLED) { // Not a flow, but killed node.setStatus(Status.READY); node.setStartTime(-1); node.setEndTime(-1); node.setUpdateTime(currentTime); } else if (node.getStatus() == Status.FAILED || node.getStatus() == Status.KILLED) { node.resetForRetry(); nodesToRetry.add(node); } if (!(node instanceof ExecutableFlowBase) && node.getStatus() != oldStatus) { this.logger.info("Resetting job '" + node.getNestedId() + "' from " + oldStatus + " to " + node.getStatus()); } for (final String inId : node.getInNodes()) { final ExecutableNode nodeUp = flow.getExecutableNode(inId); queue.add(nodeUp); } } // At this point, the following code will reset the flow final Status oldFlowState = flow.getStatus(); if (maxStartTime == -1) { // Nothing has run inside the flow, so we assume the flow hasn't even // started running yet. flow.setStatus(Status.READY); } else { flow.setStatus(Status.RUNNING); // Add any READY start nodes. Usually it means the flow started, but the // start node has not. 
for (final String id : flow.getStartNodes()) { final ExecutableNode node = flow.getExecutableNode(id); if (node.getStatus() == Status.READY || node.getStatus() == Status.DISABLED) { nodesToRetry.add(node); } } } flow.setUpdateTime(System.currentTimeMillis()); flow.setEndTime(-1); this.logger.info("Resetting flow '" + flow.getNestedId() + "' from " + oldFlowState + " to " + flow.getStatus()); } private void interrupt() { this.flowRunnerThread.interrupt(); } public boolean isKilled() { return this.flowKilled; } public ExecutableFlow getExecutableFlow() { return this.flow; } public File getFlowLogFile() { return this.logFile; } public File getJobLogFile(final String jobId, final int attempt) { final ExecutableNode node = this.flow.getExecutableNodePath(jobId); final File path = new File(this.execDir, node.getJobSource()); final String logFileName = JobRunner.createLogFileName(node, attempt); final File logFile = new File(path.getParentFile(), logFileName); if (!logFile.exists()) { return null; } return logFile; } public File getJobAttachmentFile(final String jobId, final int attempt) { final ExecutableNode node = this.flow.getExecutableNodePath(jobId); final File path = new File(this.execDir, node.getJobSource()); final String attachmentFileName = JobRunner.createAttachmentFileName(node, attempt); final File attachmentFile = new File(path.getParentFile(), attachmentFileName); if (!attachmentFile.exists()) { return null; } return attachmentFile; } public File getJobMetaDataFile(final String jobId, final int attempt) { final ExecutableNode node = this.flow.getExecutableNodePath(jobId); final File path = new File(this.execDir, node.getJobSource()); final String metaDataFileName = JobRunner.createMetaDataFileName(node, attempt); final File metaDataFile = new File(path.getParentFile(), metaDataFileName); if (!metaDataFile.exists()) { return null; } return metaDataFile; } public boolean isRunnerThreadAlive() { if (this.flowRunnerThread != null) { return this.flowRunnerThread.isAlive(); } return false; } public boolean isThreadPoolShutdown() { return this.executorService.isShutdown(); } public int getNumRunningJobs() { return this.activeJobRunners.size(); } public int getExecutionId() { return this.execId; } private class JobRunnerEventListener implements EventListener { public JobRunnerEventListener() { } @Override public synchronized void handleEvent(final Event event) { if (event.getType() == Type.JOB_STATUS_CHANGED) { updateFlow(); } else if (event.getType() == Type.JOB_FINISHED) { final JobRunner runner = (JobRunner) event.getRunner(); final ExecutableNode node = runner.getNode(); final EventData eventData = event.getData(); final long seconds = (node.getEndTime() - node.getStartTime()) / 1000; synchronized (FlowRunner.this.mainSyncObj) { FlowRunner.this.logger.info("Job " + eventData.getNestedId() + " finished with status " + eventData.getStatus() + " in " + seconds + " seconds"); // Cancellation is handled in the main thread, but if the flow is // paused, the main thread is paused too. // This unpauses the flow for cancellation. 
if (FlowRunner.this.flowPaused && eventData.getStatus() == Status.FAILED && FlowRunner.this.failureAction == FailureAction.CANCEL_ALL) { FlowRunner.this.flowPaused = false; } FlowRunner.this.finishedNodes.add(node); activeJobRunners.remove(runner); node.getParentFlow().setUpdateTime(System.currentTimeMillis()); interrupt(); fireEventListeners(event); } } else if (event.getType() == Type.JOB_STARTED) { // add job level checker final TriggerManager triggerManager = ServiceProvider.SERVICE_PROVIDER .getInstance(TriggerManager.class); triggerManager .addTrigger(FlowRunner.this.flow.getExecutionId(), SlaOption.getJobLevelSLAOptions( FlowRunner.this.flow)); } } } public Set<JobRunner> getActiveJobRunners() { return ImmutableSet.copyOf(this.activeJobRunners); } }
1
14,449
Those changes were made by the save plugin.
azkaban-azkaban
java
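The FlowRunner source in this record decides whether to retry a failed job by comparing the attempt counter against the configured retry budget (retryJobIfPossible). A minimal standalone sketch of that pattern in Java; the field and method names below are illustrative, not Azkaban's API:

public class RetrySketch {
    static final class Node {
        final int retries;   // configured retry budget
        int attempt;         // attempts made so far
        long delayMs;        // delay before the next run

        Node(int retries) {
            this.retries = retries;
        }
    }

    // Mirrors the shape of retryJobIfPossible: retry while the budget is not
    // exhausted, otherwise clear any pending delay and give up.
    static boolean retryIfPossible(Node node, long backoffMs) {
        if (node.retries > node.attempt) {
            node.attempt++;
            node.delayMs = backoffMs;
            return true;
        }
        node.delayMs = 0;
        return false;
    }

    public static void main(String[] args) {
        Node node = new Node(2);
        while (retryIfPossible(node, 1_000L)) {
            System.out.println("retrying, attempt " + node.attempt);
        }
        System.out.println("giving up after " + node.attempt + " attempts");
    }
}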
@@ -37,7 +37,9 @@ public class MimeHeader { }; protected ArrayList<Field> mFields = new ArrayList<Field>(); - private String mCharset = null; + + // use UTF-8 as standard to prevent NullPointerExceptions + private String mCharset = "UTF-8"; public void clear() { mFields.clear();
1
package com.fsck.k9.mail.internet; import com.fsck.k9.helper.Utility; import java.io.BufferedWriter; import java.io.IOException; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.nio.charset.Charset; import java.util.*; public class MimeHeader { private static final String[] EMPTY_STRING_ARRAY = new String[0]; /** * Application specific header that contains Store specific information about an attachment. * In IMAP this contains the IMAP BODYSTRUCTURE part id so that the ImapStore can later * retrieve the attachment at will from the server. * The info is recorded from this header on LocalStore.appendMessages and is put back * into the MIME data by LocalStore.fetch. */ public static final String HEADER_ANDROID_ATTACHMENT_STORE_DATA = "X-Android-Attachment-StoreData"; public static final String HEADER_CONTENT_TYPE = "Content-Type"; public static final String HEADER_CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding"; public static final String HEADER_CONTENT_DISPOSITION = "Content-Disposition"; public static final String HEADER_CONTENT_ID = "Content-ID"; /** * Fields that should be omitted when writing the header using writeTo() */ private static final String[] writeOmitFields = { // HEADER_ANDROID_ATTACHMENT_DOWNLOADED, // HEADER_ANDROID_ATTACHMENT_ID, HEADER_ANDROID_ATTACHMENT_STORE_DATA }; protected ArrayList<Field> mFields = new ArrayList<Field>(); private String mCharset = null; public void clear() { mFields.clear(); } public String getFirstHeader(String name) { String[] header = getHeader(name); if (header == null) { return null; } return header[0]; } public void addHeader(String name, String value) { mFields.add(new Field(name, MimeUtility.foldAndEncode(value))); } public void setHeader(String name, String value) { if (name == null || value == null) { return; } removeHeader(name); addHeader(name, value); } public Set<String> getHeaderNames() { Set<String> names = new HashSet<String>(); for (Field field : mFields) { names.add(field.name); } return names; } public String[] getHeader(String name) { ArrayList<String> values = new ArrayList<String>(); for (Field field : mFields) { if (field.name.equalsIgnoreCase(name)) { values.add(field.value); } } if (values.size() == 0) { return null; } return values.toArray(EMPTY_STRING_ARRAY); } public void removeHeader(String name) { ArrayList<Field> removeFields = new ArrayList<Field>(); for (Field field : mFields) { if (field.name.equalsIgnoreCase(name)) { removeFields.add(field); } } mFields.removeAll(removeFields); } public void writeTo(OutputStream out) throws IOException { BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(out), 1024); for (Field field : mFields) { if (!Utility.arrayContains(writeOmitFields, field.name)) { String v = field.value; if (hasToBeEncoded(v)) { Charset charset = null; if (mCharset != null) { charset = Charset.forName(mCharset); } v = EncoderUtil.encodeEncodedWord(field.value, charset); } writer.write(field.name + ": " + v + "\r\n"); } } writer.flush(); } // encode non printable characters except LF/CR codes. 
public boolean hasToBeEncoded(String text) { for (int i = 0; i < text.length(); i++) { char c = text.charAt(i); if (c < 0x20 || 0x7e < c) { // non printable if (c != 0x0a && c != 0x0d) { // non LF/CR return true; } } } return false; } static class Field { String name; String value; public Field(String name, String value) { this.name = name; this.value = value; } @Override public String toString() { StringBuilder sb = new StringBuilder("("); sb.append(name).append('=').append(value).append(')'); return sb.toString(); } } public void setCharset(String charset) { mCharset = charset; } }
1
11,413
There were problems with NullPointerExceptions and with German umlauts in message titles (I know they're not allowed in the specification, but they caused my K-9 inbox to stop working entirely). I don't know if this is related to PGP/MIME.
k9mail-k-9
java
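The patch in this record replaces the null default charset with "UTF-8" so that header encoding never receives a null Charset. A minimal sketch of that defaulting idea in plain Java; the class and method names are illustrative, not K-9's API:

import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

public class CharsetDefaultSketch {
    // Default to UTF-8 instead of null, as the patch above does.
    private String mCharset = "UTF-8";

    // Resolve the configured charset, falling back to UTF-8 so the header
    // encoder never receives a null Charset (the NullPointerException the
    // reviewer hit with non-ASCII values such as German umlauts).
    Charset resolveCharset() {
        if (mCharset == null || mCharset.isEmpty()) {
            return StandardCharsets.UTF_8;
        }
        return Charset.forName(mCharset);
    }

    public static void main(String[] args) {
        System.out.println(new CharsetDefaultSketch().resolveCharset());
    }
}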
@@ -0,0 +1,11 @@ +// <copyright file="IEvent.cs" company="Datadog"> +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. +// </copyright> + +namespace Datadog.Trace.Abstractions +{ + internal interface IEvent + { + } +}
1
1
21,551
This is an appsec concept, right? Should it live in the Appsec namespace?
DataDog-dd-trace-dotnet
.cs
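The patch in this record adds an empty IEvent interface, effectively a marker type with no members that only tags implementations. A minimal Java analogue of the marker-interface pattern; the names below are illustrative, not Datadog's API:

public class MarkerInterfaceSketch {
    // Marker interface: no members, it only tags types as events.
    interface Event {
    }

    static final class RequestBlocked implements Event {
    }

    // Consumers accept "any event" without depending on concrete types.
    static void publish(Event event) {
        System.out.println("publishing " + event.getClass().getSimpleName());
    }

    public static void main(String[] args) {
        publish(new RequestBlocked());
    }
}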
@@ -11,11 +11,10 @@ func main() { machine.I2C0.Configure(machine.I2CConfig{}) // Init BlinkM - machine.I2C0.WriteTo(0x09, []byte("o")) + machine.I2C0.WriteRegister(0x09, 'o', nil) version := []byte{0, 0} - machine.I2C0.WriteTo(0x09, []byte("Z")) - machine.I2C0.ReadFrom(0x09, version) + machine.I2C0.ReadRegister(0x09, 'Z', version) println("Firmware version:", string(version[0]), string(version[1])) count := 0
1
// Connects to an BlinkM I2C RGB LED. // http://thingm.com/fileadmin/thingm/downloads/BlinkM_datasheet.pdf package main import ( "machine" "time" ) func main() { machine.I2C0.Configure(machine.I2CConfig{}) // Init BlinkM machine.I2C0.WriteTo(0x09, []byte("o")) version := []byte{0, 0} machine.I2C0.WriteTo(0x09, []byte("Z")) machine.I2C0.ReadFrom(0x09, version) println("Firmware version:", string(version[0]), string(version[1])) count := 0 for { switch count { case 0: // Crimson machine.I2C0.WriteTo(0x09, []byte{'n', 0xdc, 0x14, 0x3c}) count = 1 case 1: // MediumPurple machine.I2C0.WriteTo(0x09, []byte{'n', 0x93, 0x70, 0xdb}) count = 2 case 2: // MediumSeaGreen machine.I2C0.WriteTo(0x09, []byte{'n', 0x3c, 0xb3, 0x71}) count = 0 } time.Sleep(100 * time.Millisecond) } }
1
6,091
I'd personally recommend having the demo check for errors, unless it's worthless to do so.
tinygo-org-tinygo
go
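The reviewer's suggestion in this record is the general check-the-error pattern: demo code that discards returned errors hides real failures. A minimal sketch of that pattern in Java, with a hypothetical bus interface standing in for the I2C peripheral (this is not TinyGo's machine API):

import java.io.IOException;

public class ErrorCheckSketch {
    // Hypothetical bus interface standing in for an I2C peripheral;
    // real drivers report failures via an error return or an exception.
    interface Bus {
        void writeRegister(int address, int register, byte[] data) throws IOException;
        void readRegister(int address, int register, byte[] into) throws IOException;
    }

    static void printFirmwareVersion(Bus bus) {
        byte[] version = new byte[2];
        try {
            bus.writeRegister(0x09, 'o', new byte[0]); // stop the default light script
            bus.readRegister(0x09, 'Z', version);      // 'Z' = get firmware version
            System.out.printf("Firmware version: %c%c%n", (char) version[0], (char) version[1]);
        } catch (IOException e) {
            // Surfacing the failure is the point of the reviewer's suggestion.
            System.err.println("BlinkM not responding: " + e.getMessage());
        }
    }
}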
@@ -582,7 +582,7 @@ public class DistributorTest { ); Session firefoxSession = distributor.newSession(createRequest(firefoxPayload)).getSession(); - LOG.info(String.format("Firefox Session %d assigned to %s", i, chromeSession.getUri())); + LOG.finer(String.format("Firefox Session %d assigned to %s", i, chromeSession.getUri())); boolean inFirefoxNodes = firefoxNodes.stream().anyMatch(node -> node.getUri().equals(firefoxSession.getUri())); boolean inChromeNodes = chromeNodes.stream().anyMatch(node -> node.getUri().equals(chromeSession.getUri()));
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.distributor; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import io.opentracing.Tracer; import io.opentracing.noop.NoopTracerFactory; import org.junit.Before; import org.junit.Ignore; import org.junit.Test; import org.openqa.selenium.Capabilities; import org.openqa.selenium.ImmutableCapabilities; import org.openqa.selenium.MutableCapabilities; import org.openqa.selenium.NoSuchSessionException; import org.openqa.selenium.SessionNotCreatedException; import org.openqa.selenium.events.EventBus; import org.openqa.selenium.events.local.GuavaEventBus; import org.openqa.selenium.grid.component.HealthCheck; import org.openqa.selenium.grid.data.CreateSessionRequest; import org.openqa.selenium.grid.data.DistributorStatus; import org.openqa.selenium.grid.data.NodeStatus; import org.openqa.selenium.grid.data.Session; import org.openqa.selenium.grid.distributor.local.LocalDistributor; import org.openqa.selenium.grid.distributor.remote.RemoteDistributor; import org.openqa.selenium.grid.node.Node; import org.openqa.selenium.grid.node.local.LocalNode; import org.openqa.selenium.grid.sessionmap.SessionMap; import org.openqa.selenium.grid.sessionmap.local.LocalSessionMap; import org.openqa.selenium.grid.testing.PassthroughHttpClient; import org.openqa.selenium.grid.testing.TestSessionFactory; import org.openqa.selenium.grid.web.CombinedHandler; import org.openqa.selenium.net.PortProber; import org.openqa.selenium.remote.Dialect; import org.openqa.selenium.remote.NewSessionPayload; import org.openqa.selenium.remote.SessionId; import org.openqa.selenium.remote.http.HttpClient; import org.openqa.selenium.remote.http.HttpHandler; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import org.openqa.selenium.support.ui.FluentWait; import org.openqa.selenium.support.ui.Wait; import java.io.IOException; import java.io.UncheckedIOException; import java.net.MalformedURLException; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.time.Duration; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Logger; import java.util.stream.Collectors; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatExceptionOfType; import static org.assertj.core.api.Assertions.fail; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.openqa.selenium.remote.http.Contents.utf8String; import static org.openqa.selenium.remote.http.HttpMethod.POST; public class 
DistributorTest { private Tracer tracer; private EventBus bus; private HttpClient.Factory clientFactory; private Distributor local; private ImmutableCapabilities caps; private static final Logger LOG = Logger.getLogger("Distributor Test"); @Before public void setUp() { tracer = NoopTracerFactory.create(); bus = new GuavaEventBus(); clientFactory = HttpClient.Factory.createDefault(); LocalSessionMap sessions = new LocalSessionMap(bus); local = new LocalDistributor(tracer, bus, HttpClient.Factory.createDefault(), sessions); caps = new ImmutableCapabilities("browserName", "cheese"); } @Test public void creatingANewSessionWithoutANodeEndsInFailure() throws MalformedURLException { Distributor distributor = new RemoteDistributor( tracer, new PassthroughHttpClient.Factory(local), new URL("http://does.not.exist/")); try (NewSessionPayload payload = NewSessionPayload.create(caps)) { assertThatExceptionOfType(SessionNotCreatedException.class) .isThrownBy(() -> distributor.newSession(createRequest(payload))); } } @Test public void shouldBeAbleToAddANodeAndCreateASession() throws URISyntaxException { URI nodeUri = new URI("http://example:5678"); URI routableUri = new URI("http://localhost:1234"); LocalSessionMap sessions = new LocalSessionMap(bus); LocalNode node = LocalNode.builder(tracer, bus, clientFactory, routableUri) .add(caps, new TestSessionFactory((id, c) -> new Session(id, nodeUri, c))) .build(); Distributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(node), sessions); distributor.add(node); MutableCapabilities sessionCaps = new MutableCapabilities(caps); sessionCaps.setCapability("sausages", "gravy"); try (NewSessionPayload payload = NewSessionPayload.create(sessionCaps)) { Session session = distributor.newSession(createRequest(payload)).getSession(); assertThat(session.getCapabilities()).isEqualTo(sessionCaps); assertThat(session.getUri()).isEqualTo(routableUri); } } @Test public void creatingASessionAddsItToTheSessionMap() throws URISyntaxException { URI nodeUri = new URI("http://example:5678"); URI routableUri = new URI("http://localhost:1234"); LocalSessionMap sessions = new LocalSessionMap(bus); LocalNode node = LocalNode.builder(tracer, bus, clientFactory, routableUri) .add(caps, new TestSessionFactory((id, c) -> new Session(id, nodeUri, c))) .build(); Distributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(node), sessions); distributor.add(node); MutableCapabilities sessionCaps = new MutableCapabilities(caps); sessionCaps.setCapability("sausages", "gravy"); try (NewSessionPayload payload = NewSessionPayload.create(sessionCaps)) { Session returned = distributor.newSession(createRequest(payload)).getSession(); Session session = sessions.get(returned.getId()); assertThat(session.getCapabilities()).isEqualTo(sessionCaps); assertThat(session.getUri()).isEqualTo(routableUri); } } @Test public void shouldBeAbleToRemoveANode() throws URISyntaxException, MalformedURLException { URI nodeUri = new URI("http://example:5678"); URI routableUri = new URI("http://localhost:1234"); LocalSessionMap sessions = new LocalSessionMap(bus); LocalNode node = LocalNode.builder(tracer, bus, clientFactory, routableUri) .add(caps, new TestSessionFactory((id, c) -> new Session(id, nodeUri, c))) .build(); Distributor local = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(node), sessions); Distributor distributor = new RemoteDistributor( tracer, new PassthroughHttpClient.Factory(local), new 
URL("http://does.not.exist")); distributor.add(node); distributor.remove(node.getId()); try (NewSessionPayload payload = NewSessionPayload.create(caps)) { assertThatExceptionOfType(SessionNotCreatedException.class) .isThrownBy(() -> distributor.newSession(createRequest(payload))); } } @Test public void registeringTheSameNodeMultipleTimesOnlyCountsTheFirstTime() throws URISyntaxException { URI nodeUri = new URI("http://example:5678"); URI routableUri = new URI("http://localhost:1234"); LocalNode node = LocalNode.builder(tracer, bus, clientFactory, routableUri) .add(caps, new TestSessionFactory((id, c) -> new Session(id, nodeUri, c))) .build(); local.add(node); local.add(node); DistributorStatus status = local.getStatus(); assertThat(status.getNodes().size()).isEqualTo(1); } @Test public void theMostLightlyLoadedNodeIsSelectedFirst() { // Create enough hosts so that we avoid the scheduler returning hosts in: // * insertion order // * reverse insertion order // * sorted with most heavily used first SessionMap sessions = new LocalSessionMap(bus); Node lightest = createNode(caps, 10, 0); Node medium = createNode(caps, 10, 4); Node heavy = createNode(caps, 10, 6); Node massive = createNode(caps, 10, 8); CombinedHandler handler = new CombinedHandler(); handler.addHandler(lightest); handler.addHandler(medium); handler.addHandler(heavy); handler.addHandler(massive); Distributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(handler), sessions) .add(heavy) .add(medium) .add(lightest) .add(massive); try (NewSessionPayload payload = NewSessionPayload.create(caps)) { Session session = distributor.newSession(createRequest(payload)).getSession(); assertThat(session.getUri()).isEqualTo(lightest.getStatus().getUri()); } } @Test public void shouldUseLastSessionCreatedTimeAsTieBreaker() { SessionMap sessions = new LocalSessionMap(bus); Node leastRecent = createNode(caps, 5, 0); CombinedHandler handler = new CombinedHandler(); handler.addHandler(sessions); handler.addHandler(leastRecent); Distributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(handler), sessions) .add(leastRecent); try (NewSessionPayload payload = NewSessionPayload.create(caps)) { distributor.newSession(createRequest(payload)); // Will be "leastRecent" by default } Node middle = createNode(caps, 5, 0); handler.addHandler(middle); distributor.add(middle); try (NewSessionPayload payload = NewSessionPayload.create(caps)) { Session session = distributor.newSession(createRequest(payload)).getSession(); // Least lightly loaded is middle assertThat(session.getUri()).isEqualTo(middle.getStatus().getUri()); } Node mostRecent = createNode(caps, 5, 0); handler.addHandler(mostRecent); distributor.add(mostRecent); try (NewSessionPayload payload = NewSessionPayload.create(caps)) { Session session = distributor.newSession(createRequest(payload)).getSession(); // Least lightly loaded is most recent assertThat(session.getUri()).isEqualTo(mostRecent.getStatus().getUri()); } // All the nodes should be equally loaded. Map<Capabilities, Integer> expected = mostRecent.getStatus().getStereotypes(); assertThat(leastRecent.getStatus().getStereotypes()).isEqualTo(expected); assertThat(middle.getStatus().getStereotypes()).isEqualTo(expected); // All nodes are now equally loaded. 
We should be going in time order now try (NewSessionPayload payload = NewSessionPayload.create(caps)) { Session session = distributor.newSession(createRequest(payload)).getSession(); assertThat(session.getUri()).isEqualTo(leastRecent.getStatus().getUri()); } } @Test public void shouldIncludeHostsThatAreUpInHostList() { CombinedHandler handler = new CombinedHandler(); SessionMap sessions = new LocalSessionMap(bus); handler.addHandler(sessions); URI uri = createUri(); Node alwaysDown = LocalNode.builder(tracer, bus, clientFactory, uri) .add(caps, new TestSessionFactory((id, c) -> new Session(id, uri, c))) .advanced() .healthCheck(() -> new HealthCheck.Result(false, "Boo!")) .build(); handler.addHandler(alwaysDown); Node alwaysUp = LocalNode.builder(tracer, bus, clientFactory, uri) .add(caps, new TestSessionFactory((id, c) -> new Session(id, uri, c))) .advanced() .healthCheck(() -> new HealthCheck.Result(true, "Yay!")) .build(); handler.addHandler(alwaysUp); LocalDistributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(handler), sessions); handler.addHandler(distributor); distributor.add(alwaysDown); // Should be unable to create a session because the node is down. try (NewSessionPayload payload = NewSessionPayload.create(caps)) { assertThatExceptionOfType(SessionNotCreatedException.class) .isThrownBy(() -> distributor.newSession(createRequest(payload))); } distributor.add(alwaysUp); try (NewSessionPayload payload = NewSessionPayload.create(caps)) { distributor.newSession(createRequest(payload)); } } @Test public void shouldNotScheduleAJobIfAllSlotsAreBeingUsed() { SessionMap sessions = new LocalSessionMap(bus); CombinedHandler handler = new CombinedHandler(); Distributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(handler), sessions); handler.addHandler(distributor); Node node = createNode(caps, 1, 0); handler.addHandler(node); distributor.add(node); // Use up the one slot available try (NewSessionPayload payload = NewSessionPayload.create(caps)) { distributor.newSession(createRequest(payload)); } // Now try and create a session. try (NewSessionPayload payload = NewSessionPayload.create(caps)) { assertThatExceptionOfType(SessionNotCreatedException.class) .isThrownBy(() -> distributor.newSession(createRequest(payload))); } } @Test public void shouldReleaseSlotOnceSessionEnds() { SessionMap sessions = new LocalSessionMap(bus); CombinedHandler handler = new CombinedHandler(); Distributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(handler), sessions); handler.addHandler(distributor); Node node = createNode(caps, 1, 0); handler.addHandler(node); distributor.add(node); // Use up the one slot available Session session; try (NewSessionPayload payload = NewSessionPayload.create(caps)) { session = distributor.newSession(createRequest(payload)).getSession(); } // Make sure the session map has the session sessions.get(session.getId()); node.stop(session.getId()); // Now wait for the session map to say the session is gone. Wait<Object> wait = new FluentWait<>(new Object()).withTimeout(Duration.ofSeconds(2)); wait.until(obj -> { try { sessions.get(session.getId()); return false; } catch (NoSuchSessionException e) { return true; } }); wait.until(obj -> distributor.getStatus().hasCapacity()); // And we should now be able to create another session. 
try (NewSessionPayload payload = NewSessionPayload.create(caps)) { distributor.newSession(createRequest(payload)); } } @Test public void shouldNotStartASessionIfTheCapabilitiesAreNotSupported() { CombinedHandler handler = new CombinedHandler(); LocalSessionMap sessions = new LocalSessionMap(bus); handler.addHandler(handler); Distributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(handler), sessions); handler.addHandler(distributor); Node node = createNode(caps, 1, 0); handler.addHandler(node); distributor.add(node); ImmutableCapabilities unmatched = new ImmutableCapabilities("browserName", "transit of venus"); try (NewSessionPayload payload = NewSessionPayload.create(unmatched)) { assertThatExceptionOfType(SessionNotCreatedException.class) .isThrownBy(() -> distributor.newSession(createRequest(payload))); } } @Test public void attemptingToStartASessionWhichFailsMarksAsTheSlotAsAvailable() { CombinedHandler handler = new CombinedHandler(); SessionMap sessions = new LocalSessionMap(bus); handler.addHandler(sessions); URI uri = createUri(); Node node = LocalNode.builder(tracer, bus, clientFactory, uri) .add(caps, new TestSessionFactory((id, caps) -> { throw new SessionNotCreatedException("OMG"); })) .build(); handler.addHandler(node); Distributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(handler), sessions); handler.addHandler(distributor); distributor.add(node); try (NewSessionPayload payload = NewSessionPayload.create(caps)) { assertThatExceptionOfType(SessionNotCreatedException.class) .isThrownBy(() -> distributor.newSession(createRequest(payload))); } assertThat(distributor.getStatus().hasCapacity()).isTrue(); } @Test public void shouldReturnNodesThatWereDownToPoolOfNodesOnceTheyMarkTheirHealthCheckPasses() { CombinedHandler handler = new CombinedHandler(); SessionMap sessions = new LocalSessionMap(bus); handler.addHandler(sessions); AtomicBoolean isUp = new AtomicBoolean(false); URI uri = createUri(); Node node = LocalNode.builder(tracer, bus, clientFactory, uri) .add(caps, new TestSessionFactory((id, caps) -> new Session(id, uri, caps))) .advanced() .healthCheck(() -> new HealthCheck.Result(isUp.get(), "TL;DR")) .build(); handler.addHandler(node); LocalDistributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(handler), sessions); handler.addHandler(distributor); distributor.add(node); // Should be unable to create a session because the node is down. try (NewSessionPayload payload = NewSessionPayload.create(caps)) { assertThatExceptionOfType(SessionNotCreatedException.class) .isThrownBy(() -> distributor.newSession(createRequest(payload))); } // Mark the node as being up isUp.set(true); // Kick the machinery to ensure that everything is fine. 
distributor.refresh(); // Because the node is now up and running, we should now be able to create a session try (NewSessionPayload payload = NewSessionPayload.create(caps)) { distributor.newSession(createRequest(payload)); } } private Set<Node> createNodeSet(Distributor distributor, int count, Capabilities...capabilities) { Set<Node> nodeSet = new HashSet<>(); for (int i=0; i<count; i++) { URI uri = createUri(); LocalNode.Builder builder = LocalNode.builder(tracer, bus, clientFactory, uri); for (Capabilities caps: capabilities) { builder.add(caps, new TestSessionFactory((id, hostCaps) -> new HandledSession(uri, hostCaps))); } Node node = builder.build(); distributor.add(node); nodeSet.add(node); } return nodeSet; } @Test public void shouldPrioritizeHostsWithTheMostSlotsAvailableForASessionType() { //SS: Consider the case where you have 1 Windows machine and 5 linux machines. All of these hosts // can run Chrome and Firefox sessions, but only one can run Edge sessions. Ideally, the machine // able to run Edge would be sorted last. //Create the Distributor CombinedHandler handler = new CombinedHandler(); SessionMap sessions = new LocalSessionMap(bus); handler.addHandler(sessions); LocalDistributor distributor = new LocalDistributor( tracer, bus, new PassthroughHttpClient.Factory(handler), sessions); handler.addHandler(distributor); //Create all three Capability types Capabilities edgeCapabilities = new ImmutableCapabilities("browserName", "edge"); Capabilities firefoxCapabilities = new ImmutableCapabilities("browserName", "firefox"); Capabilities chromeCapabilities = new ImmutableCapabilities("browserName", "chrome"); //TODO This should probably be a map of browser -> all nodes that support <browser> //Store our "expected results" sets for the various browser-specific nodes Set<Node> edgeNodes = createNodeSet(distributor, 3, edgeCapabilities, chromeCapabilities, firefoxCapabilities); //chromeNodes is all these new nodes PLUS all the Edge nodes from before Set<Node> chromeNodes = createNodeSet(distributor,5, chromeCapabilities, firefoxCapabilities); chromeNodes.addAll(edgeNodes); //all nodes support firefox, so add them to the firefoxNodes set Set<Node> firefoxNodes = createNodeSet(distributor,3, firefoxCapabilities); firefoxNodes.addAll(edgeNodes); firefoxNodes.addAll(chromeNodes); //Assign 5 Chrome and 5 Firefox sessions to the distributor, make sure they don't go to the Edge node for (int i=0; i<5; i++) { try (NewSessionPayload chromePayload = NewSessionPayload.create(chromeCapabilities); NewSessionPayload firefoxPayload = NewSessionPayload.create(firefoxCapabilities)) { Session chromeSession = distributor.newSession(createRequest(chromePayload)).getSession(); assertThat( //Ensure the Uri of the Session matches one of the Chrome Nodes, not the Edge Node chromeSession.getUri()).isIn( chromeNodes .stream().map(Node::getStatus).collect(Collectors.toList()) //List of getStatus() from the Set .stream().map(NodeStatus::getUri).collect(Collectors.toList()) //List of getUri() from the Set ); Session firefoxSession = distributor.newSession(createRequest(firefoxPayload)).getSession(); LOG.info(String.format("Firefox Session %d assigned to %s", i, chromeSession.getUri())); boolean inFirefoxNodes = firefoxNodes.stream().anyMatch(node -> node.getUri().equals(firefoxSession.getUri())); boolean inChromeNodes = chromeNodes.stream().anyMatch(node -> node.getUri().equals(chromeSession.getUri())); //This could be either, or, or both assertTrue(inFirefoxNodes || inChromeNodes); } } //The Chrome Nodes should 
be full at this point, but Firefox isn't... so send an Edge session and make sure it routes to an Edge node try (NewSessionPayload edgePayload = NewSessionPayload.create(edgeCapabilities)) { Session edgeSession = distributor.newSession(createRequest(edgePayload)).getSession(); assertTrue(edgeNodes.stream().anyMatch(node -> node.getUri().equals(edgeSession.getUri()))); } } private Node createNode(Capabilities stereotype, int count, int currentLoad) { URI uri = createUri(); LocalNode.Builder builder = LocalNode.builder(tracer, bus, clientFactory, uri); for (int i = 0; i < count; i++) { builder.add(stereotype, new TestSessionFactory((id, caps) -> new HandledSession(uri, caps))); } LocalNode node = builder.build(); for (int i = 0; i < currentLoad; i++) { // Ignore the session. We're just creating load. node.newSession(new CreateSessionRequest( ImmutableSet.copyOf(Dialect.values()), stereotype, ImmutableMap.of())); } return node; } @Test @Ignore public void shouldCorrectlySetSessionCountsWhenStartedAfterNodeWithSession() { fail("write me!"); } @Test public void statusShouldIndicateThatDistributorIsNotAvailableIfNodesAreDown() throws URISyntaxException { Capabilities capabilities = new ImmutableCapabilities("cheese", "peas"); URI uri = new URI("http://example.com"); Node node = LocalNode.builder(tracer, bus, clientFactory, uri) .add(capabilities, new TestSessionFactory((id, caps) -> new Session(id, uri, caps))) .advanced() .healthCheck(() -> new HealthCheck.Result(false, "TL;DR")) .build(); local.add(node); DistributorStatus status = local.getStatus(); assertFalse(status.hasCapacity()); } private HttpRequest createRequest(NewSessionPayload payload) { StringBuilder builder = new StringBuilder(); try { payload.writeTo(builder); } catch (IOException e) { throw new UncheckedIOException(e); } HttpRequest request = new HttpRequest(POST, "/se/grid/distributor/session"); request.setContent(utf8String(builder.toString())); return request; } private URI createUri() { try { return new URI("http://localhost:" + PortProber.findFreePort()); } catch (URISyntaxException e) { throw new RuntimeException(e); } } class HandledSession extends Session implements HttpHandler { HandledSession(URI uri, Capabilities caps) { super(new SessionId(UUID.randomUUID()), uri, caps); } @Override public HttpResponse execute(HttpRequest req) throws UncheckedIOException { // no-op return new HttpResponse(); } } }
1
17,126
Since this is in a test, I imagine that the choice of `info` level was deliberate.
SeleniumHQ-selenium
java
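The test in the record above spells out its scheduling intent in its comments: with one Edge-capable host and several Chrome/Firefox-only hosts, Chrome and Firefox sessions should avoid the Edge host so it stays free for Edge sessions. Below is a minimal sketch of that idea; the node shape and the `pickNode` helper are illustrative assumptions, not Selenium Grid's actual distributor, which weighs available slots per session type rather than a simple capability count.

// Hypothetical node records: each lists the browsers it can serve and its free slots.
const nodes = [
  { uri: 'http://win-1',   capabilities: ['edge', 'chrome', 'firefox'], freeSlots: 3 },
  { uri: 'http://linux-1', capabilities: ['chrome', 'firefox'],         freeSlots: 3 },
  { uri: 'http://linux-2', capabilities: ['chrome', 'firefox'],         freeSlots: 3 },
];

// Among nodes that can serve the requested browser, prefer the least "special"
// node (fewest distinct capabilities), then the one with the most free slots,
// so the only Edge-capable host is handed out last for Chrome/Firefox work.
function pickNode(browser) {
  return nodes
    .filter(n => n.freeSlots > 0 && n.capabilities.includes(browser))
    .sort((a, b) =>
      a.capabilities.length - b.capabilities.length || b.freeSlots - a.freeSlots
    )[0];
}

console.log(pickNode('chrome').uri); // one of the Linux hosts, not the Edge machine
console.log(pickNode('edge').uri);   // http://win-1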
@@ -1,10 +1,10 @@ const Plugin = require('../../core/Plugin') const Translator = require('../../core/Translator') const dragDrop = require('drag-drop') -const Dashboard = require('./Dashboard') +const DashboardUI = require('./Dashboard') const StatusBar = require('../StatusBar') const Informer = require('../Informer') -const { findAllDOMElements } = require('../../core/Utils') +const { findAllDOMElements, toArray } = require('../../core/Utils') const prettyBytes = require('prettier-bytes') const { defaultTabIcon } = require('./icons')
1
const Plugin = require('../../core/Plugin') const Translator = require('../../core/Translator') const dragDrop = require('drag-drop') const Dashboard = require('./Dashboard') const StatusBar = require('../StatusBar') const Informer = require('../Informer') const { findAllDOMElements } = require('../../core/Utils') const prettyBytes = require('prettier-bytes') const { defaultTabIcon } = require('./icons') const FOCUSABLE_ELEMENTS = [ 'a[href]', 'area[href]', 'input:not([disabled]):not([type="hidden"])', 'select:not([disabled])', 'textarea:not([disabled])', 'button:not([disabled])', 'iframe', 'object', 'embed', '[contenteditable]', '[tabindex]:not([tabindex^="-"])' ] /** * Dashboard UI with previews, metadata editing, tabs for various services and more */ module.exports = class DashboardUI extends Plugin { constructor (uppy, opts) { super(uppy, opts) this.id = this.opts.id || 'Dashboard' this.title = 'Dashboard' this.type = 'orchestrator' const defaultLocale = { strings: { selectToUpload: 'Select files to upload', closeModal: 'Close Modal', upload: 'Upload', importFrom: 'Import files from', dashboardWindowTitle: 'Uppy Dashboard Window (Press escape to close)', dashboardTitle: 'Uppy Dashboard', copyLinkToClipboardSuccess: 'Link copied to clipboard.', copyLinkToClipboardFallback: 'Copy the URL below', fileSource: 'File source', done: 'Done', localDisk: 'Local Disk', myDevice: 'My Device', dropPasteImport: 'Drop files here, paste, import from one of the locations above or', dropPaste: 'Drop files here, paste or', browse: 'browse', fileProgress: 'File progress: upload speed and ETA', numberOfSelectedFiles: 'Number of selected files', uploadAllNewFiles: 'Upload all new files', emptyFolderAdded: 'No files were added from empty folder', folderAdded: { 0: 'Added %{smart_count} file from %{folder}', 1: 'Added %{smart_count} files from %{folder}' } } } // set default options const defaultOptions = { target: 'body', getMetaFromForm: true, metaFields: [], trigger: '#uppy-select-files', inline: false, width: 750, height: 550, semiTransparent: false, defaultTabIcon: defaultTabIcon(), showProgressDetails: false, hideUploadButton: false, note: null, closeModalOnClickOutside: false, locale: defaultLocale, onRequestCloseModal: () => this.closeModal() } // merge default options with the ones set by user this.opts = Object.assign({}, defaultOptions, opts) this.locale = Object.assign({}, defaultLocale, this.opts.locale) this.locale.strings = Object.assign({}, defaultLocale.strings, this.opts.locale.strings) this.translator = new Translator({locale: this.locale}) this.i18n = this.translator.translate.bind(this.translator) this.closeModal = this.closeModal.bind(this) this.requestCloseModal = this.requestCloseModal.bind(this) this.openModal = this.openModal.bind(this) this.isModalOpen = this.isModalOpen.bind(this) this.addTarget = this.addTarget.bind(this) this.hideAllPanels = this.hideAllPanels.bind(this) this.showPanel = this.showPanel.bind(this) this.getFocusableNodes = this.getFocusableNodes.bind(this) this.setFocusToFirstNode = this.setFocusToFirstNode.bind(this) this.maintainFocus = this.maintainFocus.bind(this) this.initEvents = this.initEvents.bind(this) this.onKeydown = this.onKeydown.bind(this) this.handleClickOutside = this.handleClickOutside.bind(this) this.handleFileCard = this.handleFileCard.bind(this) this.handleDrop = this.handleDrop.bind(this) this.pauseAll = this.pauseAll.bind(this) this.resumeAll = this.resumeAll.bind(this) this.cancelAll = this.cancelAll.bind(this) this.updateDashboardElWidth = 
this.updateDashboardElWidth.bind(this) this.render = this.render.bind(this) this.install = this.install.bind(this) } addTarget (plugin) { const callerPluginId = plugin.id || plugin.constructor.name const callerPluginName = plugin.title || callerPluginId const callerPluginType = plugin.type if (callerPluginType !== 'acquirer' && callerPluginType !== 'progressindicator' && callerPluginType !== 'presenter') { let msg = 'Dashboard: Modal can only be used by plugins of types: acquirer, progressindicator, presenter' this.uppy.log(msg) return } const target = { id: callerPluginId, name: callerPluginName, type: callerPluginType, isHidden: true } const state = this.getPluginState() const newTargets = state.targets.slice() newTargets.push(target) this.setPluginState({ targets: newTargets }) return this.el } hideAllPanels () { this.setPluginState({ activePanel: false }) } showPanel (id) { const { targets } = this.getPluginState() const activePanel = targets.filter((target) => { return target.type === 'acquirer' && target.id === id })[0] this.setPluginState({ activePanel: activePanel }) } requestCloseModal () { if (this.opts.onRequestCloseModal) { return this.opts.onRequestCloseModal() } else { this.closeModal() } } getFocusableNodes () { const nodes = this.el.querySelectorAll(FOCUSABLE_ELEMENTS) return Object.keys(nodes).map((key) => nodes[key]) } setFocusToFirstNode () { const focusableNodes = this.getFocusableNodes() // console.log(focusableNodes) // console.log(focusableNodes[0]) if (focusableNodes.length) focusableNodes[0].focus() } maintainFocus (event) { var focusableNodes = this.getFocusableNodes() var focusedItemIndex = focusableNodes.indexOf(document.activeElement) if (event.shiftKey && focusedItemIndex === 0) { focusableNodes[focusableNodes.length - 1].focus() event.preventDefault() } if (!event.shiftKey && focusedItemIndex === focusableNodes.length - 1) { focusableNodes[0].focus() event.preventDefault() } } openModal () { this.setPluginState({ isHidden: false }) // save scroll position this.savedDocumentScrollPosition = window.scrollY // add class to body that sets position fixed, move everything back // to scroll position document.body.classList.add('is-UppyDashboard-open') document.body.style.top = `-${this.savedDocumentScrollPosition}px` // timeout is needed because yo-yo/morphdom/nanoraf; not needed without nanoraf setTimeout(this.setFocusToFirstNode, 100) setTimeout(this.updateDashboardElWidth, 100) } closeModal () { this.setPluginState({ isHidden: true }) document.body.classList.remove('is-UppyDashboard-open') window.scrollTo(0, this.savedDocumentScrollPosition) } isModalOpen () { return !this.getPluginState().isHidden || false } onKeydown (event) { // close modal on esc key press if (event.keyCode === 27) this.requestCloseModal(event) // maintainFocus on tab key press if (event.keyCode === 9) this.maintainFocus(event) } handleClickOutside () { if (this.opts.closeModalOnClickOutside) this.requestCloseModal() } initEvents () { // Modal open button const showModalTrigger = findAllDOMElements(this.opts.trigger) if (!this.opts.inline && showModalTrigger) { showModalTrigger.forEach(trigger => trigger.addEventListener('click', this.openModal)) } if (!this.opts.inline && !showModalTrigger) { this.uppy.log('Dashboard modal trigger not found, you won’t be able to select files. 
Make sure `trigger` is set correctly in Dashboard options', 'error') } if (!this.opts.inline) { document.addEventListener('keydown', this.onKeydown) } // Drag Drop this.removeDragDropListener = dragDrop(this.el, (files) => { this.handleDrop(files) }) this.uppy.on('dashboard:file-card', this.handleFileCard) window.addEventListener('resize', this.updateDashboardElWidth) } removeEvents () { const showModalTrigger = findAllDOMElements(this.opts.trigger) if (!this.opts.inline && showModalTrigger) { showModalTrigger.forEach(trigger => trigger.removeEventListener('click', this.openModal)) } if (!this.opts.inline) { document.removeEventListener('keydown', this.onKeydown) } this.removeDragDropListener() this.uppy.off('dashboard:file-card', this.handleFileCard) window.removeEventListener('resize', this.updateDashboardElWidth) } updateDashboardElWidth () { const dashboardEl = this.el.querySelector('.UppyDashboard-inner') this.uppy.log(`Dashboard width: ${dashboardEl.offsetWidth}`) this.setPluginState({ containerWidth: dashboardEl.offsetWidth }) } handleFileCard (fileId) { this.setPluginState({ fileCardFor: fileId || false }) } handleDrop (files) { this.uppy.log('[Dashboard] Files were dropped') files.forEach((file) => { this.uppy.addFile({ source: this.id, name: file.name, type: file.type, data: file }) }) } cancelAll () { this.uppy.emit('cancel-all') } pauseAll () { this.uppy.emit('pause-all') } resumeAll () { this.uppy.emit('resume-all') } render (state) { const pluginState = this.getPluginState() const files = state.files const newFiles = Object.keys(files).filter((file) => { return !files[file].progress.uploadStarted }) const inProgressFiles = Object.keys(files).filter((file) => { return !files[file].progress.uploadComplete && files[file].progress.uploadStarted && !files[file].isPaused }) let inProgressFilesArray = [] inProgressFiles.forEach((file) => { inProgressFilesArray.push(files[file]) }) let totalSize = 0 let totalUploadedSize = 0 inProgressFilesArray.forEach((file) => { totalSize = totalSize + (file.progress.bytesTotal || 0) totalUploadedSize = totalUploadedSize + (file.progress.bytesUploaded || 0) }) totalSize = prettyBytes(totalSize) totalUploadedSize = prettyBytes(totalUploadedSize) const attachRenderFunctionToTarget = (target) => { const plugin = this.uppy.getPlugin(target.id) return Object.assign({}, target, { icon: plugin.icon || this.opts.defaultTabIcon, render: plugin.render }) } const isSupported = (target) => { const plugin = this.uppy.getPlugin(target.id) // If the plugin does not provide a `supported` check, assume the plugin works everywhere. if (typeof plugin.isSupported !== 'function') { return true } return plugin.isSupported() } const acquirers = pluginState.targets .filter(target => target.type === 'acquirer' && isSupported(target)) .map(attachRenderFunctionToTarget) const progressindicators = pluginState.targets .filter(target => target.type === 'progressindicator') .map(attachRenderFunctionToTarget) const startUpload = (ev) => { this.uppy.upload().catch((err) => { // Log error. 
this.uppy.log(err.stack || err.message || err) }) } const cancelUpload = (fileID) => { this.uppy.emit('upload-cancel', fileID) this.uppy.removeFile(fileID) } const showFileCard = (fileID) => { this.uppy.emit('dashboard:file-card', fileID) } const fileCardDone = (meta, fileID) => { this.uppy.setFileMeta(fileID, meta) this.uppy.emit('dashboard:file-card') } return Dashboard({ state: state, modal: pluginState, newFiles: newFiles, files: files, totalFileCount: Object.keys(files).length, totalProgress: state.totalProgress, acquirers: acquirers, activePanel: pluginState.activePanel, getPlugin: this.uppy.getPlugin, progressindicators: progressindicators, autoProceed: this.uppy.opts.autoProceed, hideUploadButton: this.opts.hideUploadButton, id: this.id, closeModal: this.requestCloseModal, handleClickOutside: this.handleClickOutside, showProgressDetails: this.opts.showProgressDetails, inline: this.opts.inline, semiTransparent: this.opts.semiTransparent, showPanel: this.showPanel, hideAllPanels: this.hideAllPanels, log: this.uppy.log, i18n: this.i18n, pauseAll: this.pauseAll, resumeAll: this.resumeAll, addFile: this.uppy.addFile, removeFile: this.uppy.removeFile, info: this.uppy.info, note: this.opts.note, metaFields: this.getPluginState().metaFields, resumableUploads: this.uppy.state.capabilities.resumableUploads || false, startUpload: startUpload, pauseUpload: this.uppy.pauseResume, retryUpload: this.uppy.retryUpload, cancelUpload: cancelUpload, fileCardFor: pluginState.fileCardFor, showFileCard: showFileCard, fileCardDone: fileCardDone, updateDashboardElWidth: this.updateDashboardElWidth, maxWidth: this.opts.maxWidth, maxHeight: this.opts.maxHeight, currentWidth: pluginState.containerWidth, isWide: pluginState.containerWidth > 400 }) } discoverProviderPlugins () { this.uppy.iteratePlugins((plugin) => { if (plugin && !plugin.target && plugin.opts && plugin.opts.target === this.constructor) { this.addTarget(plugin) } }) } install () { // Set default state for Modal this.setPluginState({ isHidden: true, showFileCard: false, activePanel: false, metaFields: this.opts.metaFields, targets: [] }) const target = this.opts.target if (target) { this.mount(target, this) } const plugins = this.opts.plugins || [] plugins.forEach((pluginID) => { const plugin = this.uppy.getPlugin(pluginID) if (plugin) plugin.mount(this, plugin) }) if (!this.opts.disableStatusBar) { this.uppy.use(StatusBar, { target: this, hideUploadButton: this.opts.hideUploadButton }) } if (!this.opts.disableInformer) { this.uppy.use(Informer, { target: this }) } this.discoverProviderPlugins() this.initEvents() } uninstall () { if (!this.opts.disableInformer) { const informer = this.uppy.getPlugin('Informer') if (informer) this.uppy.removePlugin(informer) } if (!this.opts.disableStatusBar) { const statusBar = this.uppy.getPlugin('StatusBar') // Checking if this plugin exists, in case it was removed by uppy-core // before the Dashboard was. if (statusBar) this.uppy.removePlugin(statusBar) } const plugins = this.opts.plugins || [] plugins.forEach((pluginID) => { const plugin = this.uppy.getPlugin(pluginID) if (plugin) plugin.unmount() }) this.unmount() this.removeEvents() } }
1
10,283
Good call swapping these names! Makes more sense this way, I think (see the sketch after this record).
transloadit-uppy
js
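The rename in the patch above (the `Dashboard` import becoming `DashboardUI`) presumably frees the shorter name for the plugin class: the old file both requires a stateless view function from './Dashboard' and exports a stateful plugin class that calls it, and the shown hunk only covers the import side of the swap. The sketch below only illustrates that naming pattern; the tiny view function and its markup are assumptions for illustration, not Uppy's exact code.

// A stateless view: takes props, returns markup, keeps no plugin state.
// (Stands in for what './Dashboard' exports in the real code.)
const DashboardUI = (props) =>
  `<div class="Dashboard">${props.totalFileCount} files selected</div>`;

// The stateful plugin keeps the short, user-facing name. Because the view is
// now called DashboardUI, the class name no longer shadows the import.
class Dashboard {
  constructor(opts) {
    this.opts = opts;
  }
  render(state) {
    return DashboardUI({ totalFileCount: Object.keys(state.files).length });
  }
}

const plugin = new Dashboard({ inline: true });
console.log(plugin.render({ files: { a: {}, b: {} } })); // <div class="Dashboard">2 files selected</div>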
@@ -13,8 +13,12 @@ import options from './options'; */ export function render(vnode, parentDom, replaceNode) { if (options._root) options._root(vnode, parentDom); - let oldVNode = replaceNode && replaceNode._children || parentDom._children; + let oldVNode = replaceNode && replaceNode._children || parentDom._children; + let isHydrating = false; + if (replaceNode === null) { + isHydrating = true; + } vnode = createElement(Fragment, null, [vnode]); let mounts = [];
1
import { EMPTY_OBJ, EMPTY_ARR } from './constants';
import { commitRoot, diff } from './diff/index';
import { createElement, Fragment } from './create-element';
import options from './options';

/**
 * Render a Preact virtual node into a DOM element
 * @param {import('./index').ComponentChild} vnode The virtual node to render
 * @param {import('./internal').PreactElement} parentDom The DOM element to
 * render into
 * @param {Element | Text} [replaceNode] Attempt to re-use an
 * existing DOM tree rooted at `replaceNode`
 */
export function render(vnode, parentDom, replaceNode) {
	if (options._root) options._root(vnode, parentDom);
	let oldVNode = replaceNode && replaceNode._children || parentDom._children;
	vnode = createElement(Fragment, null, [vnode]);

	let mounts = [];
	diff(
		parentDom,
		(replaceNode || parentDom)._children = vnode,
		oldVNode || EMPTY_OBJ,
		EMPTY_OBJ,
		parentDom.ownerSVGElement !== undefined,
		replaceNode
			? [replaceNode]
			: oldVNode
				? null
				: EMPTY_ARR.slice.call(parentDom.childNodes),
		mounts,
		false,
		replaceNode || EMPTY_OBJ,
	);
	commitRoot(mounts, vnode);
}

/**
 * Update an existing DOM element with data from a Preact virtual node
 * @param {import('./index').ComponentChild} vnode The virtual node to render
 * @param {import('./internal').PreactElement} parentDom The DOM element to
 * update
 */
export function hydrate(vnode, parentDom) {
	parentDom._children = null;
	render(vnode, parentDom);
}
1
13,965
Maybe leaving this out will save some bytes, as in `let isHydrating = replaceNode === null` (see the sketch after this record).
preactjs-preact
js
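The review comment on the record above suggests collapsing the added flag into a single declaration. A minimal sketch of that suggestion follows; it assumes, as the patch implies, that `hydrate()` is updated elsewhere to pass `null` explicitly (that part of the diff is not shown), and the stand-in `render()` keeps only the flag logic.

// Stripped-down stand-in for render(); everything except the flag is omitted.
function render(vnode, parentDom, replaceNode) {
  // The suggestion: derive the flag in one declaration instead of
  //   let isHydrating = false;
  //   if (replaceNode === null) { isHydrating = true; }
  let isHydrating = replaceNode === null;

  // ...the real render() would go on to diff vnode against parentDom here.
  return isHydrating;
}

const parent = {}; // placeholder for a DOM element
console.log(render({ type: 'div' }, parent, null));      // true  (hydration path)
console.log(render({ type: 'div' }, parent, undefined)); // false (plain render)

Using strict equality keeps a missing third argument (`undefined`) on the non-hydrating path, which matches the behaviour of the patched code.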
@@ -298,6 +298,10 @@ class RustGenerator : public BaseGenerator { assert(!cur_name_space_); + // Generate imports for the global scope in case no namespace is used + // in the schema file. + GenNamespaceImports(0); + // Generate all code in their namespaces, once, because Rust does not // permit re-opening modules. //
1
/* * Copyright 2018 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // independent from idl_parser, since this code is not needed for most clients #include "flatbuffers/code_generators.h" #include "flatbuffers/flatbuffers.h" #include "flatbuffers/idl.h" #include "flatbuffers/util.h" namespace flatbuffers { static std::string GeneratedFileName(const std::string &path, const std::string &file_name) { return path + file_name + "_generated.rs"; } // Convert a camelCaseIdentifier or CamelCaseIdentifier to a // snake_case_indentifier. std::string MakeSnakeCase(const std::string &in) { std::string s; for (size_t i = 0; i < in.length(); i++) { if (i == 0) { s += static_cast<char>(tolower(in[0])); } else if (in[i] == '_') { s += '_'; } else if (!islower(in[i])) { // Prevent duplicate underscores for Upper_Snake_Case strings // and UPPERCASE strings. if (islower(in[i - 1])) { s += '_'; } s += static_cast<char>(tolower(in[i])); } else { s += in[i]; } } return s; } // Convert a string to all uppercase. std::string MakeUpper(const std::string &in) { std::string s; for (size_t i = 0; i < in.length(); i++) { s += static_cast<char>(toupper(in[i])); } return s; } // Encapsulate all logical field types in this enum. This allows us to write // field logic based on type switches, instead of branches on the properties // set on the Type. // TODO(rw): for backwards compatibility, we can't use a strict `enum class` // declaration here. could we use the `-Wswitch-enum` warning to // achieve the same effect? enum FullType { ftInteger = 0, ftFloat = 1, ftBool = 2, ftStruct = 3, ftTable = 4, ftEnumKey = 5, ftUnionKey = 6, ftUnionValue = 7, // TODO(rw): bytestring? ftString = 8, ftVectorOfInteger = 9, ftVectorOfFloat = 10, ftVectorOfBool = 11, ftVectorOfEnumKey = 12, ftVectorOfStruct = 13, ftVectorOfTable = 14, ftVectorOfString = 15, ftVectorOfUnionValue = 16, }; // Convert a Type to a FullType (exhaustive). FullType GetFullType(const Type &type) { // N.B. The order of these conditionals matters for some types. 
if (type.base_type == BASE_TYPE_STRING) { return ftString; } else if (type.base_type == BASE_TYPE_STRUCT) { if (type.struct_def->fixed) { return ftStruct; } else { return ftTable; } } else if (type.base_type == BASE_TYPE_VECTOR) { switch (GetFullType(type.VectorType())) { case ftInteger: { return ftVectorOfInteger; } case ftFloat: { return ftVectorOfFloat; } case ftBool: { return ftVectorOfBool; } case ftStruct: { return ftVectorOfStruct; } case ftTable: { return ftVectorOfTable; } case ftString: { return ftVectorOfString; } case ftEnumKey: { return ftVectorOfEnumKey; } case ftUnionKey: case ftUnionValue: { FLATBUFFERS_ASSERT(false && "vectors of unions are unsupported"); } default: { FLATBUFFERS_ASSERT(false && "vector of vectors are unsupported"); } } } else if (type.enum_def != nullptr) { if (type.enum_def->is_union) { if (type.base_type == BASE_TYPE_UNION) { return ftUnionValue; } else if (IsInteger(type.base_type)) { return ftUnionKey; } else { FLATBUFFERS_ASSERT(false && "unknown union field type"); } } else { return ftEnumKey; } } else if (IsScalar(type.base_type)) { if (IsBool(type.base_type)) { return ftBool; } else if (IsInteger(type.base_type)) { return ftInteger; } else if (IsFloat(type.base_type)) { return ftFloat; } else { FLATBUFFERS_ASSERT(false && "unknown number type"); } } FLATBUFFERS_ASSERT(false && "completely unknown type"); // this is only to satisfy the compiler's return analysis. return ftBool; } // If the second parameter is false then wrap the first with Option<...> std::string WrapInOptionIfNotRequired(std::string s, bool required) { if (required) { return s; } else { return "Option<" + s + ">"; } } // If the second parameter is false then add .unwrap() std::string AddUnwrapIfRequired(std::string s, bool required) { if (required) { return s + ".unwrap()"; } else { return s; } } namespace rust { class RustGenerator : public BaseGenerator { public: RustGenerator(const Parser &parser, const std::string &path, const std::string &file_name) : BaseGenerator(parser, path, file_name, "", "::"), cur_name_space_(nullptr) { const char *keywords[] = { // list taken from: // https://doc.rust-lang.org/book/second-edition/appendix-01-keywords.html // // we write keywords one per line so that we can easily compare them with // changes to that webpage in the future. // currently-used keywords "as", "break", "const", "continue", "crate", "else", "enum", "extern", "false", "fn", "for", "if", "impl", "in", "let", "loop", "match", "mod", "move", "mut", "pub", "ref", "return", "Self", "self", "static", "struct", "super", "trait", "true", "type", "unsafe", "use", "where", "while", // future possible keywords "abstract", "alignof", "become", "box", "do", "final", "macro", "offsetof", "override", "priv", "proc", "pure", "sizeof", "typeof", "unsized", "virtual", "yield", // other rust terms we should not use "std", "usize", "isize", "u8", "i8", "u16", "i16", "u32", "i32", "u64", "i64", "u128", "i128", "f32", "f64", // These are terms the code generator can implement on types. // // In Rust, the trait resolution rules (as described at // https://github.com/rust-lang/rust/issues/26007) mean that, as long // as we impl table accessors as inherent methods, we'll never create // conflicts with these keywords. However, that's a fairly nuanced // implementation detail, and how we implement methods could change in // the future. as a result, we proactively block these out as reserved // words. 
"follow", "push", "size", "alignment", "to_little_endian", "from_little_endian", nullptr }; for (auto kw = keywords; *kw; kw++) keywords_.insert(*kw); } // Iterate through all definitions we haven't generated code for (enums, // structs, and tables) and output them to a single file. bool generate() { code_.Clear(); code_ += "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n"; assert(!cur_name_space_); // Generate all code in their namespaces, once, because Rust does not // permit re-opening modules. // // TODO(rw): Use a set data structure to reduce namespace evaluations from // O(n**2) to O(n). for (auto ns_it = parser_.namespaces_.begin(); ns_it != parser_.namespaces_.end(); ++ns_it) { const auto &ns = *ns_it; // Generate code for all the enum declarations. for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end(); ++it) { const auto &enum_def = **it; if (enum_def.defined_namespace != ns) { continue; } if (!enum_def.generated) { SetNameSpace(enum_def.defined_namespace); GenEnum(enum_def); } } // Generate code for all structs. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (struct_def.defined_namespace != ns) { continue; } if (struct_def.fixed && !struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenStruct(struct_def); } } // Generate code for all tables. for (auto it = parser_.structs_.vec.begin(); it != parser_.structs_.vec.end(); ++it) { const auto &struct_def = **it; if (struct_def.defined_namespace != ns) { continue; } if (!struct_def.fixed && !struct_def.generated) { SetNameSpace(struct_def.defined_namespace); GenTable(struct_def); } } // Generate global helper functions. if (parser_.root_struct_def_) { auto &struct_def = *parser_.root_struct_def_; if (struct_def.defined_namespace != ns) { continue; } SetNameSpace(struct_def.defined_namespace); GenRootTableFuncs(struct_def); } } if (cur_name_space_) SetNameSpace(nullptr); const auto file_path = GeneratedFileName(path_, file_name_); const auto final_code = code_.ToString(); return SaveFile(file_path.c_str(), final_code, false); } private: CodeWriter code_; std::set<std::string> keywords_; // This tracks the current namespace so we can insert namespace declarations. const Namespace *cur_name_space_; const Namespace *CurrentNameSpace() const { return cur_name_space_; } // Determine if a Type needs a lifetime template parameter when used in the // Rust builder args. bool TableBuilderTypeNeedsLifetime(const Type &type) const { switch (GetFullType(type)) { case ftInteger: case ftFloat: case ftBool: case ftEnumKey: case ftUnionKey: case ftUnionValue: { return false; } default: { return true; } } } // Determine if a table args rust type needs a lifetime template parameter. bool TableBuilderArgsNeedsLifetime(const StructDef &struct_def) const { FLATBUFFERS_ASSERT(!struct_def.fixed); for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { continue; } if (TableBuilderTypeNeedsLifetime(field.value.type)) { return true; } } return false; } // Determine if a Type needs to be copied (for endian safety) when used in a // Struct. 
bool StructMemberAccessNeedsCopy(const Type &type) const { switch (GetFullType(type)) { case ftInteger: // requires endian swap case ftFloat: // requires endian swap case ftBool: // no endian-swap, but do the copy for UX consistency case ftEnumKey: { return true; } // requires endian swap case ftStruct: { return false; } // no endian swap default: { // logic error: no other types can be struct members. FLATBUFFERS_ASSERT(false && "invalid struct member type"); return false; // only to satisfy compiler's return analysis } } } std::string EscapeKeyword(const std::string &name) const { return keywords_.find(name) == keywords_.end() ? name : name + "_"; } std::string Name(const Definition &def) const { return EscapeKeyword(def.name); } std::string Name(const EnumVal &ev) const { return EscapeKeyword(ev.name); } std::string WrapInNameSpace(const Definition &def) const { return WrapInNameSpace(def.defined_namespace, Name(def)); } std::string WrapInNameSpace(const Namespace *ns, const std::string &name) const { if (CurrentNameSpace() == ns) return name; std::string prefix = GetRelativeNamespaceTraversal(CurrentNameSpace(), ns); return prefix + name; } // Determine the namespace traversal needed from the Rust crate root. // This may be useful in the future for referring to included files, but is // currently unused. std::string GetAbsoluteNamespaceTraversal(const Namespace *dst) const { std::stringstream stream; stream << "::"; for (auto d = dst->components.begin(); d != dst->components.end(); d++) { stream << MakeSnakeCase(*d) + "::"; } return stream.str(); } // Determine the relative namespace traversal needed to reference one // namespace from another namespace. This is useful because it does not force // the user to have a particular file layout. (If we output absolute // namespace paths, that may require users to organize their Rust crates in a // particular way.) std::string GetRelativeNamespaceTraversal(const Namespace *src, const Namespace *dst) const { // calculate the path needed to reference dst from src. // example: f(A::B::C, A::B::C) -> (none) // example: f(A::B::C, A::B) -> super:: // example: f(A::B::C, A::B::D) -> super::D // example: f(A::B::C, A) -> super::super:: // example: f(A::B::C, D) -> super::super::super::D // example: f(A::B::C, D::E) -> super::super::super::D::E // example: f(A, D::E) -> super::D::E // does not include leaf object (typically a struct type). size_t i = 0; std::stringstream stream; auto s = src->components.begin(); auto d = dst->components.begin(); for(;;) { if (s == src->components.end()) { break; } if (d == dst->components.end()) { break; } if (*s != *d) { break; } s++; d++; i++; } for (; s != src->components.end(); s++) { stream << "super::"; } for (; d != dst->components.end(); d++) { stream << MakeSnakeCase(*d) + "::"; } return stream.str(); } // Generate a comment from the schema. void GenComment(const std::vector<std::string> &dc, const char *prefix = "") { std::string text; ::flatbuffers::GenComment(dc, &text, nullptr, prefix); code_ += text + "\\"; } // Return a Rust type from the table in idl.h. 
std::string GetTypeBasic(const Type &type) const { switch (GetFullType(type)) { case ftInteger: case ftFloat: case ftBool: case ftEnumKey: case ftUnionKey: { break; } default: { FLATBUFFERS_ASSERT(false && "incorrect type given");} } // clang-format off static const char * const ctypename[] = { #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \ RTYPE) \ #RTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD // clang-format on }; if (type.enum_def) { return WrapInNameSpace(*type.enum_def); } return ctypename[type.base_type]; } // Look up the native type for an enum. This will always be an integer like // u8, i32, etc. std::string GetEnumTypeForDecl(const Type &type) { const auto ft = GetFullType(type); if (!(ft == ftEnumKey || ft == ftUnionKey)) { FLATBUFFERS_ASSERT(false && "precondition failed in GetEnumTypeForDecl"); } static const char *ctypename[] = { // clang-format off #define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE, \ RTYPE) \ #RTYPE, FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD) #undef FLATBUFFERS_TD // clang-format on }; // Enums can be bools, but their Rust representation must be a u8, as used // in the repr attribute (#[repr(bool)] is an invalid attribute). if (type.base_type == BASE_TYPE_BOOL) return "u8"; return ctypename[type.base_type]; } // Return a Rust type for any type (scalar, table, struct) specifically for // using a FlatBuffer. std::string GetTypeGet(const Type &type) const { switch (GetFullType(type)) { case ftInteger: case ftFloat: case ftBool: case ftEnumKey: case ftUnionKey: { return GetTypeBasic(type); } case ftTable: { return WrapInNameSpace(type.struct_def->defined_namespace, type.struct_def->name) + "<'a>"; } default: { return WrapInNameSpace(type.struct_def->defined_namespace, type.struct_def->name); } } } std::string GetEnumValUse(const EnumDef &enum_def, const EnumVal &enum_val) const { return Name(enum_def) + "::" + Name(enum_val); } // Generate an enum declaration, // an enum string lookup table, // an enum match function, // and an enum array of values void GenEnum(const EnumDef &enum_def) { code_.SetValue("ENUM_NAME", Name(enum_def)); code_.SetValue("BASE_TYPE", GetEnumTypeForDecl(enum_def.underlying_type)); GenComment(enum_def.doc_comment); code_ += "#[allow(non_camel_case_types)]"; code_ += "#[repr({{BASE_TYPE}})]"; code_ += "#[derive(Clone, Copy, PartialEq, Debug)]"; code_ += "pub enum " + Name(enum_def) + " {"; int64_t anyv = 0; const EnumVal *minv = nullptr, *maxv = nullptr; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; GenComment(ev.doc_comment, " "); code_.SetValue("KEY", Name(ev)); code_.SetValue("VALUE", NumToString(ev.value)); code_ += " {{KEY}} = {{VALUE}},"; minv = !minv || minv->value > ev.value ? &ev : minv; maxv = !maxv || maxv->value < ev.value ? &ev : maxv; anyv |= ev.value; } code_ += ""; code_ += "}"; code_ += ""; code_.SetValue("ENUM_NAME", Name(enum_def)); code_.SetValue("ENUM_NAME_SNAKE", MakeSnakeCase(Name(enum_def))); code_.SetValue("ENUM_NAME_CAPS", MakeUpper(MakeSnakeCase(Name(enum_def)))); code_.SetValue("ENUM_MIN_BASE_VALUE", NumToString(minv->value)); code_.SetValue("ENUM_MAX_BASE_VALUE", NumToString(maxv->value)); // Generate enum constants, and impls for Follow, EndianScalar, and Push. 
code_ += "const ENUM_MIN_{{ENUM_NAME_CAPS}}: {{BASE_TYPE}} = \\"; code_ += "{{ENUM_MIN_BASE_VALUE}};"; code_ += "const ENUM_MAX_{{ENUM_NAME_CAPS}}: {{BASE_TYPE}} = \\"; code_ += "{{ENUM_MAX_BASE_VALUE}};"; code_ += ""; code_ += "impl<'a> flatbuffers::Follow<'a> for {{ENUM_NAME}} {"; code_ += " type Inner = Self;"; code_ += " #[inline]"; code_ += " fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {"; code_ += " flatbuffers::read_scalar_at::<Self>(buf, loc)"; code_ += " }"; code_ += "}"; code_ += ""; code_ += "impl flatbuffers::EndianScalar for {{ENUM_NAME}} {"; code_ += " #[inline]"; code_ += " fn to_little_endian(self) -> Self {"; code_ += " let n = {{BASE_TYPE}}::to_le(self as {{BASE_TYPE}});"; code_ += " let p = &n as *const {{BASE_TYPE}} as *const {{ENUM_NAME}};"; code_ += " unsafe { *p }"; code_ += " }"; code_ += " #[inline]"; code_ += " fn from_little_endian(self) -> Self {"; code_ += " let n = {{BASE_TYPE}}::from_le(self as {{BASE_TYPE}});"; code_ += " let p = &n as *const {{BASE_TYPE}} as *const {{ENUM_NAME}};"; code_ += " unsafe { *p }"; code_ += " }"; code_ += "}"; code_ += ""; code_ += "impl flatbuffers::Push for {{ENUM_NAME}} {"; code_ += " type Output = {{ENUM_NAME}};"; code_ += " #[inline]"; code_ += " fn push(&self, dst: &mut [u8], _rest: &[u8]) {"; code_ += " flatbuffers::emplace_scalar::<{{ENUM_NAME}}>" "(dst, *self);"; code_ += " }"; code_ += "}"; code_ += ""; // Generate an array of all enumeration values. auto num_fields = NumToString(enum_def.vals.vec.size()); code_ += "#[allow(non_camel_case_types)]"; code_ += "const ENUM_VALUES_{{ENUM_NAME_CAPS}}:[{{ENUM_NAME}}; " + num_fields + "] = ["; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; auto value = GetEnumValUse(enum_def, ev); auto suffix = *it != enum_def.vals.vec.back() ? "," : ""; code_ += " " + value + suffix; } code_ += "];"; code_ += ""; // Generate a string table for enum values. // Problem is, if values are very sparse that could generate really big // tables. Ideally in that case we generate a map lookup instead, but for // the moment we simply don't output a table at all. auto range = enum_def.vals.vec.back()->value - enum_def.vals.vec.front()->value + 1; // Average distance between values above which we consider a table // "too sparse". Change at will. static const int kMaxSparseness = 5; if (range / static_cast<int64_t>(enum_def.vals.vec.size()) < kMaxSparseness) { code_ += "#[allow(non_camel_case_types)]"; code_ += "const ENUM_NAMES_{{ENUM_NAME_CAPS}}:[&'static str; " + NumToString(range) + "] = ["; auto val = enum_def.vals.vec.front()->value; for (auto it = enum_def.vals.vec.begin(); it != enum_def.vals.vec.end(); ++it) { const auto &ev = **it; while (val++ != ev.value) { code_ += " \"\","; } auto suffix = *it != enum_def.vals.vec.back() ? 
"," : ""; code_ += " \"" + Name(ev) + "\"" + suffix; } code_ += "];"; code_ += ""; code_ += "pub fn enum_name_{{ENUM_NAME_SNAKE}}(e: {{ENUM_NAME}}) -> " "&'static str {"; code_ += " let index: usize = e as usize\\"; if (enum_def.vals.vec.front()->value) { auto vals = GetEnumValUse(enum_def, *enum_def.vals.vec.front()); code_ += " - " + vals + " as usize\\"; } code_ += ";"; code_ += " ENUM_NAMES_{{ENUM_NAME_CAPS}}[index]"; code_ += "}"; code_ += ""; } if (enum_def.is_union) { // Generate tyoesafe offset(s) for unions code_.SetValue("NAME", Name(enum_def)); code_.SetValue("UNION_OFFSET_NAME", Name(enum_def) + "UnionTableOffset"); code_ += "pub struct {{UNION_OFFSET_NAME}} {}"; } } std::string GetFieldOffsetName(const FieldDef &field) { return "VT_" + MakeUpper(Name(field)); } std::string GetDefaultConstant(const FieldDef &field) { return field.value.type.base_type == BASE_TYPE_FLOAT ? field.value.constant + "" : field.value.constant; } std::string GetDefaultScalarValue(const FieldDef &field) { switch (GetFullType(field.value.type)) { case ftInteger: { return GetDefaultConstant(field); } case ftFloat: { return GetDefaultConstant(field); } case ftBool: { return field.value.constant == "0" ? "false" : "true"; } case ftUnionKey: case ftEnumKey: { auto ev = field.value.type.enum_def->ReverseLookup( StringToInt(field.value.constant.c_str()), false); assert(ev); return WrapInNameSpace(field.value.type.enum_def->defined_namespace, GetEnumValUse(*field.value.type.enum_def, *ev)); } // All pointer-ish types have a default value of None, because they are // wrapped in Option. default: { return "None"; } } } // Create the return type for fields in the *BuilderArgs structs that are // used to create Tables. // // Note: we could make all inputs to the BuilderArgs be an Option, as well // as all outputs. But, the UX of Flatbuffers is that the user doesn't get to // know if the value is default or not, because there are three ways to // return a default value: // 1) return a stored value that happens to be the default, // 2) return a hardcoded value because the relevant vtable field is not in // the vtable, or // 3) return a hardcoded value because the vtable field value is set to zero. 
std::string TableBuilderArgsDefnType(const FieldDef &field, const std::string lifetime) { const Type& type = field.value.type; switch (GetFullType(type)) { case ftInteger: case ftFloat: case ftBool: { const auto typname = GetTypeBasic(type); return typname; } case ftStruct: { const auto typname = WrapInNameSpace(*type.struct_def); return "Option<&" + lifetime + " " + typname + ">"; } case ftTable: { const auto typname = WrapInNameSpace(*type.struct_def); return "Option<flatbuffers::WIPOffset<" + typname + "<" + lifetime + \ ">>>"; } case ftString: { return "Option<flatbuffers::WIPOffset<&" + lifetime + " str>>"; } case ftEnumKey: case ftUnionKey: { const auto typname = WrapInNameSpace(*type.enum_def); return typname; } case ftUnionValue: { const auto typname = WrapInNameSpace(*type.enum_def); return "Option<flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>>"; } case ftVectorOfInteger: case ftVectorOfFloat: { const auto typname = GetTypeBasic(type.VectorType()); return "Option<flatbuffers::WIPOffset<flatbuffers::Vector<" + \ lifetime + ", " + typname + ">>>"; } case ftVectorOfBool: { return "Option<flatbuffers::WIPOffset<flatbuffers::Vector<" + \ lifetime + ", bool>>>"; } case ftVectorOfEnumKey: { const auto typname = WrapInNameSpace(*type.enum_def); return "Option<flatbuffers::WIPOffset<flatbuffers::Vector<" + \ lifetime + ", " + typname + ">>>"; } case ftVectorOfStruct: { const auto typname = WrapInNameSpace(*type.struct_def); return "Option<flatbuffers::WIPOffset<flatbuffers::Vector<" + \ lifetime + ", " + typname + ">>>"; } case ftVectorOfTable: { const auto typname = WrapInNameSpace(*type.struct_def); return "Option<flatbuffers::WIPOffset<flatbuffers::Vector<" + \ lifetime + ", flatbuffers::ForwardsUOffset<" + typname + \ "<" + lifetime + ">>>>>"; } case ftVectorOfString: { return "Option<flatbuffers::WIPOffset<flatbuffers::Vector<" + \ lifetime + ", flatbuffers::ForwardsUOffset<&" + lifetime + \ " str>>>>"; } case ftVectorOfUnionValue: { const auto typname = WrapInNameSpace(*type.enum_def) + \ "UnionTableOffset"; return "Option<flatbuffers::WIPOffset<flatbuffers::Vector<" + \ lifetime + ", flatbuffers::ForwardsUOffset<" "flatbuffers::Table<" + lifetime + ">>>>"; } } return "INVALID_CODE_GENERATION"; // for return analysis } std::string TableBuilderArgsDefaultValue(const FieldDef &field) { return GetDefaultScalarValue(field); } std::string TableBuilderAddFuncDefaultValue(const FieldDef &field) { switch (GetFullType(field.value.type)) { case ftUnionKey: case ftEnumKey: { const std::string basetype = GetTypeBasic(field.value.type); return GetDefaultScalarValue(field); } default: { return GetDefaultScalarValue(field); } } } std::string TableBuilderArgsAddFuncType(const FieldDef &field, const std::string lifetime) { const Type& type = field.value.type; switch (GetFullType(field.value.type)) { case ftVectorOfStruct: { const auto typname = WrapInNameSpace(*type.struct_def); return "flatbuffers::WIPOffset<flatbuffers::Vector<" + lifetime + \ ", " + typname + ">>"; } case ftVectorOfTable: { const auto typname = WrapInNameSpace(*type.struct_def); return "flatbuffers::WIPOffset<flatbuffers::Vector<" + lifetime + \ ", flatbuffers::ForwardsUOffset<" + typname + \ "<" + lifetime + ">>>>"; } case ftVectorOfInteger: case ftVectorOfFloat: { const auto typname = GetTypeBasic(type.VectorType()); return "flatbuffers::WIPOffset<flatbuffers::Vector<" + lifetime + \ ", " + typname + ">>"; } case ftVectorOfBool: { return "flatbuffers::WIPOffset<flatbuffers::Vector<" + lifetime + \ ", bool>>"; } case 
ftVectorOfString: { return "flatbuffers::WIPOffset<flatbuffers::Vector<" + lifetime + \ ", flatbuffers::ForwardsUOffset<&" + lifetime + " str>>>"; } case ftVectorOfEnumKey: { const auto typname = WrapInNameSpace(*type.enum_def); return "flatbuffers::WIPOffset<flatbuffers::Vector<" + lifetime + \ ", " + typname + ">>"; } case ftVectorOfUnionValue: { const auto typname = WrapInNameSpace(*type.enum_def); return "flatbuffers::WIPOffset<flatbuffers::Vector<" + lifetime + \ ", flatbuffers::ForwardsUOffset<flatbuffers::Table<" + \ lifetime + ">>>"; } case ftEnumKey: { const auto typname = WrapInNameSpace(*type.enum_def); return typname; } case ftStruct: { const auto typname = WrapInNameSpace(*type.struct_def); return "&" + lifetime + " " + typname + ""; } case ftTable: { const auto typname = WrapInNameSpace(*type.struct_def); return "flatbuffers::WIPOffset<" + typname + "<" + lifetime + ">>"; } case ftInteger: case ftFloat: { const auto typname = GetTypeBasic(type); return typname; } case ftBool: { return "bool"; } case ftString: { return "flatbuffers::WIPOffset<&" + lifetime + " str>"; } case ftUnionKey: { const auto typname = WrapInNameSpace(*type.enum_def); return typname; } case ftUnionValue: { const auto typname = WrapInNameSpace(*type.enum_def); return "flatbuffers::WIPOffset<flatbuffers::UnionWIPOffset>"; } } return "INVALID_CODE_GENERATION"; // for return analysis } std::string TableBuilderArgsAddFuncBody(const FieldDef &field) { const Type& type = field.value.type; switch (GetFullType(field.value.type)) { case ftInteger: case ftFloat: { const auto typname = GetTypeBasic(field.value.type); return "self.fbb_.push_slot::<" + typname + ">"; } case ftBool: { return "self.fbb_.push_slot::<bool>"; } case ftEnumKey: case ftUnionKey: { const auto underlying_typname = GetTypeBasic(type); return "self.fbb_.push_slot::<" + underlying_typname + ">"; } case ftStruct: { const std::string typname = WrapInNameSpace(*type.struct_def); return "self.fbb_.push_slot_always::<&" + typname + ">"; } case ftTable: { const auto typname = WrapInNameSpace(*type.struct_def); return "self.fbb_.push_slot_always::<flatbuffers::WIPOffset<" + \ typname + ">>"; } case ftUnionValue: case ftString: case ftVectorOfInteger: case ftVectorOfFloat: case ftVectorOfBool: case ftVectorOfEnumKey: case ftVectorOfStruct: case ftVectorOfTable: case ftVectorOfString: case ftVectorOfUnionValue: { return "self.fbb_.push_slot_always::<flatbuffers::WIPOffset<_>>"; } } return "INVALID_CODE_GENERATION"; // for return analysis } std::string GenTableAccessorFuncReturnType(const FieldDef &field, const std::string lifetime) { const Type& type = field.value.type; switch (GetFullType(field.value.type)) { case ftInteger: case ftFloat: { const auto typname = GetTypeBasic(type); return typname; } case ftBool: { return "bool"; } case ftStruct: { const auto typname = WrapInNameSpace(*type.struct_def); return WrapInOptionIfNotRequired("&" + lifetime + " " + typname, field.required); } case ftTable: { const auto typname = WrapInNameSpace(*type.struct_def); return WrapInOptionIfNotRequired(typname + "<" + lifetime + ">", field.required); } case ftEnumKey: case ftUnionKey: { const auto typname = WrapInNameSpace(*type.enum_def); return typname; } case ftUnionValue: { return WrapInOptionIfNotRequired("flatbuffers::Table<" + lifetime + ">", field.required); } case ftString: { return WrapInOptionIfNotRequired("&" + lifetime + " str", field.required); } case ftVectorOfInteger: case ftVectorOfFloat: { const auto typname = GetTypeBasic(type.VectorType()); if 
(IsOneByte(type.VectorType().base_type)) { return WrapInOptionIfNotRequired("&" + lifetime + " [" + typname + "]", field.required); } return WrapInOptionIfNotRequired("flatbuffers::Vector<" + lifetime + ", " + typname + ">", field.required); } case ftVectorOfBool: { return WrapInOptionIfNotRequired("&" + lifetime + " [bool]", field.required); } case ftVectorOfEnumKey: { const auto typname = WrapInNameSpace(*type.enum_def); return WrapInOptionIfNotRequired("flatbuffers::Vector<" + lifetime + ", " + typname + ">", field.required); } case ftVectorOfStruct: { const auto typname = WrapInNameSpace(*type.struct_def); return WrapInOptionIfNotRequired("&" + lifetime + " [" + typname + "]", field.required); } case ftVectorOfTable: { const auto typname = WrapInNameSpace(*type.struct_def); return WrapInOptionIfNotRequired("flatbuffers::Vector<flatbuffers::ForwardsUOffset<" + \ typname + "<" + lifetime + ">>>", field.required); } case ftVectorOfString: { return WrapInOptionIfNotRequired("flatbuffers::Vector<flatbuffers::ForwardsUOffset<&" + \ lifetime + " str>>", field.required); } case ftVectorOfUnionValue: { FLATBUFFERS_ASSERT(false && "vectors of unions are not yet supported"); // TODO(rw): when we do support these, we should consider using the // Into trait to convert tables to typesafe union values. return "INVALID_CODE_GENERATION"; // for return analysis } } return "INVALID_CODE_GENERATION"; // for return analysis } std::string GenTableAccessorFuncBody(const FieldDef &field, const std::string lifetime, const std::string offset_prefix) { const std::string offset_name = offset_prefix + "::" + \ GetFieldOffsetName(field); const Type& type = field.value.type; switch (GetFullType(field.value.type)) { case ftInteger: case ftFloat: case ftBool: { const auto typname = GetTypeBasic(type); const auto default_value = GetDefaultScalarValue(field); return "self._tab.get::<" + typname + ">(" + offset_name + ", Some(" + \ default_value + ")).unwrap()"; } case ftStruct: { const auto typname = WrapInNameSpace(*type.struct_def); return AddUnwrapIfRequired("self._tab.get::<" + typname + ">(" + offset_name + ", None)", field.required); } case ftTable: { const auto typname = WrapInNameSpace(*type.struct_def); return AddUnwrapIfRequired("self._tab.get::<flatbuffers::ForwardsUOffset<" + \ typname + "<" + lifetime + ">>>(" + offset_name + ", None)", field.required); } case ftUnionValue: { return AddUnwrapIfRequired("self._tab.get::<flatbuffers::ForwardsUOffset<" "flatbuffers::Table<" + lifetime + ">>>(" + offset_name + \ ", None)", field.required); } case ftUnionKey: case ftEnumKey: { const auto underlying_typname = GetTypeBasic(type); const auto typname = WrapInNameSpace(*type.enum_def); const auto default_value = GetDefaultScalarValue(field); return "self._tab.get::<" + typname + ">(" + offset_name + \ ", Some(" + default_value + ")).unwrap()"; } case ftString: { return AddUnwrapIfRequired("self._tab.get::<flatbuffers::ForwardsUOffset<&str>>(" + \ offset_name + ", None)", field.required); } case ftVectorOfInteger: case ftVectorOfFloat: { const auto typname = GetTypeBasic(type.VectorType()); std::string s = "self._tab.get::<flatbuffers::ForwardsUOffset<" "flatbuffers::Vector<" + lifetime + ", " + typname + \ ">>>(" + offset_name + ", None)"; // single-byte values are safe to slice if (IsOneByte(type.VectorType().base_type)) { s += ".map(|v| v.safe_slice())"; } return AddUnwrapIfRequired(s, field.required); } case ftVectorOfBool: { return AddUnwrapIfRequired("self._tab.get::<flatbuffers::ForwardsUOffset<" 
"flatbuffers::Vector<" + lifetime + ", bool>>>(" + \ offset_name + ", None).map(|v| v.safe_slice())", field.required); } case ftVectorOfEnumKey: { const auto typname = WrapInNameSpace(*type.enum_def); return AddUnwrapIfRequired("self._tab.get::<flatbuffers::ForwardsUOffset<" "flatbuffers::Vector<" + lifetime + ", " + typname + ">>>(" + \ offset_name + ", None)", field.required); } case ftVectorOfStruct: { const auto typname = WrapInNameSpace(*type.struct_def); return AddUnwrapIfRequired("self._tab.get::<flatbuffers::ForwardsUOffset<" "flatbuffers::Vector<" + typname + ">>>(" + \ offset_name + ", None).map(|v| v.safe_slice() )", field.required); } case ftVectorOfTable: { const auto typname = WrapInNameSpace(*type.struct_def); return AddUnwrapIfRequired("self._tab.get::<flatbuffers::ForwardsUOffset<" "flatbuffers::Vector<flatbuffers::ForwardsUOffset<" + typname + \ "<" + lifetime + ">>>>>(" + offset_name + ", None)", field.required); } case ftVectorOfString: { return AddUnwrapIfRequired("self._tab.get::<flatbuffers::ForwardsUOffset<" "flatbuffers::Vector<flatbuffers::ForwardsUOffset<&" + \ lifetime + " str>>>>(" + offset_name + ", None)", field.required); } case ftVectorOfUnionValue: { FLATBUFFERS_ASSERT(false && "vectors of unions are not yet supported"); return "INVALID_CODE_GENERATION"; // for return analysis } } return "INVALID_CODE_GENERATION"; // for return analysis } bool TableFieldReturnsOption(const Type& type) { switch (GetFullType(type)) { case ftInteger: case ftFloat: case ftBool: case ftEnumKey: case ftUnionKey: return false; default: return true; } } // Generate an accessor struct, builder struct, and create function for a // table. void GenTable(const StructDef &struct_def) { GenComment(struct_def.doc_comment); code_.SetValue("STRUCT_NAME", Name(struct_def)); code_.SetValue("OFFSET_TYPELABEL", Name(struct_def) + "Offset"); code_.SetValue("STRUCT_NAME_SNAKECASE", MakeSnakeCase(Name(struct_def))); // Generate an offset type, the base type, the Follow impl, and the // init_from_table impl. code_ += "pub enum {{OFFSET_TYPELABEL}} {}"; code_ += "#[derive(Copy, Clone, Debug, PartialEq)]"; code_ += ""; code_ += "pub struct {{STRUCT_NAME}}<'a> {"; code_ += " pub _tab: flatbuffers::Table<'a>,"; code_ += "}"; code_ += ""; code_ += "impl<'a> flatbuffers::Follow<'a> for {{STRUCT_NAME}}<'a> {"; code_ += " type Inner = {{STRUCT_NAME}}<'a>;"; code_ += " #[inline]"; code_ += " fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {"; code_ += " Self {"; code_ += " _tab: flatbuffers::Table { buf: buf, loc: loc },"; code_ += " }"; code_ += " }"; code_ += "}"; code_ += ""; code_ += "impl<'a> {{STRUCT_NAME}}<'a> {"; code_ += " #[inline]"; code_ += " pub fn init_from_table(table: flatbuffers::Table<'a>) -> " "Self {"; code_ += " {{STRUCT_NAME}} {"; code_ += " _tab: table,"; code_ += " }"; code_ += " }"; // Generate a convenient create* function that uses the above builder // to create a table in one function call. code_.SetValue("MAYBE_US", struct_def.fields.vec.size() == 0 ? "_" : ""); code_.SetValue("MAYBE_LT", TableBuilderArgsNeedsLifetime(struct_def) ? "<'args>" : ""); code_ += " #[allow(unused_mut)]"; code_ += " pub fn create<'bldr: 'args, 'args: 'mut_bldr, 'mut_bldr>("; code_ += " _fbb: " "&'mut_bldr mut flatbuffers::FlatBufferBuilder<'bldr>,"; code_ += " {{MAYBE_US}}args: &'args {{STRUCT_NAME}}Args{{MAYBE_LT}})" " -> flatbuffers::WIPOffset<{{STRUCT_NAME}}<'bldr>> {"; code_ += " let mut builder = {{STRUCT_NAME}}Builder::new(_fbb);"; for (size_t size = struct_def.sortbysize ? 
sizeof(largest_scalar_t) : 1; size; size /= 2) { for (auto it = struct_def.fields.vec.rbegin(); it != struct_def.fields.vec.rend(); ++it) { const auto &field = **it; // TODO(rw): fully understand this sortbysize usage if (!field.deprecated && (!struct_def.sortbysize || size == SizeOf(field.value.type.base_type))) { code_.SetValue("FIELD_NAME", Name(field)); if (TableFieldReturnsOption(field.value.type)) { code_ += " if let Some(x) = args.{{FIELD_NAME}} " "{ builder.add_{{FIELD_NAME}}(x); }"; } else { code_ += " builder.add_{{FIELD_NAME}}(args.{{FIELD_NAME}});"; } } } } code_ += " builder.finish()"; code_ += " }"; code_ += ""; // Generate field id constants. if (struct_def.fields.vec.size() > 0) { for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { // Deprecated fields won't be accessible. continue; } code_.SetValue("OFFSET_NAME", GetFieldOffsetName(field)); code_.SetValue("OFFSET_VALUE", NumToString(field.value.offset)); code_ += " pub const {{OFFSET_NAME}}: flatbuffers::VOffsetT = " "{{OFFSET_VALUE}};"; } code_ += ""; } // Generate the accessors. Each has one of two forms: // // If a value can be None: // pub fn name(&'a self) -> Option<user_facing_type> { // self._tab.get::<internal_type>(offset, defaultval) // } // // If a value is always Some: // pub fn name(&'a self) -> user_facing_type { // self._tab.get::<internal_type>(offset, defaultval).unwrap() // } const auto offset_prefix = Name(struct_def); for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated) { // Deprecated fields won't be accessible. continue; } code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("RETURN_TYPE", GenTableAccessorFuncReturnType(field, "'a")); code_.SetValue("FUNC_BODY", GenTableAccessorFuncBody(field, "'a", offset_prefix)); GenComment(field.doc_comment, " "); code_ += " #[inline]"; code_ += " pub fn {{FIELD_NAME}}(&self) -> {{RETURN_TYPE}} {"; code_ += " {{FUNC_BODY}}"; code_ += " }"; // Generate a comparison function for this field if it is a key. if (field.key) { GenKeyFieldMethods(field); } // Generate a nested flatbuffer field, if applicable. auto nested = field.attributes.Lookup("nested_flatbuffer"); if (nested) { std::string qualified_name = nested->constant; auto nested_root = parser_.LookupStruct(nested->constant); if (nested_root == nullptr) { qualified_name = parser_.current_namespace_->GetFullyQualifiedName( nested->constant); nested_root = parser_.LookupStruct(qualified_name); } FLATBUFFERS_ASSERT(nested_root); // Guaranteed to exist by parser. 
(void)nested_root; code_.SetValue("OFFSET_NAME", offset_prefix + "::" + GetFieldOffsetName(field)); code_ += " pub fn {{FIELD_NAME}}_nested_flatbuffer(&'a self) -> " " Option<{{STRUCT_NAME}}<'a>> {"; code_ += " match self.{{FIELD_NAME}}() {"; code_ += " None => { None }"; code_ += " Some(data) => {"; code_ += " use self::flatbuffers::Follow;"; code_ += " Some(<flatbuffers::ForwardsUOffset" "<{{STRUCT_NAME}}<'a>>>::follow(data, 0))"; code_ += " },"; code_ += " }"; code_ += " }"; } } // Explicit specializations for union accessors for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.deprecated || field.value.type.base_type != BASE_TYPE_UNION) { continue; } auto u = field.value.type.enum_def; code_.SetValue("FIELD_NAME", Name(field)); for (auto u_it = u->vals.vec.begin(); u_it != u->vals.vec.end(); ++u_it) { auto &ev = **u_it; if (ev.union_type.base_type == BASE_TYPE_NONE) { continue; } auto table_init_type = WrapInNameSpace( ev.union_type.struct_def->defined_namespace, ev.union_type.struct_def->name); code_.SetValue("U_ELEMENT_ENUM_TYPE", WrapInNameSpace(u->defined_namespace, GetEnumValUse(*u, ev))); code_.SetValue("U_ELEMENT_TABLE_TYPE", table_init_type); code_.SetValue("U_ELEMENT_NAME", MakeSnakeCase(Name(ev))); code_ += " #[inline]"; code_ += " #[allow(non_snake_case)]"; code_ += " pub fn {{FIELD_NAME}}_as_{{U_ELEMENT_NAME}}(&'a self) -> " "Option<{{U_ELEMENT_TABLE_TYPE}}> {"; code_ += " if self.{{FIELD_NAME}}_type() == {{U_ELEMENT_ENUM_TYPE}} {"; code_ += " self.{{FIELD_NAME}}().map(|u| " "{{U_ELEMENT_TABLE_TYPE}}::init_from_table(u))"; code_ += " } else {"; code_ += " None"; code_ += " }"; code_ += " }"; code_ += ""; } } code_ += "}"; // End of table impl. code_ += ""; // Generate an args struct: code_.SetValue("MAYBE_LT", TableBuilderArgsNeedsLifetime(struct_def) ? "<'a>" : ""); code_ += "pub struct {{STRUCT_NAME}}Args{{MAYBE_LT}} {"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { code_.SetValue("PARAM_NAME", Name(field)); code_.SetValue("PARAM_TYPE", TableBuilderArgsDefnType(field, "'a ")); code_ += " pub {{PARAM_NAME}}: {{PARAM_TYPE}},"; } } code_ += "}"; // Generate an impl of Default for the *Args type: code_ += "impl<'a> Default for {{STRUCT_NAME}}Args{{MAYBE_LT}} {"; code_ += " #[inline]"; code_ += " fn default() -> Self {"; code_ += " {{STRUCT_NAME}}Args {"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { code_.SetValue("PARAM_VALUE", TableBuilderArgsDefaultValue(field)); code_.SetValue("REQ", field.required ? 
" // required field" : ""); code_.SetValue("PARAM_NAME", Name(field)); code_ += " {{PARAM_NAME}}: {{PARAM_VALUE}},{{REQ}}"; } } code_ += " }"; code_ += " }"; code_ += "}"; // Generate a builder struct: code_ += "pub struct {{STRUCT_NAME}}Builder<'a: 'b, 'b> {"; code_ += " fbb_: &'b mut flatbuffers::FlatBufferBuilder<'a>,"; code_ += " start_: flatbuffers::WIPOffset<" "flatbuffers::TableUnfinishedWIPOffset>,"; code_ += "}"; // Generate builder functions: code_ += "impl<'a: 'b, 'b> {{STRUCT_NAME}}Builder<'a, 'b> {"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated) { const bool is_scalar = IsScalar(field.value.type.base_type); std::string offset = GetFieldOffsetName(field); std::string name = Name(field); std::string value = GetDefaultScalarValue(field); // Generate functions to add data, which take one of two forms. // // If a value has a default: // fn add_x(x_: type) { // fbb_.push_slot::<type>(offset, x_, Some(default)); // } // // If a value does not have a default: // fn add_x(x_: type) { // fbb_.push_slot_always::<type>(offset, x_); // } code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("FIELD_OFFSET", Name(struct_def) + "::" + offset); code_.SetValue("FIELD_TYPE", TableBuilderArgsAddFuncType(field, "'b ")); code_.SetValue("FUNC_BODY", TableBuilderArgsAddFuncBody(field)); code_ += " #[inline]"; code_ += " pub fn add_{{FIELD_NAME}}(&mut self, {{FIELD_NAME}}: " "{{FIELD_TYPE}}) {"; if (is_scalar) { code_.SetValue("FIELD_DEFAULT_VALUE", TableBuilderAddFuncDefaultValue(field)); code_ += " {{FUNC_BODY}}({{FIELD_OFFSET}}, {{FIELD_NAME}}, " "{{FIELD_DEFAULT_VALUE}});"; } else { code_ += " {{FUNC_BODY}}({{FIELD_OFFSET}}, {{FIELD_NAME}});"; } code_ += " }"; } } // Struct initializer (all fields required); code_ += " #[inline]"; code_ += " pub fn new(_fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>) -> " "{{STRUCT_NAME}}Builder<'a, 'b> {"; code_.SetValue("NUM_FIELDS", NumToString(struct_def.fields.vec.size())); code_ += " let start = _fbb.start_table();"; code_ += " {{STRUCT_NAME}}Builder {"; code_ += " fbb_: _fbb,"; code_ += " start_: start,"; code_ += " }"; code_ += " }"; // finish() function. code_ += " #[inline]"; code_ += " pub fn finish(self) -> " "flatbuffers::WIPOffset<{{STRUCT_NAME}}<'a>> {"; code_ += " let o = self.fbb_.end_table(self.start_);"; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (!field.deprecated && field.required) { code_.SetValue("FIELD_NAME", MakeSnakeCase(Name(field))); code_.SetValue("OFFSET_NAME", GetFieldOffsetName(field)); code_ += " self.fbb_.required(o, {{STRUCT_NAME}}::{{OFFSET_NAME}}," "\"{{FIELD_NAME}}\");"; } } code_ += " flatbuffers::WIPOffset::new(o.value())"; code_ += " }"; code_ += "}"; code_ += ""; } // Generate functions to compare tables and structs by key. This function // must only be called if the field key is defined. 
void GenKeyFieldMethods(const FieldDef &field) { FLATBUFFERS_ASSERT(field.key); code_.SetValue("KEY_TYPE", GenTableAccessorFuncReturnType(field, "")); code_ += " #[inline]"; code_ += " pub fn key_compare_less_than(&self, o: &{{STRUCT_NAME}}) -> " " bool {"; code_ += " self.{{FIELD_NAME}}() < o.{{FIELD_NAME}}()"; code_ += " }"; code_ += ""; code_ += " #[inline]"; code_ += " pub fn key_compare_with_value(&self, val: {{KEY_TYPE}}) -> " " ::std::cmp::Ordering {"; code_ += " let key = self.{{FIELD_NAME}}();"; code_ += " key.cmp(&val)"; code_ += " }"; } // Generate functions for accessing the root table object. This function // must only be called if the root table is defined. void GenRootTableFuncs(const StructDef &struct_def) { FLATBUFFERS_ASSERT(parser_.root_struct_def_ && "root table not defined"); auto name = Name(struct_def); code_.SetValue("STRUCT_NAME", name); code_.SetValue("STRUCT_NAME_SNAKECASE", MakeSnakeCase(name)); code_.SetValue("STRUCT_NAME_CAPS", MakeUpper(MakeSnakeCase(name))); // The root datatype accessors: code_ += "#[inline]"; code_ += "pub fn get_root_as_{{STRUCT_NAME_SNAKECASE}}<'a>(buf: &'a [u8])" " -> {{STRUCT_NAME}}<'a> {"; code_ += " flatbuffers::get_root::<{{STRUCT_NAME}}<'a>>(buf)"; code_ += "}"; code_ += ""; code_ += "#[inline]"; code_ += "pub fn get_size_prefixed_root_as_{{STRUCT_NAME_SNAKECASE}}" "<'a>(buf: &'a [u8]) -> {{STRUCT_NAME}}<'a> {"; code_ += " flatbuffers::get_size_prefixed_root::<{{STRUCT_NAME}}<'a>>" "(buf)"; code_ += "}"; code_ += ""; if (parser_.file_identifier_.length()) { // Declare the identifier code_ += "pub const {{STRUCT_NAME_CAPS}}_IDENTIFIER: &'static str\\"; code_ += " = \"" + parser_.file_identifier_ + "\";"; code_ += ""; // Check if a buffer has the identifier. code_ += "#[inline]"; code_ += "pub fn {{STRUCT_NAME_SNAKECASE}}_buffer_has_identifier\\"; code_ += "(buf: &[u8]) -> bool {"; code_ += " return flatbuffers::buffer_has_identifier(buf, \\"; code_ += "{{STRUCT_NAME_CAPS}}_IDENTIFIER, false);"; code_ += "}"; code_ += ""; code_ += "#[inline]"; code_ += "pub fn {{STRUCT_NAME_SNAKECASE}}_size_prefixed\\"; code_ += "_buffer_has_identifier(buf: &[u8]) -> bool {"; code_ += " return flatbuffers::buffer_has_identifier(buf, \\"; code_ += "{{STRUCT_NAME_CAPS}}_IDENTIFIER, true);"; code_ += "}"; code_ += ""; } if (parser_.file_extension_.length()) { // Return the extension code_ += "pub const {{STRUCT_NAME_CAPS}}_EXTENSION: &'static str = \\"; code_ += "\"" + parser_.file_extension_ + "\";"; code_ += ""; } // Finish a buffer with a given root object: code_.SetValue("OFFSET_TYPELABEL", Name(struct_def) + "Offset"); code_ += "#[inline]"; code_ += "pub fn finish_{{STRUCT_NAME_SNAKECASE}}_buffer<'a, 'b>("; code_ += " fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>,"; code_ += " root: flatbuffers::WIPOffset<{{STRUCT_NAME}}<'a>>) {"; if (parser_.file_identifier_.length()) { code_ += " fbb.finish(root, Some({{STRUCT_NAME_CAPS}}_IDENTIFIER));"; } else { code_ += " fbb.finish(root, None);"; } code_ += "}"; code_ += ""; code_ += "#[inline]"; code_ += "pub fn finish_size_prefixed_{{STRUCT_NAME_SNAKECASE}}_buffer" "<'a, 'b>(" "fbb: &'b mut flatbuffers::FlatBufferBuilder<'a>, " "root: flatbuffers::WIPOffset<{{STRUCT_NAME}}<'a>>) {"; if (parser_.file_identifier_.length()) { code_ += " fbb.finish_size_prefixed(root, " "Some({{STRUCT_NAME_CAPS}}_IDENTIFIER));"; } else { code_ += " fbb.finish_size_prefixed(root, None);"; } code_ += "}"; } static void GenPadding( const FieldDef &field, std::string *code_ptr, int *id, const std::function<void(int bits, 
std::string *code_ptr, int *id)> &f) { if (field.padding) { for (int i = 0; i < 4; i++) { if (static_cast<int>(field.padding) & (1 << i)) { f((1 << i) * 8, code_ptr, id); } } assert(!(field.padding & ~0xF)); } } static void PaddingDefinition(int bits, std::string *code_ptr, int *id) { *code_ptr += " padding" + NumToString((*id)++) + "__: u" + \ NumToString(bits) + ","; } static void PaddingInitializer(int bits, std::string *code_ptr, int *id) { (void)bits; *code_ptr += "padding" + NumToString((*id)++) + "__: 0,"; } // Generate an accessor struct with constructor for a flatbuffers struct. void GenStruct(const StructDef &struct_def) { // Generates manual padding and alignment. // Variables are private because they contain little endian data on all // platforms. GenComment(struct_def.doc_comment); code_.SetValue("ALIGN", NumToString(struct_def.minalign)); code_.SetValue("STRUCT_NAME", Name(struct_def)); code_ += "// struct {{STRUCT_NAME}}, aligned to {{ALIGN}}"; code_ += "#[repr(C, align({{ALIGN}}))]"; // PartialEq is useful to derive because we can correctly compare structs // for equality by just comparing their underlying byte data. This doesn't // hold for PartialOrd/Ord. code_ += "#[derive(Clone, Copy, Debug, PartialEq)]"; code_ += "pub struct {{STRUCT_NAME}} {"; int padding_id = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; code_.SetValue("FIELD_TYPE", GetTypeGet(field.value.type)); code_.SetValue("FIELD_NAME", Name(field)); code_ += " {{FIELD_NAME}}_: {{FIELD_TYPE}},"; if (field.padding) { std::string padding; GenPadding(field, &padding, &padding_id, PaddingDefinition); code_ += padding; } } code_ += "} // pub struct {{STRUCT_NAME}}"; // Generate impls for SafeSliceAccess (because all structs are endian-safe), // Follow for the value type, Follow for the reference type, Push for the // value type, and Push for the reference type. code_ += "impl flatbuffers::SafeSliceAccess for {{STRUCT_NAME}} {}"; code_ += "impl<'a> flatbuffers::Follow<'a> for {{STRUCT_NAME}} {"; code_ += " type Inner = &'a {{STRUCT_NAME}};"; code_ += " #[inline]"; code_ += " fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {"; code_ += " <&'a {{STRUCT_NAME}}>::follow(buf, loc)"; code_ += " }"; code_ += "}"; code_ += "impl<'a> flatbuffers::Follow<'a> for &'a {{STRUCT_NAME}} {"; code_ += " type Inner = &'a {{STRUCT_NAME}};"; code_ += " #[inline]"; code_ += " fn follow(buf: &'a [u8], loc: usize) -> Self::Inner {"; code_ += " flatbuffers::follow_cast_ref::<{{STRUCT_NAME}}>(buf, loc)"; code_ += " }"; code_ += "}"; code_ += "impl<'b> flatbuffers::Push for {{STRUCT_NAME}} {"; code_ += " type Output = {{STRUCT_NAME}};"; code_ += " #[inline]"; code_ += " fn push(&self, dst: &mut [u8], _rest: &[u8]) {"; code_ += " let src = unsafe {"; code_ += " ::std::slice::from_raw_parts(" "self as *const {{STRUCT_NAME}} as *const u8, Self::size())"; code_ += " };"; code_ += " dst.copy_from_slice(src);"; code_ += " }"; code_ += "}"; code_ += "impl<'b> flatbuffers::Push for &'b {{STRUCT_NAME}} {"; code_ += " type Output = {{STRUCT_NAME}};"; code_ += ""; code_ += " #[inline]"; code_ += " fn push(&self, dst: &mut [u8], _rest: &[u8]) {"; code_ += " let src = unsafe {"; code_ += " ::std::slice::from_raw_parts(" "*self as *const {{STRUCT_NAME}} as *const u8, Self::size())"; code_ += " };"; code_ += " dst.copy_from_slice(src);"; code_ += " }"; code_ += "}"; code_ += ""; code_ += ""; // Generate a constructor that takes all fields as arguments. 
code_ += "impl {{STRUCT_NAME}} {"; std::string arg_list; std::string init_list; padding_id = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; const auto member_name = Name(field) + "_"; const auto reference = StructMemberAccessNeedsCopy(field.value.type) ? "" : "&'a "; const auto arg_name = "_" + Name(field); const auto arg_type = reference + GetTypeGet(field.value.type); if (it != struct_def.fields.vec.begin()) { arg_list += ", "; } arg_list += arg_name + ": "; arg_list += arg_type; init_list += " " + member_name; if (StructMemberAccessNeedsCopy(field.value.type)) { init_list += ": " + arg_name + ".to_little_endian(),\n"; } else { init_list += ": *" + arg_name + ",\n"; } } code_.SetValue("ARG_LIST", arg_list); code_.SetValue("INIT_LIST", init_list); code_ += " pub fn new<'a>({{ARG_LIST}}) -> Self {"; code_ += " {{STRUCT_NAME}} {"; code_ += "{{INIT_LIST}}"; padding_id = 0; for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; if (field.padding) { std::string padding; GenPadding(field, &padding, &padding_id, PaddingInitializer); code_ += " " + padding; } } code_ += " }"; code_ += " }"; // Generate accessor methods for the struct. for (auto it = struct_def.fields.vec.begin(); it != struct_def.fields.vec.end(); ++it) { const auto &field = **it; auto field_type = TableBuilderArgsAddFuncType(field, "'a"); auto member = "self." + Name(field) + "_"; auto value = StructMemberAccessNeedsCopy(field.value.type) ? member + ".from_little_endian()" : member; code_.SetValue("FIELD_NAME", Name(field)); code_.SetValue("FIELD_TYPE", field_type); code_.SetValue("FIELD_VALUE", value); code_.SetValue("REF", IsStruct(field.value.type) ? "&" : ""); GenComment(field.doc_comment, " "); code_ += " pub fn {{FIELD_NAME}}<'a>(&'a self) -> {{FIELD_TYPE}} {"; code_ += " {{REF}}{{FIELD_VALUE}}"; code_ += " }"; // Generate a comparison function for this field if it is a key. if (field.key) { GenKeyFieldMethods(field); } } code_ += "}"; code_ += ""; } // Set up the correct namespace. This opens a namespace if the current // namespace is different from the target namespace. This function // closes and opens the namespaces only as necessary. // // The file must start and end with an empty (or null) namespace so that // namespaces are properly opened and closed. void SetNameSpace(const Namespace *ns) { if (cur_name_space_ == ns) { return; } // Compute the size of the longest common namespace prefix. // If cur_name_space is A::B::C::D and ns is A::B::E::F::G, // the common prefix is A::B:: and we have old_size = 4, new_size = 5 // and common_prefix_size = 2 size_t old_size = cur_name_space_ ? cur_name_space_->components.size() : 0; size_t new_size = ns ? ns->components.size() : 0; size_t common_prefix_size = 0; while (common_prefix_size < old_size && common_prefix_size < new_size && ns->components[common_prefix_size] == cur_name_space_->components[common_prefix_size]) { common_prefix_size++; } // Close cur_name_space in reverse order to reach the common prefix. // In the previous example, D then C are closed. 
for (size_t j = old_size; j > common_prefix_size; --j) { code_ += "} // pub mod " + cur_name_space_->components[j - 1]; } if (old_size != common_prefix_size) { code_ += ""; } // open namespace parts to reach the ns namespace // in the previous example, E, then F, then G are opened for (auto j = common_prefix_size; j != new_size; ++j) { code_ += "pub mod " + MakeSnakeCase(ns->components[j]) + " {"; code_ += " #![allow(dead_code)]"; code_ += " #![allow(unused_imports)]"; code_ += ""; code_ += " use std::mem;"; code_ += " use std::cmp::Ordering;"; code_ += ""; code_ += " extern crate flatbuffers;"; code_ += " use self::flatbuffers::EndianScalar;"; } if (new_size != common_prefix_size) { code_ += ""; } cur_name_space_ = ns; } }; } // namespace rust bool GenerateRust(const Parser &parser, const std::string &path, const std::string &file_name) { rust::RustGenerator generator(parser, path, file_name); return generator.generate(); } std::string RustMakeRule(const Parser &parser, const std::string &path, const std::string &file_name) { std::string filebase = flatbuffers::StripPath(flatbuffers::StripExtension(file_name)); std::string make_rule = GeneratedFileName(path, filebase) + ": "; auto included_files = parser.GetIncludedFilesRecursive(file_name); for (auto it = included_files.begin(); it != included_files.end(); ++it) { make_rule += " " + *it; } return make_rule; } } // namespace flatbuffers // TODO(rw): Generated code should import other generated files. // TODO(rw): Generated code should refer to namespaces in included files in a // way that makes them referrable. // TODO(rw): Generated code should indent according to nesting level. // TODO(rw): Generated code should generate endian-safe Debug impls. // TODO(rw): Generated code could use a Rust-only enum type to access unions, // instead of making the user use _type() to manually switch.
1
14,831
Let's manually create one extra whitespace line here (code_ += "")
google-flatbuffers
java
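The review comment on this flatbuffers record asks for one extra whitespace line in the generated Rust, which the C++ generator produces by appending an empty string to its line buffer (code_ += ""). As a cross-language illustration only, here is a minimal line-buffer sketch in Go showing why appending an empty string yields a blank separator line in the emitted code; the codeWriter type and the sample output are invented for this sketch and are not part of flatbuffers.

package main

import (
	"fmt"
	"strings"
)

// codeWriter is a toy line-oriented buffer, analogous in spirit to the
// generator's code_ object: each Add call appends one output line.
type codeWriter struct {
	lines []string
}

func (w *codeWriter) Add(line string) {
	w.lines = append(w.lines, line)
}

func (w *codeWriter) String() string {
	return strings.Join(w.lines, "\n") + "\n"
}

func main() {
	w := &codeWriter{}
	w.Add("impl Foo {")
	w.Add("}")
	w.Add("") // the extra empty line requested in the review: a blank separator in the output
	w.Add("impl Bar {")
	w.Add("}")
	fmt.Print(w.String())
}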
@@ -138,6 +138,17 @@ func (bp *BucketPool) Count() uint64 { return bp.total.count } +// Construct returns a copy of the bucket pool +func (bp *BucketPool) Construct(enableSMStorage bool) *BucketPool { + pool := BucketPool{} + pool.enableSMStorage = enableSMStorage + pool.total = &totalAmount{ + amount: new(big.Int).Set(bp.total.amount), + count: bp.total.count, + } + return &pool +} + // SyncPool sync the data from state manager func (bp *BucketPool) SyncPool(sm protocol.StateManager) error { if bp.enableSMStorage {
1
// Copyright (c) 2020 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package staking import ( "math/big" "github.com/golang/protobuf/proto" "github.com/iotexproject/go-pkgs/hash" "github.com/pkg/errors" "github.com/iotexproject/iotex-core/action/protocol" "github.com/iotexproject/iotex-core/action/protocol/staking/stakingpb" "github.com/iotexproject/iotex-core/state" ) // const const ( stakingBucketPool = "bucketPool" ) var ( bucketPoolAddr = hash.Hash160b([]byte(stakingBucketPool)) bucketPoolAddrKey = append([]byte{_const}, bucketPoolAddr[:]...) ) // when a bucket is created, the amount of staked IOTX token is deducted from user, but does not transfer to any address // in the same way when a bucket are withdrawn, bucket amount is added back to user, but does not come out of any address // // for better accounting/auditing, we take protocol's address as the 'bucket pool' address // 1. at Greenland height we sum up all existing bucket's amount and set the total amount to bucket pool address // 2. for future bucket creation/deposit/registration, the amount of staked IOTX token will be added to bucket pool (so // the pool is 'receiving' token) // 3. for future bucket withdrawal, the bucket amount will be deducted from bucket pool (so the pool is 'releasing' token) type ( // BucketPool implements the bucket pool BucketPool struct { enableSMStorage bool total *totalAmount } totalAmount struct { amount *big.Int count uint64 } ) func (t *totalAmount) Serialize() ([]byte, error) { gen := stakingpb.TotalAmount{ Amount: t.amount.String(), Count: t.count, } return proto.Marshal(&gen) } func (t *totalAmount) Deserialize(data []byte) error { gen := stakingpb.TotalAmount{} if err := proto.Unmarshal(data, &gen); err != nil { return err } var ok bool if t.amount, ok = new(big.Int).SetString(gen.Amount, 10); !ok { return state.ErrStateDeserialization } if t.amount.Cmp(big.NewInt(0)) == -1 { return state.ErrNotEnoughBalance } t.count = gen.Count return nil } func (t *totalAmount) AddBalance(amount *big.Int, newBucket bool) { t.amount.Add(t.amount, amount) if newBucket { t.count++ } } func (t *totalAmount) SubBalance(amount *big.Int) error { if amount.Cmp(t.amount) == 1 || t.count == 0 { return state.ErrNotEnoughBalance } t.amount.Sub(t.amount, amount) t.count-- return nil } // NewBucketPool creates an instance of BucketPool func NewBucketPool(sr protocol.StateReader, enableSMStorage bool) (*BucketPool, error) { bp := BucketPool{ enableSMStorage: enableSMStorage, total: &totalAmount{ amount: big.NewInt(0), }, } if bp.enableSMStorage { switch _, err := sr.State(bp.total, protocol.NamespaceOption(StakingNameSpace), protocol.KeyOption(bucketPoolAddrKey)); errors.Cause(err) { case nil: return &bp, nil case state.ErrStateNotExist: // fall back to load all buckets default: return nil, err } } // sum up all existing buckets all, _, err := getAllBuckets(sr) if err != nil && errors.Cause(err) != state.ErrStateNotExist { return nil, err } for _, v := range all { if v.StakedAmount.Cmp(big.NewInt(0)) <= 0 { return nil, state.ErrNotEnoughBalance } bp.total.amount.Add(bp.total.amount, v.StakedAmount) } bp.total.count = uint64(len(all)) return &bp, nil } // 
Total returns the total amount staked in bucket pool func (bp *BucketPool) Total() *big.Int { return new(big.Int).Set(bp.total.amount) } // Count returns the total number of buckets in bucket pool func (bp *BucketPool) Count() uint64 { return bp.total.count } // SyncPool sync the data from state manager func (bp *BucketPool) SyncPool(sm protocol.StateManager) error { if bp.enableSMStorage { _, err := sm.State(bp.total, protocol.NamespaceOption(StakingNameSpace), protocol.KeyOption(bucketPoolAddrKey)) return err } // get stashed total amount ser, err := protocol.UnloadAndAssertBytes(sm, stakingBucketPool) switch errors.Cause(err) { case protocol.ErrTypeAssertion: return errors.Wrap(err, "failed to sync bucket pool") case protocol.ErrNoName: return nil } if err := bp.total.Deserialize(ser); err != nil { return err } return nil } // Commit is called upon workingset commit func (bp *BucketPool) Commit(sr protocol.StateReader) error { return nil } // CreditPool subtracts staked amount out of the pool func (bp *BucketPool) CreditPool(sm protocol.StateManager, amount *big.Int) error { if err := bp.total.SubBalance(amount); err != nil { return err } if bp.enableSMStorage { _, err := sm.PutState(bp.total, protocol.NamespaceOption(StakingNameSpace), protocol.KeyOption(bucketPoolAddrKey)) return err } ser, err := bp.total.Serialize() if err != nil { return errors.Wrap(err, "failed to stash pending bucket pool") } return sm.Load(stakingBucketPool, ser) } // DebitPool adds staked amount into the pool func (bp *BucketPool) DebitPool(sm protocol.StateManager, amount *big.Int, newBucket bool) error { bp.total.AddBalance(amount, newBucket) if bp.enableSMStorage { _, err := sm.PutState(bp.total, protocol.NamespaceOption(StakingNameSpace), protocol.KeyOption(bucketPoolAddrKey)) return err } ser, err := bp.total.Serialize() if err != nil { return errors.Wrap(err, "failed to stash pending bucket pool") } return sm.Load(stakingBucketPool, ser) }
1
22,199
nit: copy or clone
iotexproject-iotex-core
go
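The "nit: copy or clone" comment on this record suggests a clearer name for the new Construct method added in the patch. A minimal sketch of that rename, using simplified stand-ins for the BucketPool and totalAmount types shown in the record; the Clone name follows the reviewer's suggestion and is not the project's actual API.

package staking

import "math/big"

// Minimal stand-ins for the types shown in the record above (illustrative only).
type totalAmount struct {
	amount *big.Int
	count  uint64
}

type BucketPool struct {
	enableSMStorage bool
	total           *totalAmount
}

// Clone returns a deep copy of the bucket pool; same body as the diff's
// Construct, renamed per the reviewer's "copy or clone" suggestion.
func (bp *BucketPool) Clone(enableSMStorage bool) *BucketPool {
	return &BucketPool{
		enableSMStorage: enableSMStorage,
		total: &totalAmount{
			amount: new(big.Int).Set(bp.total.amount), // copy the amount so the original cannot be mutated through the clone
			count:  bp.total.count,
		},
	}
}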
@@ -182,3 +182,14 @@ func (d Document) Decode(dec Decoder) error { } return decodeStruct(d.s, dec) } + +// RevisionOn returns whether or not we should write a revision value when we +// write a document. It could be either a map or a struct with a revision key or +// field named after revField. +func (d Document) RevisionOn(revField string) bool { + if d.m != nil { + _, ok := d.m[revField] + return ok + } + return d.fields.MatchFold(revField) != nil +}
1
// Copyright 2019 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package driver import ( "reflect" "gocloud.dev/docstore/internal/fields" "gocloud.dev/gcerrors" "gocloud.dev/internal/gcerr" ) // A Document is a lightweight wrapper around either a map[string]interface{} or a // struct pointer. It provides operations to get and set fields and field paths. type Document struct { Origin interface{} // the argument to NewDocument m map[string]interface{} // nil if it's a *struct s reflect.Value // the struct reflected fields fields.List // for structs } // Create a new document from doc, which must be a non-nil map[string]interface{} or struct pointer. func NewDocument(doc interface{}) (Document, error) { if doc == nil { return Document{}, gcerr.Newf(gcerr.InvalidArgument, nil, "document cannot be nil") } if m, ok := doc.(map[string]interface{}); ok { if m == nil { return Document{}, gcerr.Newf(gcerr.InvalidArgument, nil, "document map cannot be nil") } return Document{Origin: doc, m: m}, nil } v := reflect.ValueOf(doc) t := v.Type() if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { return Document{}, gcerr.Newf(gcerr.InvalidArgument, nil, "expecting *struct or map[string]interface{}, got %s", t) } t = t.Elem() if v.IsNil() { return Document{}, gcerr.Newf(gcerr.InvalidArgument, nil, "document struct pointer cannot be nil") } fields, err := fieldCache.Fields(t) if err != nil { return Document{}, err } return Document{Origin: doc, s: v.Elem(), fields: fields}, nil } // GetField returns the value of the named document field. func (d Document) GetField(field string) (interface{}, error) { if d.m != nil { x, ok := d.m[field] if !ok { return nil, gcerr.Newf(gcerr.NotFound, nil, "field %q not found in map", field) } return x, nil } else { v, err := d.structField(field) if err != nil { return nil, err } return v.Interface(), nil } } // getDocument gets the value of the given field path, which must be a document. // If create is true, it creates intermediate documents as needed. func (d Document) getDocument(fp []string, create bool) (Document, error) { if len(fp) == 0 { return d, nil } x, err := d.GetField(fp[0]) if err != nil { if create && gcerrors.Code(err) == gcerrors.NotFound { // TODO(jba): create the right type for the struct field. x = map[string]interface{}{} if err := d.SetField(fp[0], x); err != nil { return Document{}, err } } else { return Document{}, err } } d2, err := NewDocument(x) if err != nil { return Document{}, err } return d2.getDocument(fp[1:], create) } // Get returns the value of the given field path in the document. func (d Document) Get(fp []string) (interface{}, error) { d2, err := d.getDocument(fp[:len(fp)-1], false) if err != nil { return nil, err } return d2.GetField(fp[len(fp)-1]) } func (d Document) structField(name string) (reflect.Value, error) { // We do case-insensitive match here to cover the MongoDB's lowercaseFields // option. 
f := d.fields.MatchFold(name) if f == nil { return reflect.Value{}, gcerr.Newf(gcerr.NotFound, nil, "field %q not found in struct type %s", name, d.s.Type()) } fv, ok := fieldByIndex(d.s, f.Index) if !ok { return reflect.Value{}, gcerr.Newf(gcerr.InvalidArgument, nil, "nil embedded pointer; cannot get field %q from %s", name, d.s.Type()) } return fv, nil } // Set sets the value of the field path in the document. // This creates sub-maps as necessary, if possible. func (d Document) Set(fp []string, val interface{}) error { d2, err := d.getDocument(fp[:len(fp)-1], true) if err != nil { return err } return d2.SetField(fp[len(fp)-1], val) } // SetField sets the field to value in the document. func (d Document) SetField(field string, value interface{}) error { if d.m != nil { d.m[field] = value return nil } v, err := d.structField(field) if err != nil { return err } if !v.CanSet() { return gcerr.Newf(gcerr.InvalidArgument, nil, "cannot set field %s in struct of type %s: not addressable", field, d.s.Type()) } v.Set(reflect.ValueOf(value)) return nil } // FieldNames returns names of the top-level fields of d. func (d Document) FieldNames() []string { var names []string if d.m != nil { for k := range d.m { names = append(names, k) } } else { for _, f := range d.fields { names = append(names, f.Name) } } return names } // Encode encodes the document using the given Encoder. func (d Document) Encode(e Encoder) error { if d.m != nil { return encodeMap(reflect.ValueOf(d.m), e) } return encodeStructWithFields(d.s, d.fields, e) } // Decode decodes the document using the given Decoder. func (d Document) Decode(dec Decoder) error { if d.m != nil { return decodeMap(reflect.ValueOf(d.m), dec) } return decodeStruct(d.s, dec) }
1
19,126
I think this can be called `HasField`, because that's what it's doing. It really is independent of revision.
google-go-cloud
go
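The reviewer on this record proposes calling the new method HasField, since it only checks whether a field exists and is independent of revisions. A minimal, self-contained sketch of that rename: the field/fieldList stand-ins below are simplified substitutes for the package's real fields.List, and the HasField name is the reviewer's proposal, not the merged API.

package driver

import "strings"

// Minimal stand-ins for the types referenced by the diff (illustrative only;
// the real package uses gocloud.dev/docstore/internal/fields).
type field struct{ Name string }

type fieldList []field

// MatchFold returns the field whose name matches case-insensitively, or nil.
func (l fieldList) MatchFold(name string) *field {
	for i := range l {
		if strings.EqualFold(l[i].Name, name) {
			return &l[i]
		}
	}
	return nil
}

type Document struct {
	m      map[string]interface{}
	fields fieldList
}

// HasField reports whether the document has a top-level field with the given
// name: a map key for map documents, or a case-insensitively matched struct
// field for struct documents. Same body as RevisionOn in the diff above,
// renamed because the check has nothing to do with revisions specifically.
func (d Document) HasField(name string) bool {
	if d.m != nil {
		_, ok := d.m[name]
		return ok
	}
	return d.fields.MatchFold(name) != nil
}

A caller that cares about revisions would then read doc.HasField(revField), keeping the revision semantics at the call site rather than in the Document API.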
@@ -16,7 +16,10 @@ import java.math.BigDecimal; import java.time.Year; import java.util.function.Predicate; -import static javaslang.API.*; +import static javaslang.API.$; +import static javaslang.API.Case; +import static javaslang.API.Match; +import static javaslang.API.run; import static javaslang.MatchTest_DeveloperPatterns.Developer; import static javaslang.Patterns.*; import static javaslang.Predicates.*;
1
/* / \____ _ _ ____ ______ / \ ____ __ _______ * / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG * _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io * /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0 */ package javaslang; import javaslang.collection.List; import javaslang.control.Either; import javaslang.control.Option; import javaslang.control.Option.Some; import javaslang.match.annotation.Unapply; import org.junit.Test; import java.math.BigDecimal; import java.time.Year; import java.util.function.Predicate; import static javaslang.API.*; import static javaslang.MatchTest_DeveloperPatterns.Developer; import static javaslang.Patterns.*; import static javaslang.Predicates.*; import static org.assertj.core.api.Assertions.assertThat; public class MatchTest { // -- MatchError @Test(expected = MatchError.class) public void shouldThrowIfNotMatching() { Match(new Object()).of( Case(ignored -> false, o -> null) ); } // -- $() @Test public void shouldMatchNullWithAnyReturningValue() { assertThat(Case($(), 1).apply(null)).isEqualTo(Option.of(1)); } @Test public void shouldMatchAnyReturningValue() { assertThat(Case($(), 1).apply(new Object())).isEqualTo(Option.of(1)); } @Test public void shouldMatchNullWithAnyReturningAppliedFunction() { assertThat(Case($(), o -> 1).apply(null)).isEqualTo(Option.of(1)); } @Test public void shouldMatchAnyReturningAppliedFunction() { assertThat(Case($(), o -> 1).apply(new Object())).isEqualTo(Option.of(1)); } @Test public void shouldTakeFirstMatch() { final String actual = Match(new Object()).of( Case($(), "first"), Case($(), "second") ); assertThat(actual).isEqualTo("first"); } // -- $(value) @Test public void shouldMatchValueReturningValue() { final Object value = new Object(); assertThat(Case($(value), 1).apply(value)).isEqualTo(Option.of(1)); } @Test public void shouldMatchValueReturningValue_NegativeCase() { final Object value = new Object(); assertThat(Case($(value), 1).apply(new Object())).isEqualTo(Option.none()); } @Test public void shouldMatchValueReturningAppliedFunction() { final Object value = new Object(); assertThat(Case($(value), o -> 1).apply(value)).isEqualTo(Option.of(1)); } @Test public void shouldMatchValueReturningAppliedFunction_NegativeCase() { final Object value = new Object(); assertThat(Case($(value), o -> 1).apply(new Object())).isEqualTo(Option.none()); } // -- $(predicate) @Test public void shouldMatchPredicateReturningValue() { final Object value = new Object(); assertThat(Case($(is(value)), 1).apply(value)).isEqualTo(Option.of(1)); } @Test public void shouldMatchPredicateReturningValue_NegativeCase() { final Object value = new Object(); assertThat(Case($(is(value)), 1).apply(new Object())).isEqualTo(Option.none()); } @Test public void shouldMatchPredicateReturningAppliedFunction() { final Object value = new Object(); assertThat(Case($(is(value)), o -> 1).apply(value)).isEqualTo(Option.of(1)); } @Test public void shouldMatchPredicateReturningAppliedFunction_NegativeCase() { final Object value = new Object(); assertThat(Case($(is(value)), o -> 1).apply(new Object())).isEqualTo(Option.none()); } // -- multiple cases // i match { // case 1 => "one" // case 2 => "two" // case _ => "many" // } @Test public void shouldMatchIntUsingPatterns() { final String actual = Match(3).of( Case($(1), "one"), Case($(2), "two"), Case($(), "many") ); assertThat(actual).isEqualTo("many"); } @Test public void shouldMatchIntUsingPredicates() { final 
String actual = Match(3).of( Case(is(1), "one"), Case(is(2), "two"), Case($(), "many") ); assertThat(actual).isEqualTo("many"); } @Test public void shouldComputeUpperBoundOfReturnValue() { final Number num = Match(3).of( Case(is(1), 1), Case(is(2), 2.0), Case($(), i -> new BigDecimal("" + i)) ); assertThat(num).isEqualTo(new BigDecimal("3")); } // -- instanceOf @Test public void shouldMatchX() { final Object obj = 1; final int actual = Match(obj).of( Case(instanceOf(Year.class), y -> 0), Case(instanceOf(Integer.class), i -> 1) ); assertThat(actual).isEqualTo(1); } // -- Either @Test public void shouldMatchLeft() { final Either<Integer, String> either = Either.left(1); final String actual = Match(either).of( Case(Left($()), l -> "left: " + l), Case(Right($()), r -> "right: " + r) ); assertThat(actual).isEqualTo("left: 1"); } @Test public void shouldMatchRight() { final Either<Integer, String> either = Either.right("a"); final String actual = Match(either).of( Case(Left($()), l -> "left: " + l), Case(Right($()), r -> "right: " + r) ); assertThat(actual).isEqualTo("right: a"); } // -- Option @Test public void shouldMatchSome() { final Option<Integer> opt = Option.some(1); final String actual = Match(opt).of( Case(Some($()), String::valueOf), Case(None(), "no value") ); assertThat(actual).isEqualTo("1"); } @Test public void shouldMatchNone() { final Option<Integer> opt = Option.none(); final String actual = Match(opt).of( Case(Some($()), String::valueOf), Case(None(), "no value") ); assertThat(actual).isEqualTo("no value"); } @Test public void shouldDecomposeSomeTuple() { final Option<Tuple2<String, Integer>> tuple2Option = Option.of(Tuple.of("Test", 123)); final Tuple2<String, Integer> actual = Match(tuple2Option).of( Case(Some($()), value -> { @SuppressWarnings("UnnecessaryLocalVariable") Tuple2<String, Integer> tuple2 = value; // types are inferred correctly! return tuple2; }) ); assertThat(actual).isEqualTo(Tuple.of("Test", 123)); } @Test public void shouldDecomposeSomeSomeTuple() { final Option<Option<Tuple2<String, Integer>>> tuple2OptionOption = Option.of(Option.of(Tuple.of("Test", 123))); final Some<Tuple2<String, Integer>> actual = Match(tuple2OptionOption).of( Case(Some(Some($(Tuple.of("Test", 123)))), value -> { @SuppressWarnings("UnnecessaryLocalVariable") final Some<Tuple2<String, Integer>> some = value; // types are inferred correctly! return some; }) ); assertThat(actual).isEqualTo(Option.of(Tuple.of("Test", 123))); } // -- List @Test public void shouldDecomposeEmptyList() { final List<Integer> list = List.empty(); final boolean isEmpty = Match(list).of( Case(List($(), $()), (x, xs) -> false), Case(List(), true) ); assertThat(isEmpty).isTrue(); } @Test public void shouldDecomposeNonEmptyList() { final List<Integer> list = List.of(1); final boolean isNotEmpty = Match(list).of( Case(List($(), $()), (x, xs) -> true), Case(List(), false) ); assertThat(isNotEmpty).isTrue(); } @SuppressWarnings("UnnecessaryLocalVariable") @Test public void shouldDecomposeListOfTuple3() { final List<Tuple3<String, Integer, Double>> tuple3List = List.of( Tuple.of("begin", 10, 4.5), Tuple.of("middle", 11, 0.0), Tuple.of("end", 12, 1.2)); final String actual = Match(tuple3List).of( Case(List($(), $()), (x, xs) -> { // types are inferred correctly! 
final Tuple3<String, Integer, Double> head = x; final List<Tuple3<String, Integer, Double>> tail = xs; return head + "::" + tail; }) ); assertThat(actual).isEqualTo("(begin, 10, 4.5)::List((middle, 11, 0.0), (end, 12, 1.2))"); } @SuppressWarnings("UnnecessaryLocalVariable") @Test public void shouldDecomposeListWithNonEmptyTail() { final List<Option<Number>> intOptionList = List.of(Option.some(1), Option.some(2.0)); final String actual = Match(intOptionList).of( Case(List(Some($(1)), List(Some($(2.0)), $())), (x, xs) -> { // types are inferred correctly! final Some<Number> head = x; final List<Option<Number>> tail = xs; return head + "::" + tail; }) ); assertThat(actual).isEqualTo("Some(1)::List(Some(2.0))"); } // -- run @Test public void shouldRunUnitOfWork() { class OuterWorld { String effect = null; void displayHelp() { effect = "help"; } void displayVersion() { effect = "version"; } } final OuterWorld outerWorld = new OuterWorld(); Match("-v").of( Case(isIn("-h", "--help"), o -> run(outerWorld::displayHelp)), Case(isIn("-v", "--version"), o -> run(outerWorld::displayVersion)), Case($(), o -> { throw new IllegalArgumentException(); }) ); assertThat(outerWorld.effect).isEqualTo("version"); } @Test public void shouldRunWithInferredArguments() { class OuterWorld { Number effect = null; void writeInt(int i) { effect = i; } void writeDouble(double d) { effect = d; } } final OuterWorld outerWorld = new OuterWorld(); final Object obj = .1d; Match(obj).of( Case(instanceOf(Integer.class), i -> run(() -> outerWorld.writeInt(i))), Case(instanceOf(Double.class), d -> run(() -> outerWorld.writeDouble(d))), Case($(), o -> { throw new NumberFormatException(); }) ); assertThat(outerWorld.effect).isEqualTo(.1d); } // -- Developer @Test public void shouldMatchCustomTypeWithUnapplyMethod() { final Person person = new Developer("Daniel", true, Option.some(13)); final String actual = Match(person).of( Case(Developer($("Daniel"), $(true), $()), Person.Util::devInfo), Case($(), p -> "Unknown person: " + p.getName()) ); assertThat(actual).isEqualTo("Daniel is caffeinated."); } interface Person { String getName(); class Util { static String devInfo(String name, boolean isCaffeinated, Option<Number> number) { return name + " is " + (isCaffeinated ? 
"" : "not ") + "caffeinated."; } } } static final class Developer implements Person { private final String name; private final boolean isCaffeinated; private final Option<Number> number; Developer(String name, boolean isCaffeinated, Option<Number> number) { this.name = name; this.isCaffeinated = isCaffeinated; this.number = number; } public String getName() { return name; } public boolean isCaffeinated() { return isCaffeinated; } public Option<Number> number() { return number; } @javaslang.match.annotation.Patterns static class $ { @Unapply static Tuple3<String, Boolean, Option<Number>> Developer(Developer dev) { return Tuple.of(dev.getName(), dev.isCaffeinated(), dev.number()); } } } // Ambiguity check @Test public void shouldNotAmbiguous() { final Option<String> ok = Option.of("ok"); { // value // Case("1", o -> "ok"); // Not possible, would lead to ambiguities (see below) assertThat(Case("1", () -> "ok").apply("1")).isEqualTo(ok); assertThat(Case("1", "ok").apply("1")).isEqualTo(ok); } { // predicate as variable Predicate<String> p = s -> true; assertThat(Case(p, o -> "ok").apply("1")).isEqualTo(ok); // ambiguous, if Case(T, Function<T, R>) present assertThat(Case(p, () -> "ok").apply("1")).isEqualTo(ok); assertThat(Case(p, "ok").apply("1")).isEqualTo(ok); } { // predicate as lambda assertThat(Case(o -> true, o -> "ok").apply("1")).isEqualTo(ok); // ambiguous, if Case(T, Function<T, R>) present assertThat(Case(o -> true, () -> "ok").apply("1")).isEqualTo(ok); assertThat(Case(o -> true, "ok").apply("1")).isEqualTo(ok); } { // $(predicate) assertThat(Case($(o -> true), o -> "ok").apply("1")).isEqualTo(ok); // ambiguous, if Case(T, Function<T, R>) present assertThat(Case($(o -> true), () -> "ok").apply("1")).isEqualTo(ok); assertThat(Case($(o -> true), "ok").apply("1")).isEqualTo(ok); } { // $(value) assertThat(Case($("1"), o -> "ok").apply("1")).isEqualTo(ok); // ambiguous, if Case(T, Function<T, R>) present assertThat(Case($("1"), () -> "ok").apply("1")).isEqualTo(ok); assertThat(Case($("1"), "ok").apply("1")).isEqualTo(ok); } } }
1
9,456
Is this to avoid collisions?
vavr-io-vavr
java
@@ -79,7 +79,7 @@ func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local) *TransactionPo expiredTxCount: make(map[basics.Round]int), ledger: ledger, statusCache: makeStatusCache(cfg.TxPoolSize), - logStats: cfg.EnableAssembleStats, + logStats: cfg.EnableProcessBlockStats, expFeeFactor: cfg.TxPoolExponentialIncreaseFactor, txPoolMaxSize: cfg.TxPoolSize, }
1
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package pools import ( "fmt" "sync" "time" "github.com/algorand/go-deadlock" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/verify" "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/util/condvar" ) // TransactionPool is a struct maintaining a sanitized pool of transactions that are available for inclusion in // a Block. We sanitize it by preventing duplicates and limiting the number of transactions retained for each account type TransactionPool struct { mu deadlock.Mutex cond sync.Cond expiredTxCount map[basics.Round]int pendingBlockEvaluator *ledger.BlockEvaluator numPendingWholeBlocks basics.Round feeThresholdMultiplier uint64 ledger *ledger.Ledger statusCache *statusCache logStats bool expFeeFactor uint64 txPoolMaxSize int // pendingMu protects pendingTxGroups and pendingTxids pendingMu deadlock.RWMutex pendingTxGroups [][]transactions.SignedTxn pendingVerifyParams [][]verify.Params pendingTxids map[transactions.Txid]txPoolVerifyCacheVal // Calls to remember() add transactions to rememberedTxGroups and // rememberedTxids. Calling rememberCommit() adds them to the // pendingTxGroups and pendingTxids. This allows us to batch the // changes in OnNewBlock() without preventing a concurrent call // to Pending() or Verified(). rememberedTxGroups [][]transactions.SignedTxn rememberedVerifyParams [][]verify.Params rememberedTxids map[transactions.Txid]txPoolVerifyCacheVal } // MakeTransactionPool is the constructor, it uses Ledger to ensure that no account has pending transactions that together overspend. // // The pool also contains status information for the last transactionPoolStatusSize // transactions that were removed from the pool without being committed. 
func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local) *TransactionPool { if cfg.TxPoolExponentialIncreaseFactor < 1 { cfg.TxPoolExponentialIncreaseFactor = 1 } pool := TransactionPool{ pendingTxids: make(map[transactions.Txid]txPoolVerifyCacheVal), rememberedTxids: make(map[transactions.Txid]txPoolVerifyCacheVal), expiredTxCount: make(map[basics.Round]int), ledger: ledger, statusCache: makeStatusCache(cfg.TxPoolSize), logStats: cfg.EnableAssembleStats, expFeeFactor: cfg.TxPoolExponentialIncreaseFactor, txPoolMaxSize: cfg.TxPoolSize, } pool.cond.L = &pool.mu pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round)) return &pool } type txPoolVerifyCacheVal struct { txn transactions.SignedTxn params verify.Params } // TODO I moved this number to be a constant in the module, we should consider putting it in the local config const expiredHistory = 10 // timeoutOnNewBlock determines how long Test() and Remember() wait for // OnNewBlock() to process a new block that appears to be in the ledger. const timeoutOnNewBlock = time.Second // NumExpired returns the number of transactions that expired at the end of a round (only meaningful if cleanup has // been called for that round) func (pool *TransactionPool) NumExpired(round basics.Round) int { pool.mu.Lock() defer pool.mu.Unlock() return pool.expiredTxCount[round] } // PendingTxIDs return the IDs of all pending transactions func (pool *TransactionPool) PendingTxIDs() []transactions.Txid { pool.pendingMu.RLock() defer pool.pendingMu.RUnlock() ids := make([]transactions.Txid, len(pool.pendingTxids)) i := 0 for txid := range pool.pendingTxids { ids[i] = txid i++ } return ids } // Pending returns a list of transaction groups that should be proposed // in the next block, in order. func (pool *TransactionPool) Pending() [][]transactions.SignedTxn { pool.pendingMu.RLock() defer pool.pendingMu.RUnlock() // note that this operation is safe for the sole reason that arrays in go are immutable. // if the underlaying array need to be expanded, the actual underlaying array would need // to be reallocated. return pool.pendingTxGroups } // rememberCommit() saves the changes added by remember to // pendingTxGroups and pendingTxids. The caller is assumed to // be holding pool.mu. flush indicates whether previous // pendingTxGroups and pendingTxids should be flushed out and // replaced altogether by rememberedTxGroups and rememberedTxids. func (pool *TransactionPool) rememberCommit(flush bool) { pool.pendingMu.Lock() defer pool.pendingMu.Unlock() if flush { pool.pendingTxGroups = pool.rememberedTxGroups pool.pendingVerifyParams = pool.rememberedVerifyParams pool.pendingTxids = pool.rememberedTxids } else { pool.pendingTxGroups = append(pool.pendingTxGroups, pool.rememberedTxGroups...) pool.pendingVerifyParams = append(pool.pendingVerifyParams, pool.rememberedVerifyParams...) for txid, txn := range pool.rememberedTxids { pool.pendingTxids[txid] = txn } } pool.rememberedTxGroups = nil pool.rememberedVerifyParams = nil pool.rememberedTxids = make(map[transactions.Txid]txPoolVerifyCacheVal) } // PendingCount returns the number of transactions currently pending in the pool. 
func (pool *TransactionPool) PendingCount() int { pool.pendingMu.RLock() defer pool.pendingMu.RUnlock() return pool.pendingCountNoLock() } // pendingCountNoLock is a helper for PendingCount that returns the number of // transactions pending in the pool func (pool *TransactionPool) pendingCountNoLock() int { var count int for _, txgroup := range pool.pendingTxGroups { count += len(txgroup) } return count } // checkPendingQueueSize test to see if there is more room in the pending // group transaction list. As long as we haven't surpassed the size limit, we // should be good to go. func (pool *TransactionPool) checkPendingQueueSize() error { pendingSize := len(pool.Pending()) if pendingSize >= pool.txPoolMaxSize { return fmt.Errorf("TransactionPool.Test: transaction pool have reached capacity") } return nil } func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn) error { // The baseline threshold fee per byte is 1, the smallest fee we can // represent. This amounts to a fee of 100 for a 100-byte txn, which // is well below MinTxnFee (1000). This means that, when the pool // is not under load, the total MinFee dominates for small txns, // but once the pool comes under load, the fee-per-byte will quickly // come to dominate. feePerByte := uint64(1) // The threshold is multiplied by the feeThresholdMultiplier that // tracks the load on the transaction pool over time. If the pool // is mostly idle, feeThresholdMultiplier will be 0, and all txns // are accepted (assuming the BlockEvaluator approves them, which // requires a flat MinTxnFee). feePerByte = feePerByte * pool.feeThresholdMultiplier // The feePerByte should be bumped to 1 to make the exponentially // threshold growing valid. if feePerByte == 0 && pool.numPendingWholeBlocks > 1 { feePerByte = uint64(1) } // The threshold grows exponentially if there are multiple blocks // pending in the pool. // golang has no convenient integer exponentiation, so we just // do this in a loop for i := 0; i < int(pool.numPendingWholeBlocks)-1; i++ { feePerByte *= pool.expFeeFactor } for _, t := range txgroup { feeThreshold := feePerByte * uint64(t.GetEncodedLength()) if t.Txn.Fee.Raw < feeThreshold { return fmt.Errorf("fee %d below threshold %d (%d per byte * %d bytes)", t.Txn.Fee, feeThreshold, feePerByte, t.GetEncodedLength()) } } return nil } // Test performs basic duplicate detection and well-formedness checks // on a transaction group without storing the group. func (pool *TransactionPool) Test(txgroup []transactions.SignedTxn) error { if err := pool.checkPendingQueueSize(); err != nil { return err } pool.mu.Lock() defer pool.mu.Unlock() if pool.pendingBlockEvaluator == nil { return fmt.Errorf("Test: pendingBlockEvaluator is nil") } return pool.pendingBlockEvaluator.TestTransactionGroup(txgroup) } type poolIngestParams struct { checkFee bool // if set, perform fee checks preferSync bool // if set, wait until ledger is caught up } // remember attempts to add a transaction group to the pool. func (pool *TransactionPool) remember(txgroup []transactions.SignedTxn, verifyParams []verify.Params) error { params := poolIngestParams{ checkFee: true, preferSync: true, } return pool.ingest(txgroup, verifyParams, params) } // add tries to add the transaction group to the pool, bypassing the fee // priority checks. 
func (pool *TransactionPool) add(txgroup []transactions.SignedTxn, verifyParams []verify.Params) error { params := poolIngestParams{ checkFee: false, preferSync: false, } return pool.ingest(txgroup, verifyParams, params) } // ingest checks whether a transaction group could be remembered in the pool, // and stores this transaction if valid. // // ingest assumes that pool.mu is locked. It might release the lock // while it waits for OnNewBlock() to be called. func (pool *TransactionPool) ingest(txgroup []transactions.SignedTxn, verifyParams []verify.Params, params poolIngestParams) error { if pool.pendingBlockEvaluator == nil { return fmt.Errorf("TransactionPool.ingest: no pending block evaluator") } if params.preferSync { // Make sure that the latest block has been processed by OnNewBlock(). // If not, we might be in a race, so wait a little bit for OnNewBlock() // to catch up to the ledger. latest := pool.ledger.Latest() waitExpires := time.Now().Add(timeoutOnNewBlock) for pool.pendingBlockEvaluator.Round() <= latest && time.Now().Before(waitExpires) { condvar.TimedWait(&pool.cond, timeoutOnNewBlock) if pool.pendingBlockEvaluator == nil { return fmt.Errorf("TransactionPool.ingest: no pending block evaluator") } } } if params.checkFee { err := pool.checkSufficientFee(txgroup) if err != nil { return err } } err := pool.addToPendingBlockEvaluator(txgroup) if err != nil { return err } pool.rememberedTxGroups = append(pool.rememberedTxGroups, txgroup) pool.rememberedVerifyParams = append(pool.rememberedVerifyParams, verifyParams) for i, t := range txgroup { pool.rememberedTxids[t.ID()] = txPoolVerifyCacheVal{txn: t, params: verifyParams[i]} } return nil } // RememberOne stores the provided transaction // Precondition: Only RememberOne() properly-signed and well-formed transactions (i.e., ensure t.WellFormed()) func (pool *TransactionPool) RememberOne(t transactions.SignedTxn, verifyParams verify.Params) error { return pool.Remember([]transactions.SignedTxn{t}, []verify.Params{verifyParams}) } // Remember stores the provided transaction group // Precondition: Only Remember() properly-signed and well-formed transactions (i.e., ensure t.WellFormed()) func (pool *TransactionPool) Remember(txgroup []transactions.SignedTxn, verifyParams []verify.Params) error { if err := pool.checkPendingQueueSize(); err != nil { return err } pool.mu.Lock() defer pool.mu.Unlock() err := pool.remember(txgroup, verifyParams) if err != nil { return fmt.Errorf("TransactionPool.Remember: %v", err) } pool.rememberCommit(false) return nil } // Lookup returns the error associated with a transaction that used // to be in the pool. If no status information is available (e.g., because // it was too long ago, or the transaction committed successfully), then // found is false. If the transaction is still in the pool, txErr is empty. func (pool *TransactionPool) Lookup(txid transactions.Txid) (tx transactions.SignedTxn, txErr string, found bool) { if pool == nil { return transactions.SignedTxn{}, "", false } pool.mu.Lock() defer pool.mu.Unlock() pool.pendingMu.RLock() defer pool.pendingMu.RUnlock() cacheval, inPool := pool.pendingTxids[txid] tx = cacheval.txn if inPool { return tx, "", true } return pool.statusCache.check(txid) } // Verified returns whether a given SignedTxn is already in the // pool, and, since only verified transactions should be added // to the pool, whether that transaction is verified (i.e., Verify // returned success). 
This is used as an optimization to avoid // re-checking signatures on transactions that we have already // verified. func (pool *TransactionPool) Verified(txn transactions.SignedTxn, params verify.Params) bool { if pool == nil { return false } pool.pendingMu.RLock() defer pool.pendingMu.RUnlock() cacheval, ok := pool.pendingTxids[txn.ID()] if !ok { return false } if cacheval.params != params { return false } pendingSigTxn := cacheval.txn return pendingSigTxn.Sig == txn.Sig && pendingSigTxn.Msig.Equal(txn.Msig) && pendingSigTxn.Lsig.Equal(&txn.Lsig) } // OnNewBlock excises transactions from the pool that are included in the specified Block or if they've expired func (pool *TransactionPool) OnNewBlock(block bookkeeping.Block, delta ledger.StateDelta) { var stats telemetryspec.ProcessBlockMetrics var knownCommitted uint var unknownCommitted uint commitedTxids := delta.Txids if pool.logStats { pool.pendingMu.RLock() for txid := range commitedTxids { if _, ok := pool.pendingTxids[txid]; ok { knownCommitted++ } else { unknownCommitted++ } } pool.pendingMu.RUnlock() } pool.mu.Lock() defer pool.mu.Unlock() defer pool.cond.Broadcast() if pool.pendingBlockEvaluator == nil || block.Round() >= pool.pendingBlockEvaluator.Round() { // Adjust the pool fee threshold. The rules are: // - If there was less than one full block in the pool, reduce // the multiplier by 2x. It will eventually go to 0, so that // only the flat MinTxnFee matters if the pool is idle. // - If there were less than two full blocks in the pool, keep // the multiplier as-is. // - If there were two or more full blocks in the pool, grow // the multiplier by 2x (or increment by 1, if 0). switch pool.numPendingWholeBlocks { case 0: pool.feeThresholdMultiplier = pool.feeThresholdMultiplier / pool.expFeeFactor case 1: // Keep the fee multiplier the same. default: if pool.feeThresholdMultiplier == 0 { pool.feeThresholdMultiplier = 1 } else { pool.feeThresholdMultiplier = pool.feeThresholdMultiplier * pool.expFeeFactor } } // Recompute the pool by starting from the new latest block. // This has the side-effect of discarding transactions that // have been committed (or that are otherwise no longer valid). 
stats = pool.recomputeBlockEvaluator(commitedTxids) } stats.KnownCommittedCount = knownCommitted stats.UnknownCommittedCount = unknownCommitted proto := config.Consensus[block.CurrentProtocol] pool.expiredTxCount[block.Round()] = int(stats.ExpiredCount) delete(pool.expiredTxCount, block.Round()-expiredHistory*basics.Round(proto.MaxTxnLife)) if pool.logStats { var details struct { Round uint64 } details.Round = uint64(block.Round()) logging.Base().Metrics(telemetryspec.Transaction, stats, details) } } func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactions.SignedTxn) error { r := pool.pendingBlockEvaluator.Round() + pool.numPendingWholeBlocks for _, tx := range txgroup { if tx.Txn.LastValid < r { return transactions.TxnDeadError{ Round: r, FirstValid: tx.Txn.FirstValid, LastValid: tx.Txn.LastValid, } } } txgroupad := make([]transactions.SignedTxnWithAD, len(txgroup)) for i, tx := range txgroup { txgroupad[i].SignedTxn = tx } return pool.pendingBlockEvaluator.TransactionGroup(txgroupad) } func (pool *TransactionPool) addToPendingBlockEvaluator(txgroup []transactions.SignedTxn) error { err := pool.addToPendingBlockEvaluatorOnce(txgroup) if err == ledger.ErrNoSpace { pool.numPendingWholeBlocks++ pool.pendingBlockEvaluator.ResetTxnBytes() err = pool.addToPendingBlockEvaluatorOnce(txgroup) } return err } // recomputeBlockEvaluator constructs a new BlockEvaluator and feeds all // in-pool transactions to it (removing any transactions that are rejected // by the BlockEvaluator). func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transactions.Txid]basics.Round) (stats telemetryspec.ProcessBlockMetrics) { pool.pendingBlockEvaluator = nil latest := pool.ledger.Latest() prev, err := pool.ledger.BlockHdr(latest) if err != nil { logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: cannot get prev header for %d: %v", latest, err) return } // Process upgrade to see if we support the next protocol version _, upgradeState, err := bookkeeping.ProcessUpgradeParams(prev) if err != nil { logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: error processing upgrade params for next round: %v", err) return } // Ensure we know about the next protocol version (MakeBlock will panic // if we don't, and we would rather stall locally than panic) _, ok := config.Consensus[upgradeState.CurrentProtocol] if !ok { logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: next protocol version %v is not supported", upgradeState.CurrentProtocol) return } // Grab the transactions to be played through the new block evaluator pool.pendingMu.RLock() txgroups := pool.pendingTxGroups verifyParams := pool.pendingVerifyParams pendingCount := pool.pendingCountNoLock() pool.pendingMu.RUnlock() next := bookkeeping.MakeBlock(prev) pool.numPendingWholeBlocks = 0 pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, pendingCount) if err != nil { logging.Base().Warnf("TransactionPool.recomputeBlockEvaluator: cannot start evaluator: %v", err) return } // Feed the transactions in order for i, txgroup := range txgroups { if len(txgroup) == 0 { continue } if _, alreadyCommitted := committedTxIds[txgroup[0].ID()]; alreadyCommitted { continue } err := pool.add(txgroup, verifyParams[i]) if err != nil { for _, tx := range txgroup { pool.statusCache.put(tx, err.Error()) } switch err.(type) { case transactions.TxnDeadError: stats.ExpiredCount++ default: stats.RemovedInvalidCount++ } } } pool.rememberCommit(true) return }
1
38,134
nit: a better name for this variable would be enableLogStats, but it's beyond the scope of your change.
algorand-go-algorand
go
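The reviewer on this record notes that the logStats field would read better as enableLogStats, while the patch itself only switches the source flag from EnableAssembleStats to EnableProcessBlockStats. A minimal sketch of how that follow-up rename might look, using illustrative stand-ins for config.Local and TransactionPool (only the lines shown here would change in the real code).

package pools

// localConfig is an illustrative stand-in for the relevant part of config.Local.
type localConfig struct {
	EnableProcessBlockStats bool
}

// transactionPool keeps only the field relevant to the suggested rename; the
// real TransactionPool carries many more fields.
type transactionPool struct {
	enableLogStats bool // previously logStats: the name now states what the flag controls
}

func makeTransactionPool(cfg localConfig) *transactionPool {
	return &transactionPool{
		// The patch above switches the source flag to EnableProcessBlockStats,
		// since the stats logged in OnNewBlock are ProcessBlockMetrics.
		enableLogStats: cfg.EnableProcessBlockStats,
	}
}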
@@ -256,7 +256,6 @@ class CheckerTestCase(object): # Init test_reporter = TestReporter() linter = PyLinter(pylint.config.Configuration()) -linter.set_reporter(test_reporter) linter.config.persistent = 0 checkers.initialize(PluginRegistry(linter))
1
# Copyright (c) 2012-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]> # Copyright (c) 2012 FELD Boris <[email protected]> # Copyright (c) 2013-2017 Claudiu Popa <[email protected]> # Copyright (c) 2013-2014 Google, Inc. # Copyright (c) 2013 [email protected] <[email protected]> # Copyright (c) 2014 LCD 47 <[email protected]> # Copyright (c) 2014 Brett Cannon <[email protected]> # Copyright (c) 2014 Ricardo Gemignani <[email protected]> # Copyright (c) 2014 Arun Persaud <[email protected]> # Copyright (c) 2015 Pavel Roskin <[email protected]> # Copyright (c) 2015 Ionel Cristian Maries <[email protected]> # Copyright (c) 2016 Derek Gustafson <[email protected]> # Copyright (c) 2016 Roy Williams <[email protected]> # Copyright (c) 2016 xmo-odoo <[email protected]> # Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html # For details: https://github.com/PyCQA/pylint/blob/master/COPYING """functional/non regression tests for pylint""" from __future__ import print_function import collections import contextlib import functools from glob import glob import os from os import linesep, getcwd, sep from os.path import abspath, basename, dirname, join, splitext import sys import tempfile import tokenize import six from six.moves import StringIO import astroid from pylint import checkers import pylint.config from pylint.utils import PyLintASTWalker from pylint.reporters import BaseReporter from pylint.interfaces import IReporter from pylint.lint import PluginRegistry, PyLinter # Utils SYS_VERS_STR = '%d%d%d' % sys.version_info[:3] TITLE_UNDERLINES = ['', '=', '-', '.'] PREFIX = abspath(dirname(__file__)) PY3K = sys.version_info[0] == 3 def _get_tests_info(input_dir, msg_dir, prefix, suffix): """get python input examples and output messages We use following conventions for input files and messages: for different inputs: test for python >= x.y -> input = <name>_pyxy.py test for python < x.y -> input = <name>_py_xy.py for one input and different messages: message for python >= x.y -> message = <name>_pyxy.txt lower versions -> message with highest num """ result = [] for fname in glob(join(input_dir, prefix + '*' + suffix)): infile = basename(fname) fbase = splitext(infile)[0] # filter input files : pyrestr = fbase.rsplit('_py', 1)[-1] # like _26 or 26 if pyrestr.isdigit(): # '24', '25'... if SYS_VERS_STR < pyrestr: continue if pyrestr.startswith('_') and pyrestr[1:].isdigit(): # skip test for higher python versions if SYS_VERS_STR >= pyrestr[1:]: continue messages = glob(join(msg_dir, fbase + '*.txt')) # the last one will be without ext, i.e. for all or upper versions: if messages: for outfile in sorted(messages, reverse=True): py_rest = outfile.rsplit('_py', 1)[-1][:-4] if py_rest.isdigit() and SYS_VERS_STR >= py_rest: break else: # This will provide an error message indicating the missing filename. 
outfile = join(msg_dir, fbase + '.txt') result.append((infile, outfile)) return result class TestReporter(BaseReporter): """reporter storing plain text messages""" __implements__ = IReporter def __init__(self): # pylint: disable=super-init-not-called self.message_ids = {} self.reset() self.path_strip_prefix = getcwd() + sep def reset(self): self.out = StringIO() self.messages = [] def handle_message(self, msg): """manage message of different type and in the context of path """ obj = msg.obj line = msg.line msg_id = msg.msg_id msg = msg.msg self.message_ids[msg_id] = 1 if obj: obj = ':%s' % obj sigle = msg_id[0] if PY3K and linesep != '\n': # 2to3 writes os.linesep instead of using # the previosly used line separators msg = msg.replace('\r\n', '\n') self.messages.append('%s:%3s%s: %s' % (sigle, line, obj, msg)) def finalize(self): self.messages.sort() for msg in self.messages: print(msg, file=self.out) result = self.out.getvalue() self.reset() return result # pylint: disable=unused-argument def on_set_current_module(self, module, filepath): pass # pylint: enable=unused-argument def display_reports(self, layout): """ignore layouts""" _display = None class MinimalTestReporter(BaseReporter): def handle_message(self, msg): self.messages.append(msg) def on_set_current_module(self, module, filepath): self.messages = [] _display = None class Message(collections.namedtuple('Message', ['msg_id', 'line', 'node', 'args', 'confidence'])): def __new__(cls, msg_id, line=None, node=None, args=None, confidence=None): return tuple.__new__(cls, (msg_id, line, node, args, confidence)) def __eq__(self, other): if isinstance(other, Message): if self.confidence and other.confidence: return super(Message, self).__eq__(other) return self[:-1] == other[:-1] return NotImplemented # pragma: no cover __hash__ = None class UnittestLinter(object): """A fake linter class to capture checker messages.""" # pylint: disable=unused-argument, no-self-use def __init__(self): self._messages = [] self.stats = {} self.config = pylint.config.Configuration() def release_messages(self): try: return self._messages finally: self._messages = [] def add_message(self, msg_id, line=None, node=None, args=None, confidence=None, col_offset=None): # Do not test col_offset for now since changing Message breaks everything self._messages.append(Message(msg_id, line, node, args, confidence)) def is_message_enabled(self, *unused_args, **unused_kwargs): return True def add_stats(self, **kwargs): for name, value in six.iteritems(kwargs): self.stats[name] = value return self.stats @property def options_providers(self): return linter.options_providers def set_config(**kwargs): """Decorator for setting config values on a checker.""" def _wrapper(fun): @functools.wraps(fun) def _forward(self): for key, value in six.iteritems(kwargs): setattr(self.checker.config, key, value) if isinstance(self, CheckerTestCase): # reopen checker in case, it may be interested in configuration change self.checker.open() fun(self) return _forward return _wrapper class CheckerTestCase(object): """A base testcase class for unit testing individual checker classes.""" CHECKER_CLASS = None CONFIG = {} def setup_method(self): self.linter = UnittestLinter() registry = pylint.config.PluginRegistry(self.linter) registry.register_options = self.linter.config.add_options self.checker = self.CHECKER_CLASS(registry) # pylint: disable=not-callable for key, value in six.iteritems(self.CONFIG): setattr(self.checker.config, key, value) self.checker.open() @contextlib.contextmanager def 
assertNoMessages(self): """Assert that no messages are added by the given method.""" with self.assertAddsMessages(): yield @contextlib.contextmanager def assertAddsMessages(self, *messages): """Assert that exactly the given method adds the given messages. The list of messages must exactly match *all* the messages added by the method. Additionally, we check to see whether the args in each message can actually be substituted into the message string. """ yield got = self.linter.release_messages() msg = ('Expected messages did not match actual.\n' 'Expected:\n%s\nGot:\n%s' % ('\n'.join(repr(m) for m in messages), '\n'.join(repr(m) for m in got))) assert list(messages) == got, msg def walk(self, node): """recursive walk on the given node""" walker = PyLintASTWalker(linter) walker.add_checker(self.checker) walker.walk(node) # Init test_reporter = TestReporter() linter = PyLinter(pylint.config.Configuration()) linter.set_reporter(test_reporter) linter.config.persistent = 0 checkers.initialize(PluginRegistry(linter)) def _tokenize_str(code): return list(tokenize.generate_tokens(StringIO(code).readline)) @contextlib.contextmanager def _create_tempfile(content=None): """Create a new temporary file. If *content* parameter is given, then it will be written in the temporary file, before passing it back. This is a context manager and should be used with a *with* statement. """ # Can't use tempfile.NamedTemporaryFile here # because on Windows the file must be closed before writing to it, # see http://bugs.python.org/issue14243 file_handle, tmp = tempfile.mkstemp() if content: if sys.version_info >= (3, 0): # erff os.write(file_handle, bytes(content, 'ascii')) else: os.write(file_handle, content) try: yield tmp finally: os.close(file_handle) os.remove(tmp) @contextlib.contextmanager def _create_file_backed_module(code): """Create an astroid module for the given code, backed by a real file.""" with _create_tempfile() as temp: module = astroid.parse(code) module.file = temp yield module
1
9,902
Because linters don't handle reports now, this was breaking the setup for _all_ tests. I deleted it so I could run my tests, but I didn't check the impact on other tests as many tests are failing at the moment.
PyCQA-pylint
py
@@ -138,6 +138,12 @@ func main() { updateCommand, } app.Before = func(context *cli.Context) error { + + // do nothing if logrus was already initialized in init.go + if logrus.StandardLogger().Out != logrus.New().Out { + return nil + } + if context.GlobalBool("debug") { logrus.SetLevel(logrus.DebugLevel) }
1
package main import ( "fmt" "io" "os" "strings" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" "github.com/urfave/cli" ) // version will be populated by the Makefile, read from // VERSION file of the source code. var version = "" // gitCommit will be the hash that the binary was built from // and will be populated by the Makefile var gitCommit = "" const ( specConfig = "config.json" usage = `Open Container Initiative runtime runc is a command line client for running applications packaged according to the Open Container Initiative (OCI) format and is a compliant implementation of the Open Container Initiative specification. runc integrates well with existing process supervisors to provide a production container runtime environment for applications. It can be used with your existing process monitoring tools and the container will be spawned as a direct child of the process supervisor. Containers are configured using bundles. A bundle for a container is a directory that includes a specification file named "` + specConfig + `" and a root filesystem. The root filesystem contains the contents of the container. To start a new instance of a container: # runc run [ -b bundle ] <container-id> Where "<container-id>" is your name for the instance of the container that you are starting. The name you provide for the container instance must be unique on your host. Providing the bundle directory using "-b" is optional. The default value for "bundle" is the current directory.` ) func main() { app := cli.NewApp() app.Name = "runc" app.Usage = usage var v []string if version != "" { v = append(v, version) } if gitCommit != "" { v = append(v, fmt.Sprintf("commit: %s", gitCommit)) } v = append(v, fmt.Sprintf("spec: %s", specs.Version)) app.Version = strings.Join(v, "\n") root := "/run/runc" rootless, err := isRootless(nil) if err != nil { fatal(err) } if rootless { runtimeDir := os.Getenv("XDG_RUNTIME_DIR") if runtimeDir != "" { root = runtimeDir + "/runc" // According to the XDG specification, we need to set anything in // XDG_RUNTIME_DIR to have a sticky bit if we don't want it to get // auto-pruned. if err := os.MkdirAll(root, 0700); err != nil { fatal(err) } if err := os.Chmod(root, 0700|os.ModeSticky); err != nil { fatal(err) } } } app.Flags = []cli.Flag{ cli.BoolFlag{ Name: "debug", Usage: "enable debug output for logging", }, cli.StringFlag{ Name: "log", Value: "/dev/null", Usage: "set the log file path where internal debug information is written", }, cli.StringFlag{ Name: "log-format", Value: "text", Usage: "set the format used by logs ('text' (default), or 'json')", }, cli.StringFlag{ Name: "root", Value: root, Usage: "root directory for storage of container state (this should be located in tmpfs)", }, cli.StringFlag{ Name: "criu", Value: "criu", Usage: "path to the criu binary used for checkpoint and restore", }, cli.BoolFlag{ Name: "systemd-cgroup", Usage: "enable systemd cgroup support, expects cgroupsPath to be of form \"slice:prefix:name\" for e.g. 
\"system.slice:runc:434234\"", }, cli.StringFlag{ Name: "rootless", Value: "auto", Usage: "enable rootless mode ('true', 'false', or 'auto')", }, } app.Commands = []cli.Command{ checkpointCommand, createCommand, deleteCommand, eventsCommand, execCommand, initCommand, killCommand, listCommand, pauseCommand, psCommand, restoreCommand, resumeCommand, runCommand, specCommand, startCommand, stateCommand, updateCommand, } app.Before = func(context *cli.Context) error { if context.GlobalBool("debug") { logrus.SetLevel(logrus.DebugLevel) } if path := context.GlobalString("log"); path != "" { f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND|os.O_SYNC, 0666) if err != nil { return err } logrus.SetOutput(f) } switch context.GlobalString("log-format") { case "text": // retain logrus's default. case "json": logrus.SetFormatter(new(logrus.JSONFormatter)) default: return fmt.Errorf("unknown log-format %q", context.GlobalString("log-format")) } return nil } // If the command returns an error, cli takes upon itself to print // the error on cli.ErrWriter and exit. // Use our own writer here to ensure the log gets sent to the right location. cli.ErrWriter = &FatalWriter{cli.ErrWriter} if err := app.Run(os.Args); err != nil { fatal(err) } } type FatalWriter struct { cliErrWriter io.Writer } func (f *FatalWriter) Write(p []byte) (n int, err error) { logrus.Error(string(p)) return f.cliErrWriter.Write(p) }
1
16,700
I'm not really a fan of this -- why not set up logging for `init` here (or set a global flag) rather than doing it this way?
opencontainers-runc
go
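To illustrate the alternative the reviewer raises, here is a rough Go sketch of the "global flag" approach: init.go would set a package-level flag once it has configured logrus itself, and the `app.Before` hook would skip reconfiguration when that flag is set. The flag name, function name, and structure are the editor's assumptions, not taken from runc.

```go
package main

import "github.com/sirupsen/logrus"

// logConfigured would be set by init.go once it has wired up logging for the
// container init process (the name is hypothetical).
var logConfigured bool

// setupLogging mirrors what app.Before does in main.go, but bails out early
// when init.go has already configured the logger.
func setupLogging(debug bool, path string) error {
	if logConfigured {
		// init.go already configured logrus; don't clobber its settings here.
		return nil
	}
	if debug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	// ... open `path` and call logrus.SetOutput, as main.go already does ...
	logConfigured = true
	return nil
}

func main() {} // placeholder so the sketch compiles on its own
```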
@@ -1,4 +1,14 @@ -/* global axe */ +/** + * Namespace `axe.imports` which holds required external dependencies + * + * @namespace imports + * @memberof axe + */ +export { default as axios } from 'axios'; +export { CssSelectorParser } from 'css-selector-parser'; +export { default as doT } from '@deque/dot'; +export { default as emojiRegexText } from 'emoji-regex'; +export { default as memoizee } from 'memoizee'; /** * Note:
1
/* global axe */ /** * Note: * This file is run via browserify to pull in the required dependencies. * See - './build/imports-generator' */ /** * Polyfill `Promise` * Reference: https://www.npmjs.com/package/es6-promise */ if (!('Promise' in window)) { require('es6-promise').polyfill(); } /** * Polyfill required TypedArray and functions * Reference https://github.com/zloirock/core-js/ */ if (!('Uint32Array' in window)) { require('core-js/features/typed-array/uint32-array'); } if (window.Uint32Array) { if (!('some' in window.Uint32Array.prototype)) { require('core-js/features/typed-array/some'); } if (!('reduce' in window.Uint32Array.prototype)) { require('core-js/features/typed-array/reduce'); } } /** * Polyfill `WeakMap` * Reference: https://github.com/polygonplanet/weakmap-polyfill */ require('weakmap-polyfill'); /** * Namespace `axe.imports` which holds required external dependencies * * @namespace imports * @memberof axe */ axe.imports = { axios: require('axios'), CssSelectorParser: require('css-selector-parser').CssSelectorParser, doT: require('@deque/dot'), emojiRegexText: require('emoji-regex') };
1
15,088
This is my favorite part of this PR. Being able to `import` 3rd party tools will hugely improve our workflows. :heart:
dequelabs-axe-core
js
@@ -0,0 +1,19 @@ +/** + * BSD-style license; for more info see http://pmd.sourceforge.net/license.html + */ + +package net.sourceforge.pmd.lang.java.rule.multithreading; + +import java.security.MessageDigest; + +/** + * Using a MessageDigest which is static can cause + * unexpected results when used in a multi-threaded environment. This rule will + * find static MessageDigest which are used in an unsynchronized manner. + */ +public class UnsynchronizedStaticMessageDigestRule extends UnsynchronizedStaticFormatterRule { + + public UnsynchronizedStaticMessageDigestRule() { + super(MessageDigest.class); + } +}
1
1
16,311
having this extend `UnsynchronizedStaticFormatterRule` seems semantically incorrect even if it works. We should probably refactor `UnsynchronizedStaticFormatterRule` into an `UnsynchronizedStaticAccessRule` which can be configured through properties to track unsynchronized static access to any given types (with proper defaults that include `Formatter` and `MessageDigest`). This will also scale better as new types are suggested to have thread-safe access enforced. The `UnsynchronizedStaticFormatterRule` can be marked as deprecated at the ruleset level, point to the new implementation, and override the property defaults to keep looking just for `Formatter`. Moreover, the `THREAD_SAFE_FORMATTER` static should probably be replaced with a property of thread-safe classes to ignore, once again with proper defaults.
pmd-pmd
java
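A rough sketch of the refactoring direction suggested in the review above: one generic rule that tracks unsynchronized static access to a configurable set of types, with the existing rules kept as thin, deprecable subclasses. The class names, constructor shape, and the note about a string-list property are the editor's assumptions, not the actual PMD implementation; in PMD the class would extend the existing abstract Java rule base class and expose the tracked types as a rule property.

```java
import java.security.MessageDigest;
import java.util.Arrays;
import java.util.Formatter;
import java.util.List;

// Hypothetical generic rule; in PMD it would extend the abstract Java rule
// base class, and trackedTypes would be exposed as a string-list rule
// property so rulesets can override it in XML.
public class UnsynchronizedStaticAccessRule {

    private final List<Class<?>> trackedTypes;

    public UnsynchronizedStaticAccessRule() {
        // proper defaults cover both existing rules
        this(Formatter.class, MessageDigest.class);
    }

    protected UnsynchronizedStaticAccessRule(Class<?>... types) {
        this.trackedTypes = Arrays.asList(types);
    }

    protected List<Class<?>> getTrackedTypes() {
        return trackedTypes;
    }
}

// The old rule can stay as a deprecated alias that narrows the defaults.
@Deprecated
class UnsynchronizedStaticFormatterRule extends UnsynchronizedStaticAccessRule {
    public UnsynchronizedStaticFormatterRule() {
        super(Formatter.class);
    }
}
```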
@@ -126,6 +126,8 @@ namespace Nethermind.Synchronization.ParallelSync } public long FindBestHeader() => _blockTree.BestSuggestedHeader?.Number ?? 0; + + public Keccak FindBestHeaderHash() => _blockTree.BestSuggestedHeader?.Hash ?? Keccak.Zero; public long FindBestFullBlock() => Math.Min(FindBestHeader(), _blockTree.BestSuggestedBody?.Number ?? 0); // avoiding any potential concurrency issue
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using Nethermind.Blockchain; using Nethermind.Blockchain.Receipts; using Nethermind.Blockchain.Synchronization; using Nethermind.Core; using Nethermind.Core.Crypto; using Nethermind.Db; using Nethermind.Dirichlet.Numerics; using Nethermind.Logging; namespace Nethermind.Synchronization.ParallelSync { public class SyncProgressResolver : ISyncProgressResolver { // TODO: we can search 1024 back and confirm 128 deep header and start using it as Max(0, confirmed) // then we will never have to look 128 back again // note that we will be doing that every second or so private const int _maxLookupBack = 128; private readonly IBlockTree _blockTree; private readonly IReceiptStorage _receiptStorage; private readonly IDb _stateDb; private readonly IDb _beamStateDb; private readonly ISyncConfig _syncConfig; private ILogger _logger; public SyncProgressResolver(IBlockTree blockTree, IReceiptStorage receiptStorage, IDb stateDb, IDb beamStateDb, ISyncConfig syncConfig, ILogManager logManager) { _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); _blockTree = blockTree ?? throw new ArgumentNullException(nameof(blockTree)); _receiptStorage = receiptStorage ?? throw new ArgumentNullException(nameof(receiptStorage)); _stateDb = stateDb ?? throw new ArgumentNullException(nameof(stateDb)); _beamStateDb = beamStateDb ?? throw new ArgumentNullException(nameof(beamStateDb)); _syncConfig = syncConfig ?? 
throw new ArgumentNullException(nameof(syncConfig)); } private bool IsFullySynced(Keccak stateRoot) { if (stateRoot == Keccak.EmptyTreeHash) { return true; } return _stateDb.Innermost.Get(stateRoot) != null; } private bool IsBeamSynced(Keccak stateRoot) { if (stateRoot == Keccak.EmptyTreeHash) { return true; } return _beamStateDb.Innermost.Get(stateRoot) != null; } public long FindBestFullState() { // so the full state can be in a few places but there are some best guesses // if we are state syncing then the full state may be one of the recent blocks (maybe one of the last 128 blocks) // if we full syncing then the state should be at head // if we are beam syncing then the state should be in a different DB and should not cause much trouble here // it also may seem tricky if best suggested is part of a reorg while we are already full syncing so // ideally we would like to check it siblings too (but this may be a bit expensive and less likely // to be important // we want to avoid a scenario where state is not found even as it is just near head or best suggested Block head = _blockTree.Head; BlockHeader initialBestSuggested = _blockTree.BestSuggestedHeader; // just storing here for debugging sake BlockHeader bestSuggested = initialBestSuggested; long bestFullState = 0; if (head != null) { // head search should be very inexpensive as we generally expect the state to be there bestFullState = SearchForFullState(head.Header); } if (bestSuggested != null) { if (bestFullState < bestSuggested?.Number) { bestFullState = Math.Max(bestFullState, SearchForFullState(bestSuggested)); } } return bestFullState; } private long SearchForFullState(BlockHeader startHeader) { long bestFullState = 0; for (int i = 0; i < _maxLookupBack; i++) { if (startHeader == null) { break; } if (IsFullySynced(startHeader.StateRoot)) { bestFullState = startHeader.Number; break; } startHeader = _blockTree.FindHeader(startHeader.ParentHash, BlockTreeLookupOptions.TotalDifficultyNotNeeded); } return bestFullState; } public long FindBestHeader() => _blockTree.BestSuggestedHeader?.Number ?? 0; public long FindBestFullBlock() => Math.Min(FindBestHeader(), _blockTree.BestSuggestedBody?.Number ?? 0); // avoiding any potential concurrency issue public bool IsLoadingBlocksFromDb() { return !_blockTree.CanAcceptNewBlocks; } public long FindBestProcessedBlock() => _blockTree.Head?.Number ?? -1; public UInt256 ChainDifficulty => _blockTree.BestSuggestedBody?.TotalDifficulty ?? UInt256.Zero; public bool IsFastBlocksHeadersFinished() => !IsFastBlocks() || (_blockTree.LowestInsertedHeader?.Number ?? long.MaxValue) <= 1; public bool IsFastBlocksBodiesFinished() => !IsFastBlocks() || (!_syncConfig.DownloadBodiesInFastSync || (_blockTree.LowestInsertedBody?.Number ?? long.MaxValue) <= 1); public bool IsFastBlocksReceiptsFinished() => !IsFastBlocks() || (!_syncConfig.DownloadReceiptsInFastSync || (_receiptStorage.LowestInsertedReceiptBlock ?? long.MaxValue) <= 1); private bool IsFastBlocks() { bool isFastBlocks = _syncConfig.FastBlocks; // if pivot number is 0 then it is equivalent to fast blocks disabled if (!isFastBlocks || _syncConfig.PivotNumberParsed == 0L) { return false; } bool immediateBeamSync = !_syncConfig.DownloadHeadersInFastSync; bool anyHeaderDownloaded = _blockTree.LowestInsertedHeader != null; if (immediateBeamSync && anyHeaderDownloaded) { return false; } return true; } } }
1
24,152
Keccak.Zero should not be used to mean null
NethermindEth-nethermind
.cs
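A small illustration of the point above, with simplified stand-in types rather than the real Nethermind classes: returning null keeps "no best suggested header yet" distinguishable from a hash that is genuinely all zeros, whereas a `Keccak.Zero` sentinel conflates the two.

```csharp
// Simplified stand-ins for illustration only; not the Nethermind API.
public sealed class Hash256
{
    public static readonly Hash256 Zero = new Hash256();
}

public class ProgressResolver
{
    // Stays null until a header has actually been suggested.
    private Hash256 _bestSuggestedHeaderHash;

    // Callers test for null instead of comparing against a Zero sentinel,
    // which could in principle also be a legitimate hash value.
    public Hash256 FindBestHeaderHash() => _bestSuggestedHeaderHash;
}
```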
@@ -40,5 +40,12 @@ namespace OpenTelemetry.Metrics.Tests new object[] { "my_metric2" }, new object[] { new string('m', 63) }, }; + + public static IEnumerable<object[]> InvalidHistogramBounds + => new List<object[]> + { + new object[] { new double[] { 0, 0 } }, + new object[] { new double[] { 1, 0 } }, + }; } }
1
// <copyright file="MetricsTestData.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System.Collections.Generic; namespace OpenTelemetry.Metrics.Tests { public class MetricsTestData { public static IEnumerable<object[]> InvalidInstrumentNames => new List<object[]> { new object[] { " " }, new object[] { "-first-char-not-alphabetic" }, new object[] { "1first-char-not-alphabetic" }, new object[] { "invalid+separator" }, new object[] { new string('m', 64) }, }; public static IEnumerable<object[]> ValidInstrumentNames => new List<object[]> { new object[] { "m" }, new object[] { "first-char-alphabetic" }, new object[] { "my-2-instrument" }, new object[] { "my.metric" }, new object[] { "my_metric2" }, new object[] { new string('m', 63) }, }; } }
1
22,389
perhaps add a couple more edge cases.
open-telemetry-opentelemetry-dotnet
.cs
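Some candidate rows that could extend `InvalidHistogramBounds`, assuming the SDK rejects any bound array that is not strictly increasing; that assumption is the editor's reading of the two existing cases, not something confirmed from the SDK source.

```csharp
using System.Collections.Generic;

public static class MoreMetricsTestData
{
    // Hypothetical additions; the class and property names are made up so
    // the snippet stands alone next to MetricsTestData.
    public static IEnumerable<object[]> MoreInvalidHistogramBounds
        => new List<object[]>
        {
            new object[] { new double[] { 0, 1, 1 } },       // repeated value later in the array
            new object[] { new double[] { 0, 2, 1 } },       // decreasing after an increase
            new object[] { new double[] { 5, 4, 3, 2, 1 } }, // fully descending
        };
}
```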
@@ -4,11 +4,15 @@ package Example import "strconv" +/// Composite components of Monster color. type Color byte const ( ColorRed Color = 1 + /// \brief color Green + /// Green is bit_flag with value (1u << 1) ColorGreen Color = 2 + /// \brief color Blue (1u << 3) ColorBlue Color = 8 )
1
// Code generated by the FlatBuffers compiler. DO NOT EDIT. package Example import "strconv" type Color byte const ( ColorRed Color = 1 ColorGreen Color = 2 ColorBlue Color = 8 ) var EnumNamesColor = map[Color]string{ ColorRed: "Red", ColorGreen: "Green", ColorBlue: "Blue", } var EnumValuesColor = map[string]Color{ "Red": ColorRed, "Green": ColorGreen, "Blue": ColorBlue, } func (v Color) String() string { if s, ok := EnumNamesColor[v]; ok { return s } return "Color(" + strconv.FormatInt(int64(v), 10) + ")" }
1
15,961
Is this blank line needed, or is it a typo?
google-flatbuffers
java
@@ -122,12 +122,6 @@ func (p *blockPrefetcher) run() { func (p *blockPrefetcher) request(priority int, kmd KeyMetadata, ptr BlockPointer, block Block, entryName string, doneCh, errCh chan<- struct{}) error { - if _, err := p.config.BlockCache().Get(ptr); err == nil { - return nil - } - if err := checkDataVersion(p.config, path{}, ptr); err != nil { - return err - } select { case p.progressCh <- prefetchRequest{ priority, kmd, ptr, block, doneCh, errCh}:
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "io" "sort" "sync" "time" "github.com/keybase/client/go/logger" "github.com/keybase/kbfs/tlf" "github.com/pkg/errors" "golang.org/x/net/context" ) const ( fileIndirectBlockPrefetchPriority int = -100 dirEntryPrefetchPriority int = -200 updatePointerPrefetchPriority int = 0 defaultPrefetchPriority int = -1024 prefetchTimeout time.Duration = 15 * time.Minute ) type prefetcherConfig interface { syncedTlfGetterSetter dataVersioner logMaker blockCacher } type prefetchRequest struct { priority int kmd KeyMetadata ptr BlockPointer block Block doneCh chan<- struct{} errCh chan<- struct{} } type blockPrefetcher struct { config prefetcherConfig log logger.Logger // blockRetriever to retrieve blocks from the server retriever BlockRetriever // channel to synchronize prefetch requests with the prefetcher shutdown progressCh chan prefetchRequest // channel that is idempotently closed when a shutdown occurs shutdownCh chan struct{} // channel that is closed when a shutdown completes and all pending // prefetch requests are complete doneCh chan struct{} } var _ Prefetcher = (*blockPrefetcher)(nil) func newBlockPrefetcher(retriever BlockRetriever, config prefetcherConfig) *blockPrefetcher { p := &blockPrefetcher{ config: config, retriever: retriever, progressCh: make(chan prefetchRequest), shutdownCh: make(chan struct{}), doneCh: make(chan struct{}), } if config != nil { p.log = config.MakeLogger("PRE") } else { p.log = logger.NewNull() } if retriever == nil { // If we pass in a nil retriever, this prefetcher shouldn't do // anything. Treat it as already shut down. p.Shutdown() close(p.doneCh) } else { go p.run() } return p } func (p *blockPrefetcher) run() { var wg sync.WaitGroup defer func() { wg.Wait() close(p.doneCh) }() for { select { case req := <-p.progressCh: ctx, cancel := context.WithTimeout(context.Background(), prefetchTimeout) errCh := p.retriever.RequestWithPrefetch(ctx, req.priority, req.kmd, req.ptr, req.block, TransientEntry, req.doneCh, req.errCh) wg.Add(1) go func() { defer wg.Done() defer cancel() select { case err := <-errCh: if err != nil { p.log.CDebugf(ctx, "Done prefetch for block %s. "+ "Error: %+v", req.ptr.ID, err) } case <-p.shutdownCh: // Cancel but still wait for the request to finish, so that // p.doneCh accurately represents whether we still have // requests pending. cancel() <-errCh } }() case <-p.shutdownCh: return } } } func (p *blockPrefetcher) request(priority int, kmd KeyMetadata, ptr BlockPointer, block Block, entryName string, doneCh, errCh chan<- struct{}) error { if _, err := p.config.BlockCache().Get(ptr); err == nil { return nil } if err := checkDataVersion(p.config, path{}, ptr); err != nil { return err } select { case p.progressCh <- prefetchRequest{ priority, kmd, ptr, block, doneCh, errCh}: return nil case <-p.shutdownCh: return errors.Wrapf(io.EOF, "Skipping prefetch for block %v since "+ "the prefetcher is shutdown", ptr.ID) } } // calculatePriority returns either a base priority for an unsynced TLF or a // high priority for a synced TLF. func (p *blockPrefetcher) calculatePriority(basePriority int, tlfID tlf.ID) int { if p.config.IsSyncedTlf(tlfID) { return defaultOnDemandRequestPriority - 1 } return basePriority } func (p *blockPrefetcher) prefetchIndirectFileBlock(b *FileBlock, kmd KeyMetadata) (<-chan struct{}, <-chan struct{}, int) { // Prefetch indirect block pointers. 
p.log.CDebugf(context.TODO(), "Prefetching pointers for indirect file "+ "block. Num pointers to prefetch: %d", len(b.IPtrs)) startingPriority := p.calculatePriority(fileIndirectBlockPrefetchPriority, kmd.TlfID()) numBlocks := len(b.IPtrs) doneCh := make(chan struct{}, numBlocks) errCh := make(chan struct{}, numBlocks) for i, ptr := range b.IPtrs { _ = p.request(startingPriority-i, kmd, ptr.BlockPointer, b.NewEmpty(), "", doneCh, errCh) } return doneCh, errCh, numBlocks } func (p *blockPrefetcher) prefetchIndirectDirBlock(b *DirBlock, kmd KeyMetadata) (<-chan struct{}, <-chan struct{}, int) { // Prefetch indirect block pointers. p.log.CDebugf(context.TODO(), "Prefetching pointers for indirect dir "+ "block. Num pointers to prefetch: %d", len(b.IPtrs)) startingPriority := p.calculatePriority(fileIndirectBlockPrefetchPriority, kmd.TlfID()) numBlocks := len(b.IPtrs) doneCh := make(chan struct{}, numBlocks) errCh := make(chan struct{}, numBlocks) for i, ptr := range b.IPtrs { _ = p.request(startingPriority-i, kmd, ptr.BlockPointer, b.NewEmpty(), "", doneCh, errCh) } return doneCh, errCh, numBlocks } func (p *blockPrefetcher) prefetchDirectDirBlock(ptr BlockPointer, b *DirBlock, kmd KeyMetadata) (<-chan struct{}, <-chan struct{}, int) { p.log.CDebugf(context.TODO(), "Prefetching entries for directory block "+ "ID %s. Num entries: %d", ptr.ID, len(b.Children)) // Prefetch all DirEntry root blocks. dirEntries := dirEntriesBySizeAsc{dirEntryMapToDirEntries(b.Children)} sort.Sort(dirEntries) startingPriority := p.calculatePriority(dirEntryPrefetchPriority, kmd.TlfID()) numBlocks := 0 doneCh := make(chan struct{}, len(dirEntries.dirEntries)) errCh := make(chan struct{}, len(dirEntries.dirEntries)) for i, entry := range dirEntries.dirEntries { // Prioritize small files priority := startingPriority - i var block Block switch entry.Type { case Dir: block = &DirBlock{} case File: block = &FileBlock{} case Exec: block = &FileBlock{} default: p.log.CDebugf(context.TODO(), "Skipping prefetch for entry of "+ "unknown type %d", entry.Type) continue } _ = p.request(priority, kmd, entry.BlockPointer, block, entry.entryName, doneCh, errCh) numBlocks++ } return doneCh, errCh, numBlocks } // PrefetchBlock implements the Prefetcher interface for blockPrefetcher. func (p *blockPrefetcher) PrefetchBlock(block Block, ptr BlockPointer, kmd KeyMetadata, priority int) (<-chan struct{}, <-chan struct{}, error) { // TODO: Remove this log line. p.log.CDebugf(context.TODO(), "Prefetching block by request from "+ "upstream component. Priority: %d", priority) doneCh := make(chan struct{}, 1) errCh := make(chan struct{}, 1) return doneCh, errCh, p.request(priority, kmd, ptr, block, "", doneCh, errCh) } // PrefetchAfterBlockRetrieved implements the Prefetcher interface for // blockPrefetcher. Returns a channel that is closed once all the prefetches // complete. func (p *blockPrefetcher) PrefetchAfterBlockRetrieved(b Block, ptr BlockPointer, kmd KeyMetadata) (doneCh, errCh <-chan struct{}, numBlocks int) { switch b := b.(type) { case *FileBlock: if b.IsInd { doneCh, errCh, numBlocks = p.prefetchIndirectFileBlock(b, kmd) } case *DirBlock: if b.IsInd { doneCh, errCh, numBlocks = p.prefetchIndirectDirBlock(b, kmd) } else { doneCh, errCh, numBlocks = p.prefetchDirectDirBlock(ptr, b, kmd) } default: // Skipping prefetch for block of unknown type (likely CommonBlock) } return doneCh, errCh, numBlocks } // Shutdown implements the Prefetcher interface for blockPrefetcher. 
func (p *blockPrefetcher) Shutdown() <-chan struct{} { select { case <-p.shutdownCh: default: close(p.shutdownCh) } return p.doneCh } // ShutdownCh implements the Prefetcher interface for blockPrefetcher. func (p *blockPrefetcher) ShutdownCh() <-chan struct{} { return p.shutdownCh }
1
17,689
This was a major PitA to find: it was causing some huge goroutine leaks and unfinished prefetches.
keybase-kbfs
go
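A toy Go program showing the shape of the leak described above: when a request path returns early without ever signalling the caller's done channel, every goroutine waiting on that channel blocks until its own timeout (or forever). All names here are made up for illustration.

```go
package main

import (
	"fmt"
	"time"
)

// request stands in for the prefetcher's request(): the early return skips
// the code that would eventually signal doneCh, so the waiter never wakes up.
func request(alreadyCached bool, doneCh chan<- struct{}) {
	if alreadyCached {
		return // bug shape: nobody will ever signal doneCh
	}
	doneCh <- struct{}{}
}

func main() {
	doneCh := make(chan struct{}, 1)
	go request(true, doneCh)

	select {
	case <-doneCh:
		fmt.Println("prefetch finished")
	case <-time.After(100 * time.Millisecond):
		fmt.Println("waiter leaked: doneCh was never signalled")
	}
}
```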
@@ -84,10 +84,8 @@ module Mongoid #:nodoc # # @since 2.1.8 def prohibited_methods - @prohibited_methods ||= MODULES.inject([]) do |methods, mod| - methods.tap do |mets| - mets << mod.instance_methods.map{ |m| m.to_sym } - end + @prohibited_methods ||= MODULES.map do |mod| + mod.instance_methods.map{ |m| m.to_sym } end.flatten end end
1
# encoding: utf-8 module Mongoid #:nodoc module Components #:nodoc extend ActiveSupport::Concern # All modules that a +Document+ is composed of are defined in this # module, to keep the document class from getting too cluttered. included do extend ActiveModel::Translation extend Mongoid::Finders class_attribute :paranoid end include ActiveModel::Conversion include ActiveModel::MassAssignmentSecurity include ActiveModel::Naming include ActiveModel::Observing include ActiveModel::Serializers::JSON include ActiveModel::Serializers::Xml include Mongoid::Atomic include Mongoid::Dirty include Mongoid::Attributes include Mongoid::Collections include Mongoid::Copyable include Mongoid::Extras include Mongoid::Fields include Mongoid::Hierarchy include Mongoid::Indexes include Mongoid::Inspection include Mongoid::JSON include Mongoid::Matchers include Mongoid::NestedAttributes include Mongoid::Persistence include Mongoid::Relations include Mongoid::Reloading include Mongoid::Safety include Mongoid::Scoping include Mongoid::Serialization include Mongoid::Sharding include Mongoid::State include Mongoid::Threaded::Lifecycle include Mongoid::Timestamps::Timeless include Mongoid::Validations include Mongoid::Callbacks include Mongoid::MultiDatabase MODULES = [ Mongoid::Atomic, Mongoid::Attributes, Mongoid::Collections, Mongoid::Copyable, Mongoid::Dirty, Mongoid::Extras, Mongoid::Fields, Mongoid::Hierarchy, Mongoid::Indexes, Mongoid::Inspection, Mongoid::JSON, Mongoid::Matchers, Mongoid::NestedAttributes, Mongoid::Persistence, Mongoid::Relations, Mongoid::Relations::Proxy, Mongoid::Safety, Mongoid::Scoping, Mongoid::Serialization, Mongoid::Sharding, Mongoid::State, Mongoid::Validations, Mongoid::Callbacks, Mongoid::MultiDatabase, ] class << self # Get a list of methods that would be a bad idea to define as field names # or override when including Mongoid::Document. # # @example Bad thing! # Mongoid::Components.prohibited_methods # # @return [ Array<Symbol> ] # # @since 2.1.8 def prohibited_methods @prohibited_methods ||= MODULES.inject([]) do |methods, mod| methods.tap do |mets| mets << mod.instance_methods.map{ |m| m.to_sym } end end.flatten end end end end
1
9,488
You can use Enumerable#flat_map here.
mongodb-mongoid
rb
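A standalone Ruby snippet contrasting the original `inject` + `tap` + `flatten` shape with the `flat_map` form the reviewer suggests; plain Ruby modules are used here instead of the Mongoid ones.

```ruby
modules = [Comparable, Enumerable]

# original shape: build nested arrays, then flatten
nested = modules.inject([]) do |methods, mod|
  methods.tap { |mets| mets << mod.instance_methods.map(&:to_sym) }
end.flatten

# flat_map does the map-and-flatten in one step
flat = modules.flat_map { |mod| mod.instance_methods.map(&:to_sym) }

raise "results differ" unless nested == flat
```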
@@ -155,6 +155,7 @@ public class AbstractVmNode extends AbstractNode implements VmNode { */ @Deprecated public void dump(final String prefix, final boolean recurse, final Writer writer) { + @SuppressWarnings("PMD.CloseResource") final PrintWriter printWriter = writer instanceof PrintWriter ? (PrintWriter) writer : new PrintWriter(writer); printWriter.println(toString(prefix)); if (children != null && recurse) {
1
package net.sourceforge.pmd.lang.vm.ast; /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ import java.io.PrintWriter; import java.io.Writer; import org.apache.commons.lang3.text.StrBuilder; import net.sourceforge.pmd.lang.ast.AbstractNode; /** * */ public class AbstractVmNode extends AbstractNode implements VmNode { /** */ // TODO - It seems that this field is only valid when parsing, and should // not be kept around. protected VmParser parser; /** */ protected int info; // added /** */ public boolean state; /** */ protected boolean invalid = false; /** */ protected Token first; /** */ protected Token last; protected String templateName; /** * @param i */ public AbstractVmNode(final int i) { super(i); } /** * @param p * @param i */ public AbstractVmNode(final VmParser p, final int i) { this(i); parser = p; templateName = parser.currentTemplateName; } @Override public void jjtOpen() { first = parser.getToken(1); // added if (beginLine == -1 && parser.token.next != null) { beginLine = parser.token.next.beginLine; beginColumn = parser.token.next.beginColumn; } } @Override public void jjtClose() { last = parser.getToken(0); // added if (beginLine == -1 && (children == null || children.length == 0)) { beginColumn = parser.token.beginColumn; } if (beginLine == -1) { beginLine = parser.token.beginLine; } endLine = parser.token.endLine; endColumn = parser.token.endColumn; } /** * @param t */ public void setFirstToken(final Token t) { this.first = t; } public Token getFirstToken() { return first; } public Token getLastToken() { return last; } @Override public Object jjtAccept(final VmParserVisitor visitor, final Object data) { return visitor.visit(this, data); } @Override public Object childrenAccept(final VmParserVisitor visitor, final Object data) { if (children != null) { for (int i = 0; i < children.length; ++i) { ((VmNode) children[i]).jjtAccept(visitor, data); } } return data; } @Override public String getXPathNodeName() { return VmParserTreeConstants.jjtNodeName[id]; } /* * You can override these two methods in subclasses of SimpleNode to * customize the way the node appears when the tree is dumped. If your * output uses more than one line you should override toString(String), * otherwise overriding toString() is probably all you need to do. */ /** * @param prefix * @return String representation of this node. */ public String toString(final String prefix) { return prefix + toString(); } /** * Override this method if you want to customize how the node dumps out its * children. * * @param prefix * @deprecated This method will be removed with PMD 7. The rule designer is a better way to inspect nodes. */ @Deprecated public void dump(final String prefix, final boolean recurse, final Writer writer) { final PrintWriter printWriter = writer instanceof PrintWriter ? 
(PrintWriter) writer : new PrintWriter(writer); printWriter.println(toString(prefix)); if (children != null && recurse) { for (int i = 0; i < children.length; ++i) { final AbstractVmNode n = (AbstractVmNode) children[i]; if (n != null) { n.dump(prefix + " ", recurse, printWriter); } } } } // All additional methods /* * see org.apache.velocity.runtime.parser.node.Node#literal() */ public String literal() { // if we have only one string, just return it and avoid // buffer allocation. VELOCITY-606 if (first != null && first.equals(last)) { return NodeUtils.tokenLiteral(first); } Token t = first; final StrBuilder sb = new StrBuilder(NodeUtils.tokenLiteral(t)); while (t != null && !t.equals(last)) { t = t.next; sb.append(NodeUtils.tokenLiteral(t)); } return sb.toString(); } /* * see org.apache.velocity.runtime.parser.node.Node#getType() */ public int getType() { return id; } /* * see org.apache.velocity.runtime.parser.node.Node#setInfo(int) */ public void setInfo(final int info) { this.info = info; } /* * see org.apache.velocity.runtime.parser.node.Node#getInfo() */ public int getInfo() { return info; } /* * see org.apache.velocity.runtime.parser.node.Node#setInvalid() */ public void setInvalid() { invalid = true; } /* * see org.apache.velocity.runtime.parser.node.Node#isInvalid() */ public boolean isInvalid() { return invalid; } /* * see org.apache.velocity.runtime.parser.node.Node#getLine() */ public int getLine() { return first.beginLine; } /* * see org.apache.velocity.runtime.parser.node.Node#getColumn() */ public int getColumn() { return first.beginColumn; } public String getTemplateName() { return templateName; } }
1
16,250
Same potential FP: the stream is provided from outside (here as a method parameter), so we shouldn't be responsible for closing it here, should we?
pmd-pmd
java
@@ -26,11 +26,12 @@ Upcase::Application.configure do config.log_level = :info config.log_formatter = ::Logger::Formatter.new - config.middleware.use \ - Rack::SslEnforcer, - hsts: false, - strict: true, - redirect_to: "https://#{ENV["APP_DOMAIN"]}" + config.force_ssl = true + config.middleware.insert_before( + ActionDispatch::SSL, + Rack::CanonicalHost, + ENV.fetch("APP_DOMAIN"), + ) config.action_mailer.delivery_method = :smtp config.action_mailer.smtp_settings = MAIL_SETTINGS
1
require Rails.root.join('config/initializers/mail') Upcase::Application.configure do config.cache_classes = true config.consider_all_requests_local = false config.action_controller.perform_caching = true config.action_controller.asset_host = ENV.fetch("ASSET_HOST") config.assets.compile = false config.assets.digest = true config.assets.js_compressor = :uglifier # Serve static assets, which allows us to populate the CDN with compressed # assets if a client supports them config.serve_static_files = true # Fiddling with expires values is kind of pointless as we use hashing to bust # caches during redeploys, but it should bump up our google pagespeed # ranking. config.static_cache_control = 'public, max-age=31536000' config.eager_load = true config.i18n.fallbacks = true config.active_support.deprecation = :notify config.log_level = :info config.log_formatter = ::Logger::Formatter.new config.middleware.use \ Rack::SslEnforcer, hsts: false, strict: true, redirect_to: "https://#{ENV["APP_DOMAIN"]}" config.action_mailer.delivery_method = :smtp config.action_mailer.smtp_settings = MAIL_SETTINGS config.action_mailer.perform_deliveries = true config.action_mailer.default(charset: "utf-8") config.action_mailer.raise_delivery_errors = true PAPERCLIP_STORAGE_OPTIONS = { storage: :s3, s3_credentials: "#{Rails.root}/config/s3.yml", s3_protocol: 'https' } GITHUB_KEY = ENV['GITHUB_KEY'] GITHUB_SECRET = ENV['GITHUB_SECRET'] config.font_assets.origin = "https://#{ENV["APP_DOMAIN"]}" end
1
15,847
Put a comma after the last parameter of a multiline method call.
thoughtbot-upcase
rb
@@ -67,10 +67,8 @@ public class VerbsManager { output -> { final Value verbClass = (Value) output; - final Verb verb = verbClass.newInstance().as(Verb.class); - try { - verb.install(container); + verbClass.invokeMember("install", container); } catch (ScriptException se) { errorCallback.accept(se); }
1
/* * Copyright (C) 2015-2017 PÂRIS Quentin * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ package org.phoenicis.engines; import org.graalvm.polyglot.Value; import org.phoenicis.repository.dto.ApplicationDTO; import org.phoenicis.repository.dto.CategoryDTO; import org.phoenicis.repository.dto.RepositoryDTO; import org.phoenicis.repository.dto.TypeDTO; import org.phoenicis.scripts.exceptions.ScriptException; import org.phoenicis.scripts.interpreter.ScriptInterpreter; import org.phoenicis.scripts.session.InteractiveScriptSession; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Consumer; /** * manages the Verbs */ public class VerbsManager { private final ScriptInterpreter scriptInterpreter; /** * Constructor * * @param scriptInterpreter The underlying script interpreter */ public VerbsManager(ScriptInterpreter scriptInterpreter) { this.scriptInterpreter = scriptInterpreter; } /** * Installs a Verb in a given container * * @param engineId ID of the engine which provides the Verb (e.g. "Wine") * @param container name of the container * @param verbId ID of the Verb * @param doneCallback callback executed after the script ran * @param errorCallback callback executed in case of an error */ public void installVerb(String engineId, String container, String verbId, Runnable doneCallback, Consumer<Exception> errorCallback) { final InteractiveScriptSession interactiveScriptSession = scriptInterpreter.createInteractiveSession(); final String script = String.format("include(\"%s\");", verbId); interactiveScriptSession.eval(script, output -> { final Value verbClass = (Value) output; final Verb verb = verbClass.newInstance().as(Verb.class); try { verb.install(container); } catch (ScriptException se) { errorCallback.accept(se); } doneCallback.run(); }, errorCallback); } /** * Installs a list of Verbs in a given container * * @param engineId ID of the engine which provides the Verbs (e.g. 
"Wine") * @param container name of the container * @param verbIds A list of verb ids * @param doneCallback callback executed after the scripts ran * @param errorCallback callback executed in case of an error */ public void installVerbs(String engineId, String container, List<String> verbIds, Runnable doneCallback, Consumer<Exception> errorCallback) { if (verbIds.isEmpty()) { doneCallback.run(); } else { final String verbId = verbIds.get(0); final List<String> remainingVerbIds = verbIds.subList(1, verbIds.size()); installVerb(engineId, container, verbId, // recursively install the remaining verbs in the list () -> installVerbs(engineId, container, remainingVerbIds, doneCallback, errorCallback), errorCallback); } } /** * Fetches the available Verbs * * @param repositoryDTO * @param callback */ public void fetchAvailableVerbs(RepositoryDTO repositoryDTO, Consumer<Map<String, ApplicationDTO>> callback) { Map<String, ApplicationDTO> verbs = new HashMap<>(); // get engine CategoryDTOs List<CategoryDTO> categoryDTOS = new ArrayList<>(); for (TypeDTO typeDTO : repositoryDTO.getTypes()) { if (typeDTO.getId().equals("engines")) { categoryDTOS = typeDTO.getCategories(); } } for (CategoryDTO engine : categoryDTOS) { for (ApplicationDTO applicationDTO : engine.getApplications()) { if (applicationDTO.getId().equals(engine.getId() + ".verbs")) { verbs.put(engine.getId().replaceAll("^.*\\.", ""), applicationDTO); } } } callback.accept(verbs); } }
1
14,123
It seems like this is the only way to access JavaScript `static` methods from Java.
PhoenicisOrg-phoenicis
java
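A self-contained example of what `Value.invokeMember("install", container)` relies on: with the GraalVM polyglot API, a JavaScript class is itself a value, and its static members can be invoked directly without `newInstance()`. The JavaScript source below is invented for illustration.

```java
import org.graalvm.polyglot.Context;
import org.graalvm.polyglot.Value;

public class StaticMemberExample {
    public static void main(String[] args) {
        try (Context context = Context.create("js")) {
            // Evaluate a JS class expression; the resulting Value is the class itself.
            Value verbClass = context.eval("js",
                "(class Verb { static install(container) { return 'installing into ' + container; } })");

            // No newInstance() needed: invokeMember resolves the static method
            // directly on the class value.
            Value result = verbClass.invokeMember("install", "my-container");
            System.out.println(result.asString());
        }
    }
}
```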
@@ -4,8 +4,6 @@ * A temporary workaround to ensure the same version of React * is always used across multiple entrypoints. * - * @private - * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License");
1
/** * WordPress Element shim. * * A temporary workaround to ensure the same version of React * is always used across multiple entrypoints. * * @private * * Site Kit by Google, Copyright 2020 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import * as element from '@wordpress/element__non-shim'; if ( global.googlesitekit === undefined ) { global.googlesitekit = {}; } const { __experimentalCreateInterpolateElement, Children, cloneElement, Component, concatChildren, createContext, createElement, createPortal, createRef, findDOMNode, forwardRef, Fragment, isEmptyElement, isValidElement, lazy, memo, Platform, RawHTML, render, renderToString, StrictMode, Suspense, switchChildrenNodeName, unmountComponentAtNode, useCallback, useContext, useDebugValue, useEffect, useImperativeHandle, useLayoutEffect, useMemo, useReducer, useRef, useState, } = global.googlesitekit._element || element; export { __experimentalCreateInterpolateElement, Children, cloneElement, Component, concatChildren, createContext, createElement, createPortal, createRef, findDOMNode, forwardRef, Fragment, isEmptyElement, isValidElement, lazy, memo, Platform, RawHTML, render, renderToString, StrictMode, Suspense, switchChildrenNodeName, unmountComponentAtNode, useCallback, useContext, useDebugValue, useEffect, useImperativeHandle, useLayoutEffect, useMemo, useReducer, useRef, useState, }; if ( global.googlesitekit._element === undefined ) { global.googlesitekit._element = { __experimentalCreateInterpolateElement, Children, cloneElement, Component, concatChildren, createContext, createElement, createPortal, createRef, findDOMNode, forwardRef, Fragment, isEmptyElement, isValidElement, lazy, memo, Platform, RawHTML, render, renderToString, StrictMode, Suspense, switchChildrenNodeName, unmountComponentAtNode, useCallback, useContext, useDebugValue, useEffect, useImperativeHandle, useLayoutEffect, useMemo, useReducer, useRef, useState, }; }
1
28,345
Why was this removed here?
google-site-kit-wp
js
@@ -4,12 +4,6 @@ package pslice -import "github.com/ethersphere/bee/pkg/swarm" - -func PSlicePeers(p *PSlice) []swarm.Address { - return p.peers -} - func PSliceBins(p *PSlice) []uint { return p.bins }
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package pslice import "github.com/ethersphere/bee/pkg/swarm" func PSlicePeers(p *PSlice) []swarm.Address { return p.peers } func PSliceBins(p *PSlice) []uint { return p.bins }
1
9,747
I am aware that you did not add these, but might I ask why we need these one-liner funcs instead of just exporting the struct fields in the first place?
ethersphere-bee
go
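The two options the reviewer is weighing, shown with simplified stand-in types rather than the real `PSlice`: accessor functions in an export file keep the fields unexported for production code but require the extra one-liners, while exporting the fields removes the helpers at the cost of letting any package read or mutate them.

```go
package pslice

// Option 1: keep fields unexported and expose them to tests only via an
// export file, e.g. an accessor like
//   func PSliceBins(p *PSlice) []uint { return p.bins }
type PSlice struct {
	bins []uint
}

// Option 2 (hypothetical alternative): export the field directly; no helper
// is needed, but encapsulation is weaker because callers can mutate Bins.
type PSliceExported struct {
	Bins []uint
}
```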
@@ -296,7 +296,7 @@ abstract class BaseTableScan implements TableScan { } requiredFieldIds.addAll(selectedIds); - return TypeUtil.select(schema, requiredFieldIds); + return TypeUtil.project(schema, requiredFieldIds); } else if (context.projectedSchema() != null) { return context.projectedSchema();
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.Set; import org.apache.iceberg.events.Listeners; import org.apache.iceberg.events.ScanEvent; import org.apache.iceberg.expressions.Binder; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.relocated.com.google.common.base.MoreObjects; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.util.TableScanUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * Base class for {@link TableScan} implementations. */ abstract class BaseTableScan implements TableScan { private static final Logger LOG = LoggerFactory.getLogger(BaseTableScan.class); private static final DateTimeFormatter DATE_FORMAT = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss.SSS"); private final TableOperations ops; private final Table table; private final Schema schema; private final TableScanContext context; protected BaseTableScan(TableOperations ops, Table table, Schema schema) { this(ops, table, schema, new TableScanContext()); } protected BaseTableScan(TableOperations ops, Table table, Schema schema, TableScanContext context) { this.ops = ops; this.table = table; this.schema = schema; this.context = context; } protected TableOperations tableOps() { return ops; } protected Long snapshotId() { return context.snapshotId(); } protected boolean colStats() { return context.returnColumnStats(); } protected boolean shouldIgnoreResiduals() { return context.ignoreResiduals(); } protected Collection<String> selectedColumns() { return context.selectedColumns(); } protected Map<String, String> options() { return context.options(); } protected TableScanContext context() { return context; } @SuppressWarnings("checkstyle:HiddenField") protected abstract TableScan newRefinedScan( TableOperations ops, Table table, Schema schema, TableScanContext context); @SuppressWarnings("checkstyle:HiddenField") protected abstract CloseableIterable<FileScanTask> planFiles( TableOperations ops, Snapshot snapshot, Expression rowFilter, boolean ignoreResiduals, boolean caseSensitive, boolean colStats); @Override public Table table() { return table; } @Override public TableScan appendsBetween(long fromSnapshotId, long toSnapshotId) { throw new UnsupportedOperationException("Incremental scan is not supported"); } @Override public TableScan 
appendsAfter(long fromSnapshotId) { throw new UnsupportedOperationException("Incremental scan is not supported"); } @Override public TableScan useSnapshot(long scanSnapshotId) { Preconditions.checkArgument(context.snapshotId() == null, "Cannot override snapshot, already set to id=%s", context.snapshotId()); Preconditions.checkArgument(ops.current().snapshot(scanSnapshotId) != null, "Cannot find snapshot with ID %s", scanSnapshotId); return newRefinedScan( ops, table, schema, context.useSnapshotId(scanSnapshotId)); } @Override public TableScan asOfTime(long timestampMillis) { Preconditions.checkArgument(context.snapshotId() == null, "Cannot override snapshot, already set to id=%s", context.snapshotId()); Long lastSnapshotId = null; for (HistoryEntry logEntry : ops.current().snapshotLog()) { if (logEntry.timestampMillis() <= timestampMillis) { lastSnapshotId = logEntry.snapshotId(); } } // the snapshot ID could be null if no entries were older than the requested time. in that case, // there is no valid snapshot to read. Preconditions.checkArgument(lastSnapshotId != null, "Cannot find a snapshot older than %s", formatTimestampMillis(timestampMillis)); return useSnapshot(lastSnapshotId); } @Override public TableScan option(String property, String value) { return newRefinedScan( ops, table, schema, context.withOption(property, value)); } @Override public TableScan project(Schema projectedSchema) { return newRefinedScan( ops, table, schema, context.project(projectedSchema)); } @Override public TableScan caseSensitive(boolean scanCaseSensitive) { return newRefinedScan( ops, table, schema, context.setCaseSensitive(scanCaseSensitive)); } @Override public TableScan includeColumnStats() { return newRefinedScan( ops, table, schema, context.shouldReturnColumnStats(true)); } @Override public TableScan select(Collection<String> columns) { return newRefinedScan( ops, table, schema, context.selectColumns(columns)); } @Override public TableScan filter(Expression expr) { return newRefinedScan(ops, table, schema, context.filterRows(Expressions.and(context.rowFilter(), expr))); } @Override public Expression filter() { return context.rowFilter(); } @Override public TableScan ignoreResiduals() { return newRefinedScan( ops, table, schema, context.ignoreResiduals(true)); } @Override public CloseableIterable<FileScanTask> planFiles() { Snapshot snapshot = snapshot(); if (snapshot != null) { LOG.info("Scanning table {} snapshot {} created at {} with filter {}", table, snapshot.snapshotId(), formatTimestampMillis(snapshot.timestampMillis()), context.rowFilter()); Listeners.notifyAll( new ScanEvent(table.name(), snapshot.snapshotId(), context.rowFilter(), schema())); return planFiles(ops, snapshot, context.rowFilter(), context.ignoreResiduals(), context.caseSensitive(), context.returnColumnStats()); } else { LOG.info("Scanning empty table {}", table); return CloseableIterable.empty(); } } @Override public CloseableIterable<CombinedScanTask> planTasks() { Map<String, String> options = context.options(); long splitSize; if (options.containsKey(TableProperties.SPLIT_SIZE)) { splitSize = Long.parseLong(options.get(TableProperties.SPLIT_SIZE)); } else { splitSize = targetSplitSize(); } int lookback; if (options.containsKey(TableProperties.SPLIT_LOOKBACK)) { lookback = Integer.parseInt(options.get(TableProperties.SPLIT_LOOKBACK)); } else { lookback = ops.current().propertyAsInt( TableProperties.SPLIT_LOOKBACK, TableProperties.SPLIT_LOOKBACK_DEFAULT); } long openFileCost; if 
(options.containsKey(TableProperties.SPLIT_OPEN_FILE_COST)) { openFileCost = Long.parseLong(options.get(TableProperties.SPLIT_OPEN_FILE_COST)); } else { openFileCost = ops.current().propertyAsLong( TableProperties.SPLIT_OPEN_FILE_COST, TableProperties.SPLIT_OPEN_FILE_COST_DEFAULT); } CloseableIterable<FileScanTask> fileScanTasks = planFiles(); CloseableIterable<FileScanTask> splitFiles = TableScanUtil.splitFiles(fileScanTasks, splitSize); return TableScanUtil.planTasks(splitFiles, splitSize, lookback, openFileCost); } @Override public Schema schema() { return lazyColumnProjection(); } @Override public Snapshot snapshot() { return context.snapshotId() != null ? ops.current().snapshot(context.snapshotId()) : ops.current().currentSnapshot(); } @Override public boolean isCaseSensitive() { return context.caseSensitive(); } @Override public String toString() { return MoreObjects.toStringHelper(this) .add("table", table) .add("projection", schema().asStruct()) .add("filter", context.rowFilter()) .add("ignoreResiduals", context.ignoreResiduals()) .add("caseSensitive", context.caseSensitive()) .toString(); } /** * To be able to make refinements {@link #select(Collection)} and {@link #caseSensitive(boolean)} in any order, * we resolve the schema to be projected lazily here. * * @return the Schema to project */ private Schema lazyColumnProjection() { Collection<String> selectedColumns = context.selectedColumns(); if (selectedColumns != null) { Set<Integer> requiredFieldIds = Sets.newHashSet(); // all of the filter columns are required requiredFieldIds.addAll( Binder.boundReferences(schema.asStruct(), Collections.singletonList(context.rowFilter()), context.caseSensitive())); // all of the projection columns are required Set<Integer> selectedIds; if (context.caseSensitive()) { selectedIds = TypeUtil.getProjectedIds(schema.select(selectedColumns)); } else { selectedIds = TypeUtil.getProjectedIds(schema.caseInsensitiveSelect(selectedColumns)); } requiredFieldIds.addAll(selectedIds); return TypeUtil.select(schema, requiredFieldIds); } else if (context.projectedSchema() != null) { return context.projectedSchema(); } return schema; } private static String formatTimestampMillis(long millis) { return DATE_FORMAT.format(LocalDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.systemDefault())); } }
1
40,620
I agree with this because it is the opposite of `GetProjectedIds` used above.
apache-iceberg
java
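To make the projection logic referenced by the comment above concrete, here is a small, self-contained sketch of how a lazily projected schema is assembled from the filter's bound field IDs plus the case-insensitively selected columns. The two-column schema, the filter, and the selected name are hypothetical, but the Iceberg calls (Binder.boundReferences, Schema.caseInsensitiveSelect, TypeUtil.getProjectedIds, TypeUtil.select) are the same ones lazyColumnProjection uses in the file above:

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import org.apache.iceberg.Schema;
import org.apache.iceberg.expressions.Binder;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.expressions.Expressions;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;

public class LazyProjectionSketch {
  public static void main(String[] args) {
    // Hypothetical two-column table schema.
    Schema schema = new Schema(
        Types.NestedField.required(1, "id", Types.LongType.get()),
        Types.NestedField.optional(2, "data", Types.StringType.get()));

    // A row filter on "id" plus a case-insensitive selection of "DATA".
    Expression filter = Expressions.greaterThan("id", 5L);

    Set<Integer> requiredFieldIds = new HashSet<>();
    // Every column referenced by the filter is required.
    requiredFieldIds.addAll(
        Binder.boundReferences(schema.asStruct(), Collections.singletonList(filter), false));
    // Every explicitly selected column is required as well.
    requiredFieldIds.addAll(
        TypeUtil.getProjectedIds(schema.caseInsensitiveSelect(Collections.singletonList("DATA"))));

    // The projected schema keeps "id" (needed by the filter) and "data" (selected).
    System.out.println(TypeUtil.select(schema, requiredFieldIds));
  }
}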
@@ -632,7 +632,7 @@ class InstanceAttribute(dict): 'disableApiTermination', 'instanceInitiatedShutdownBehavior', 'rootDeviceName', 'blockDeviceMapping', 'sourceDestCheck', - 'groupSet'] + 'groupSet', 'productCodes', 'ebsOptimized',] def __init__(self, parent=None): dict.__init__(self)
1
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2010, Eucalyptus Systems, Inc. # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. """ Represents an EC2 Instance """ import boto from boto.ec2.ec2object import EC2Object, TaggedEC2Object from boto.resultset import ResultSet from boto.ec2.address import Address from boto.ec2.blockdevicemapping import BlockDeviceMapping from boto.ec2.image import ProductCodes from boto.ec2.networkinterface import NetworkInterface from boto.ec2.group import Group import base64 class InstanceState(object): """ The state of the instance. :ivar code: The low byte represents the state. The high byte is an opaque internal value and should be ignored. Valid values: * 0 (pending) * 16 (running) * 32 (shutting-down) * 48 (terminated) * 64 (stopping) * 80 (stopped) :ivar name: The name of the state of the instance. Valid values: * "pending" * "running" * "shutting-down" * "terminated" * "stopping" * "stopped" """ def __init__(self, code=0, name=None): self.code = code self.name = name def __repr__(self): return '%s(%d)' % (self.name, self.code) def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name == 'code': self.code = int(value) elif name == 'name': self.name = value else: setattr(self, name, value) class InstancePlacement(object): """ The location where the instance launched. :ivar zone: The Availability Zone of the instance. :ivar group_name: The name of the placement group the instance is in (for cluster compute instances). :ivar tenancy: The tenancy of the instance (if the instance is running within a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. """ def __init__(self, zone=None, group_name=None, tenancy=None): self.zone = zone self.group_name = group_name self.tenancy = tenancy def __repr__(self): return self.zone def startElement(self, name, attrs, connection): pass def endElement(self, name, value, connection): if name == 'availabilityZone': self.zone = value elif name == 'groupName': self.group_name = value elif name == 'tenancy': self.tenancy = value else: setattr(self, name, value) class Reservation(EC2Object): """ Represents a Reservation response object. :ivar id: The unique ID of the Reservation. :ivar owner_id: The unique ID of the owner of the Reservation. :ivar groups: A list of Group objects representing the security groups associated with launched instances. 
:ivar instances: A list of Instance objects launched in this Reservation. """ def __init__(self, connection=None): super(Reservation, self).__init__(connection) self.id = None self.owner_id = None self.groups = [] self.instances = [] def __repr__(self): return 'Reservation:%s' % self.id def startElement(self, name, attrs, connection): if name == 'instancesSet': self.instances = ResultSet([('item', Instance)]) return self.instances elif name == 'groupSet': self.groups = ResultSet([('item', Group)]) return self.groups else: return None def endElement(self, name, value, connection): if name == 'reservationId': self.id = value elif name == 'ownerId': self.owner_id = value else: setattr(self, name, value) def stop_all(self, dry_run=False): for instance in self.instances: instance.stop(dry_run=dry_run) class Instance(TaggedEC2Object): """ Represents an instance. :ivar id: The unique ID of the Instance. :ivar groups: A list of Group objects representing the security groups associated with the instance. :ivar public_dns_name: The public dns name of the instance. :ivar private_dns_name: The private dns name of the instance. :ivar state: The string representation of the instance's current state. :ivar state_code: An integer representation of the instance's current state. :ivar previous_state: The string representation of the instance's previous state. :ivar previous_state_code: An integer representation of the instance's current state. :ivar key_name: The name of the SSH key associated with the instance. :ivar instance_type: The type of instance (e.g. m1.small). :ivar launch_time: The time the instance was launched. :ivar image_id: The ID of the AMI used to launch this instance. :ivar placement: The availability zone in which the instance is running. :ivar placement_group: The name of the placement group the instance is in (for cluster compute instances). :ivar placement_tenancy: The tenancy of the instance, if the instance is running within a VPC. An instance with a tenancy of dedicated runs on a single-tenant hardware. :ivar kernel: The kernel associated with the instance. :ivar ramdisk: The ramdisk associated with the instance. :ivar architecture: The architecture of the image (i386|x86_64). :ivar hypervisor: The hypervisor used. :ivar virtualization_type: The type of virtualization used. :ivar product_codes: A list of product codes associated with this instance. :ivar ami_launch_index: This instances position within it's launch group. :ivar monitored: A boolean indicating whether monitoring is enabled or not. :ivar monitoring_state: A string value that contains the actual value of the monitoring element returned by EC2. :ivar spot_instance_request_id: The ID of the spot instance request if this is a spot instance. :ivar subnet_id: The VPC Subnet ID, if running in VPC. :ivar vpc_id: The VPC ID, if running in VPC. :ivar private_ip_address: The private IP address of the instance. :ivar ip_address: The public IP address of the instance. :ivar platform: Platform of the instance (e.g. Windows) :ivar root_device_name: The name of the root device. :ivar root_device_type: The root device type (ebs|instance-store). :ivar block_device_mapping: The Block Device Mapping for the instance. :ivar state_reason: The reason for the most recent state transition. :ivar groups: List of security Groups associated with the instance. :ivar interfaces: List of Elastic Network Interfaces associated with this instance. :ivar ebs_optimized: Whether instance is using optimized EBS volumes or not. 
:ivar instance_profile: A Python dict containing the instance profile id and arn associated with this instance. """ def __init__(self, connection=None): super(Instance, self).__init__(connection) self.id = None self.dns_name = None self.public_dns_name = None self.private_dns_name = None self.key_name = None self.instance_type = None self.launch_time = None self.image_id = None self.kernel = None self.ramdisk = None self.product_codes = ProductCodes() self.ami_launch_index = None self.monitored = False self.monitoring_state = None self.spot_instance_request_id = None self.subnet_id = None self.vpc_id = None self.private_ip_address = None self.ip_address = None self.requester_id = None self._in_monitoring_element = False self.persistent = False self.root_device_name = None self.root_device_type = None self.block_device_mapping = None self.state_reason = None self.group_name = None self.client_token = None self.eventsSet = None self.groups = [] self.platform = None self.interfaces = [] self.hypervisor = None self.virtualization_type = None self.architecture = None self.instance_profile = None self._previous_state = None self._state = InstanceState() self._placement = InstancePlacement() def __repr__(self): return 'Instance:%s' % self.id @property def state(self): return self._state.name @property def state_code(self): return self._state.code @property def previous_state(self): if self._previous_state: return self._previous_state.name return None @property def previous_state_code(self): if self._previous_state: return self._previous_state.code return 0 @property def placement(self): return self._placement.zone @property def placement_group(self): return self._placement.group_name @property def placement_tenancy(self): return self._placement.tenancy def startElement(self, name, attrs, connection): retval = super(Instance, self).startElement(name, attrs, connection) if retval is not None: return retval if name == 'monitoring': self._in_monitoring_element = True elif name == 'blockDeviceMapping': self.block_device_mapping = BlockDeviceMapping() return self.block_device_mapping elif name == 'productCodes': return self.product_codes elif name == 'stateReason': self.state_reason = SubParse('stateReason') return self.state_reason elif name == 'groupSet': self.groups = ResultSet([('item', Group)]) return self.groups elif name == "eventsSet": self.eventsSet = SubParse('eventsSet') return self.eventsSet elif name == 'networkInterfaceSet': self.interfaces = ResultSet([('item', NetworkInterface)]) return self.interfaces elif name == 'iamInstanceProfile': self.instance_profile = SubParse('iamInstanceProfile') return self.instance_profile elif name == 'currentState': return self._state elif name == 'previousState': self._previous_state = InstanceState() return self._previous_state elif name == 'instanceState': return self._state elif name == 'placement': return self._placement return None def endElement(self, name, value, connection): if name == 'instanceId': self.id = value elif name == 'imageId': self.image_id = value elif name == 'dnsName' or name == 'publicDnsName': self.dns_name = value # backwards compatibility self.public_dns_name = value elif name == 'privateDnsName': self.private_dns_name = value elif name == 'keyName': self.key_name = value elif name == 'amiLaunchIndex': self.ami_launch_index = value elif name == 'previousState': self.previous_state = value elif name == 'instanceType': self.instance_type = value elif name == 'rootDeviceName': self.root_device_name = value elif name == 
'rootDeviceType': self.root_device_type = value elif name == 'launchTime': self.launch_time = value elif name == 'platform': self.platform = value elif name == 'kernelId': self.kernel = value elif name == 'ramdiskId': self.ramdisk = value elif name == 'state': if self._in_monitoring_element: self.monitoring_state = value if value == 'enabled': self.monitored = True self._in_monitoring_element = False elif name == 'spotInstanceRequestId': self.spot_instance_request_id = value elif name == 'subnetId': self.subnet_id = value elif name == 'vpcId': self.vpc_id = value elif name == 'privateIpAddress': self.private_ip_address = value elif name == 'ipAddress': self.ip_address = value elif name == 'requesterId': self.requester_id = value elif name == 'persistent': if value == 'true': self.persistent = True else: self.persistent = False elif name == 'groupName': if self._in_monitoring_element: self.group_name = value elif name == 'clientToken': self.client_token = value elif name == "eventsSet": self.events = value elif name == 'hypervisor': self.hypervisor = value elif name == 'virtualizationType': self.virtualization_type = value elif name == 'architecture': self.architecture = value elif name == 'ebsOptimized': self.ebs_optimized = (value == 'true') else: setattr(self, name, value) def _update(self, updated): self.__dict__.update(updated.__dict__) def update(self, validate=False, dry_run=False): """ Update the instance's state information by making a call to fetch the current instance attributes from the service. :type validate: bool :param validate: By default, if EC2 returns no data about the instance the update method returns quietly. If the validate param is True, however, it will raise a ValueError exception if no data is returned from EC2. """ rs = self.connection.get_all_reservations([self.id], dry_run=dry_run) if len(rs) > 0: r = rs[0] for i in r.instances: if i.id == self.id: self._update(i) elif validate: raise ValueError('%s is not a valid Instance ID' % self.id) return self.state def terminate(self, dry_run=False): """ Terminate the instance """ rs = self.connection.terminate_instances([self.id], dry_run=dry_run) if len(rs) > 0: self._update(rs[0]) def stop(self, force=False, dry_run=False): """ Stop the instance :type force: bool :param force: Forces the instance to stop :rtype: list :return: A list of the instances stopped """ rs = self.connection.stop_instances([self.id], force, dry_run=dry_run) if len(rs) > 0: self._update(rs[0]) def start(self, dry_run=False): """ Start the instance. """ rs = self.connection.start_instances([self.id], dry_run=dry_run) if len(rs) > 0: self._update(rs[0]) def reboot(self, dry_run=False): return self.connection.reboot_instances([self.id], dry_run=dry_run) def get_console_output(self, dry_run=False): """ Retrieves the console output for the instance. :rtype: :class:`boto.ec2.instance.ConsoleOutput` :return: The console output as a ConsoleOutput object """ return self.connection.get_console_output(self.id, dry_run=dry_run) def confirm_product(self, product_code, dry_run=False): return self.connection.confirm_product_instance( self.id, product_code, dry_run=dry_run ) def use_ip(self, ip_address, dry_run=False): """ Associates an Elastic IP to the instance. :type ip_address: Either an instance of :class:`boto.ec2.address.Address` or a string. :param ip_address: The IP address to associate with the instance. 
:rtype: bool :return: True if successful """ if isinstance(ip_address, Address): ip_address = ip_address.public_ip return self.connection.associate_address( self.id, ip_address, dry_run=dry_run ) def monitor(self, dry_run=False): return self.connection.monitor_instance(self.id, dry_run=dry_run) def unmonitor(self, dry_run=False): return self.connection.unmonitor_instance(self.id, dry_run=dry_run) def get_attribute(self, attribute, dry_run=False): """ Gets an attribute from this instance. :type attribute: string :param attribute: The attribute you need information about Valid choices are: * instanceType * kernel * ramdisk * userData * disableApiTermination * instanceInitiatedShutdownBehavior * rootDeviceName * blockDeviceMapping * productCodes * sourceDestCheck * groupSet * ebsOptimized :rtype: :class:`boto.ec2.image.InstanceAttribute` :return: An InstanceAttribute object representing the value of the attribute requested """ return self.connection.get_instance_attribute( self.id, attribute, dry_run=dry_run ) def modify_attribute(self, attribute, value, dry_run=False): """ Changes an attribute of this instance :type attribute: string :param attribute: The attribute you wish to change. * instanceType - A valid instance type (m1.small) * kernel - Kernel ID (None) * ramdisk - Ramdisk ID (None) * userData - Base64 encoded String (None) * disableApiTermination - Boolean (true) * instanceInitiatedShutdownBehavior - stop|terminate * sourceDestCheck - Boolean (true) * groupSet - Set of Security Groups or IDs * ebsOptimized - Boolean (false) :type value: string :param value: The new value for the attribute :rtype: bool :return: Whether the operation succeeded or not """ return self.connection.modify_instance_attribute( self.id, attribute, value, dry_run=dry_run ) def reset_attribute(self, attribute, dry_run=False): """ Resets an attribute of this instance to its default value. :type attribute: string :param attribute: The attribute to reset. Valid values are: kernel|ramdisk :rtype: bool :return: Whether the operation succeeded or not """ return self.connection.reset_instance_attribute( self.id, attribute, dry_run=dry_run ) def create_image(self, name, description=None, no_reboot=False, dry_run=False): """ Will create an AMI from the instance in the running or stopped state. :type name: string :param name: The name of the new image :type description: string :param description: An optional human-readable string describing the contents and purpose of the AMI. :type no_reboot: bool :param no_reboot: An optional flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. 
:rtype: string :return: The new image id """ return self.connection.create_image( self.id, name, description, no_reboot, dry_run=dry_run ) class ConsoleOutput(object): def __init__(self, parent=None): self.parent = parent self.instance_id = None self.timestamp = None self.output = None def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name == 'instanceId': self.instance_id = value elif name == 'timestamp': self.timestamp = value elif name == 'output': self.output = base64.b64decode(value) else: setattr(self, name, value) class InstanceAttribute(dict): ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData', 'disableApiTermination', 'instanceInitiatedShutdownBehavior', 'rootDeviceName', 'blockDeviceMapping', 'sourceDestCheck', 'groupSet'] def __init__(self, parent=None): dict.__init__(self) self.instance_id = None self.request_id = None self._current_value = None def startElement(self, name, attrs, connection): if name == 'blockDeviceMapping': self[name] = BlockDeviceMapping() return self[name] elif name == 'groupSet': self[name] = ResultSet([('item', Group)]) return self[name] else: return None def endElement(self, name, value, connection): if name == 'instanceId': self.instance_id = value elif name == 'requestId': self.request_id = value elif name == 'value': if value == 'true': value = True elif value == 'false': value = False self._current_value = value elif name in self.ValidValues: self[name] = self._current_value class SubParse(dict): def __init__(self, section, parent=None): dict.__init__(self) self.section = section def startElement(self, name, attrs, connection): return None def endElement(self, name, value, connection): if name != self.section: self[name] = value
1
11,404
Add 'sriovNetSupport' to this list as well.
boto-boto
py
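A minimal sketch of the ValidValues list from the patch above with the 'sriovNetSupport' entry the review asks for; only the class attribute is shown, and the rest of InstanceAttribute is assumed unchanged:

class InstanceAttribute(dict):
    # EC2 instance attribute names accepted by get_attribute/modify_attribute;
    # 'sriovNetSupport' is the entry the reviewer asked to add.
    ValidValues = ['instanceType', 'kernel', 'ramdisk', 'userData',
                   'disableApiTermination',
                   'instanceInitiatedShutdownBehavior',
                   'rootDeviceName', 'blockDeviceMapping',
                   'sourceDestCheck', 'groupSet', 'productCodes',
                   'ebsOptimized', 'sriovNetSupport']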
@@ -61,7 +61,9 @@ public class Preferences { } if ((newAccount != null) && newAccount.getAccountNumber() != -1) { accounts.put(newAccount.getUuid(), newAccount); - accountsInOrder.add(newAccount); + if (!accountsInOrder.contains(newAccount)) { + accountsInOrder.add(newAccount); + } newAccount = null; } }
1
package com.fsck.k9; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import android.content.Context; import android.util.Log; import com.fsck.k9.mail.store.RemoteStore; import com.fsck.k9.mailstore.LocalStore; import com.fsck.k9.preferences.StorageEditor; import com.fsck.k9.preferences.Storage; public class Preferences { private static Preferences preferences; public static synchronized Preferences getPreferences(Context context) { Context appContext = context.getApplicationContext(); if (preferences == null) { preferences = new Preferences(appContext); } return preferences; } private Storage mStorage; private Map<String, Account> accounts = null; private List<Account> accountsInOrder = null; private Account newAccount; private Context mContext; private Preferences(Context context) { mStorage = Storage.getStorage(context); mContext = context; if (mStorage.isEmpty()) { Log.i(K9.LOG_TAG, "Preferences storage is zero-size, importing from Android-style preferences"); StorageEditor editor = mStorage.edit(); editor.copy(context.getSharedPreferences("AndroidMail.Main", Context.MODE_PRIVATE)); editor.commit(); } } public synchronized void loadAccounts() { accounts = new HashMap<String, Account>(); accountsInOrder = new LinkedList<Account>(); String accountUuids = getStorage().getString("accountUuids", null); if ((accountUuids != null) && (accountUuids.length() != 0)) { String[] uuids = accountUuids.split(","); for (String uuid : uuids) { Account newAccount = new Account(this, uuid); accounts.put(uuid, newAccount); accountsInOrder.add(newAccount); } } if ((newAccount != null) && newAccount.getAccountNumber() != -1) { accounts.put(newAccount.getUuid(), newAccount); accountsInOrder.add(newAccount); newAccount = null; } } /** * Returns an array of the accounts on the system. If no accounts are * registered the method returns an empty array. * * @return all accounts */ public synchronized List<Account> getAccounts() { if (accounts == null) { loadAccounts(); } return Collections.unmodifiableList(accountsInOrder); } /** * Returns an array of the accounts on the system. If no accounts are * registered the method returns an empty array. * * @return all accounts with {@link Account#isAvailable(Context)} */ public synchronized Collection<Account> getAvailableAccounts() { List<Account> allAccounts = getAccounts(); Collection<Account> retval = new ArrayList<Account>(accounts.size()); for (Account account : allAccounts) { if (account.isEnabled() && account.isAvailable(mContext)) { retval.add(account); } } return retval; } public synchronized Account getAccount(String uuid) { if (accounts == null) { loadAccounts(); } Account account = accounts.get(uuid); return account; } public synchronized Account newAccount() { newAccount = new Account(mContext); accounts.put(newAccount.getUuid(), newAccount); accountsInOrder.add(newAccount); return newAccount; } public synchronized void deleteAccount(Account account) { if (accounts != null) { accounts.remove(account.getUuid()); } if (accountsInOrder != null) { accountsInOrder.remove(account); } try { RemoteStore.removeInstance(account); } catch (Exception e) { Log.e(K9.LOG_TAG, "Failed to reset remote store for account " + account.getUuid(), e); } LocalStore.removeAccount(account); account.deleteCertificates(); account.delete(this); if (newAccount == account) { newAccount = null; } } /** * Returns the Account marked as default. 
If no account is marked as default * the first account in the list is marked as default and then returned. If * there are no accounts on the system the method returns null. */ public Account getDefaultAccount() { String defaultAccountUuid = getStorage().getString("defaultAccountUuid", null); Account defaultAccount = getAccount(defaultAccountUuid); if (defaultAccount == null) { Collection<Account> accounts = getAvailableAccounts(); if (!accounts.isEmpty()) { defaultAccount = accounts.iterator().next(); setDefaultAccount(defaultAccount); } } return defaultAccount; } public void setDefaultAccount(Account account) { getStorage().edit().putString("defaultAccountUuid", account.getUuid()).commit(); } public Storage getStorage() { return mStorage; } public static <T extends Enum<T>> T getEnumStringPref(Storage storage, String key, T defaultEnum) { String stringPref = storage.getString(key, null); if (stringPref == null) { return defaultEnum; } else { try { return Enum.valueOf(defaultEnum.getDeclaringClass(), stringPref); } catch (IllegalArgumentException ex) { Log.w(K9.LOG_TAG, "Unable to convert preference key [" + key + "] value [" + stringPref + "] to enum of type " + defaultEnum.getDeclaringClass(), ex); return defaultEnum; } } } }
1
13,611
Code style issue: `if` body is not wrapped in braces.
k9mail-k-9
java
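The style point in the comment above (every `if` body wrapped in braces, even for a single statement) in a tiny runnable form, using a String as a stand-in for the real Account type:

import java.util.LinkedList;
import java.util.List;

public class BracesStyleSketch {
    public static void main(String[] args) {
        List<String> accountsInOrder = new LinkedList<>();
        String newAccount = "uuid-1";

        // Form the reviewer flags: single-statement if body without braces.
        if (!accountsInOrder.contains(newAccount))
            accountsInOrder.add(newAccount);

        // Preferred form, matching the final patch: body wrapped in braces.
        if (!accountsInOrder.contains(newAccount)) {
            accountsInOrder.add(newAccount);
        }

        System.out.println(accountsInOrder); // prints [uuid-1]
    }
}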
@@ -81,9 +81,8 @@ public class ItunesSearchFragment extends Fragment { if (result != null && result.size() > 0) { gridView.setVisibility(View.VISIBLE); txtvEmpty.setVisibility(View.GONE); - for (Podcast p : result) { - adapter.add(p); - } + + adapter.addAll(result); adapter.notifyDataSetInvalidated(); } else { gridView.setVisibility(View.GONE);
1
package de.danoeh.antennapod.fragment; import android.content.Intent; import android.os.Bundle; import android.support.v4.app.Fragment; import android.support.v4.view.MenuItemCompat; import android.support.v7.widget.SearchView; import android.util.Log; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuInflater; import android.view.MenuItem; import android.view.View; import android.view.ViewGroup; import android.widget.Button; import android.widget.GridView; import android.widget.ProgressBar; import android.widget.TextView; import com.afollestad.materialdialogs.MaterialDialog; import com.squareup.okhttp.OkHttpClient; import com.squareup.okhttp.Request; import com.squareup.okhttp.Response; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; import java.util.ArrayList; import java.util.List; import java.util.Locale; import de.danoeh.antennapod.R; import de.danoeh.antennapod.activity.OnlineFeedViewActivity; import de.danoeh.antennapod.adapter.itunes.ItunesAdapter; import de.danoeh.antennapod.core.ClientConfig; import de.danoeh.antennapod.core.service.download.AntennapodHttpClient; import de.danoeh.antennapod.menuhandler.MenuItemUtils; import rx.Observable; import rx.Subscription; import rx.android.schedulers.AndroidSchedulers; import rx.schedulers.Schedulers; import static de.danoeh.antennapod.adapter.itunes.ItunesAdapter.Podcast; //Searches iTunes store for given string and displays results in a list public class ItunesSearchFragment extends Fragment { private static final String TAG = "ItunesSearchFragment"; private static final String API_URL = "https://itunes.apple.com/search?media=podcast&term=%s"; /** * Adapter responsible with the search results */ private ItunesAdapter adapter; private GridView gridView; private ProgressBar progressBar; private TextView txtvError; private Button butRetry; private TextView txtvEmpty; /** * List of podcasts retreived from the search */ private List<Podcast> searchResults; private List<Podcast> topList; private Subscription subscription; /** * Replace adapter data with provided search results from SearchTask. 
* @param result List of Podcast objects containing search results */ void updateData(List<Podcast> result) { this.searchResults = result; adapter.clear(); if (result != null && result.size() > 0) { gridView.setVisibility(View.VISIBLE); txtvEmpty.setVisibility(View.GONE); for (Podcast p : result) { adapter.add(p); } adapter.notifyDataSetInvalidated(); } else { gridView.setVisibility(View.GONE); txtvEmpty.setVisibility(View.VISIBLE); } } /** * Constructor */ public ItunesSearchFragment() { // Required empty public constructor } @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setHasOptionsMenu(true); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment View root = inflater.inflate(R.layout.fragment_itunes_search, container, false); gridView = (GridView) root.findViewById(R.id.gridView); adapter = new ItunesAdapter(getActivity(), new ArrayList<>()); gridView.setAdapter(adapter); //Show information about the podcast when the list item is clicked gridView.setOnItemClickListener((parent, view1, position, id) -> { Podcast podcast = searchResults.get(position); if (!podcast.feedUrl.contains("itunes.apple.com")) { Intent intent = new Intent(getActivity(), OnlineFeedViewActivity.class); intent.putExtra(OnlineFeedViewActivity.ARG_FEEDURL, podcast.feedUrl); intent.putExtra(OnlineFeedViewActivity.ARG_TITLE, "iTunes"); startActivity(intent); } else { gridView.setVisibility(View.GONE); progressBar.setVisibility(View.VISIBLE); rx.Observable.create((Observable.OnSubscribe<String>) subscriber -> { OkHttpClient client = AntennapodHttpClient.getHttpClient(); Request.Builder httpReq = new Request.Builder() .url(podcast.feedUrl) .header("User-Agent", ClientConfig.USER_AGENT); try { Response response = client.newCall(httpReq.build()).execute(); if (response.isSuccessful()) { String resultString = response.body().string(); JSONObject result = new JSONObject(resultString); JSONObject results = result.getJSONArray("results").getJSONObject(0); String feedUrl = results.getString("feedUrl"); subscriber.onNext(feedUrl); } else { String prefix = getString(R.string.error_msg_prefix); subscriber.onError(new IOException(prefix + response)); } } catch (IOException | JSONException e) { subscriber.onError(e); } subscriber.onCompleted(); }) .subscribeOn(Schedulers.newThread()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(feedUrl -> { progressBar.setVisibility(View.GONE); gridView.setVisibility(View.VISIBLE); Intent intent = new Intent(getActivity(), OnlineFeedViewActivity.class); intent.putExtra(OnlineFeedViewActivity.ARG_FEEDURL, feedUrl); intent.putExtra(OnlineFeedViewActivity.ARG_TITLE, "iTunes"); startActivity(intent); }, error -> { Log.e(TAG, Log.getStackTraceString(error)); progressBar.setVisibility(View.GONE); gridView.setVisibility(View.VISIBLE); String prefix = getString(R.string.error_msg_prefix); new MaterialDialog.Builder(getActivity()) .content(prefix + " " + error.getMessage()) .neutralText(android.R.string.ok) .show(); }); } }); progressBar = (ProgressBar) root.findViewById(R.id.progressBar); txtvError = (TextView) root.findViewById(R.id.txtvError); butRetry = (Button) root.findViewById(R.id.butRetry); txtvEmpty = (TextView) root.findViewById(android.R.id.empty); loadToplist(); return root; } @Override public void onDestroy() { super.onDestroy(); if (subscription != null) { subscription.unsubscribe(); } adapter = null; } @Override public void 
onCreateOptionsMenu(Menu menu, MenuInflater inflater) { super.onCreateOptionsMenu(menu, inflater); inflater.inflate(R.menu.itunes_search, menu); MenuItem searchItem = menu.findItem(R.id.action_search); final SearchView sv = (SearchView) MenuItemCompat.getActionView(searchItem); MenuItemUtils.adjustTextColor(getActivity(), sv); sv.setQueryHint(getString(R.string.search_itunes_label)); sv.setOnQueryTextListener(new android.support.v7.widget.SearchView.OnQueryTextListener() { @Override public boolean onQueryTextSubmit(String s) { sv.clearFocus(); search(s); return true; } @Override public boolean onQueryTextChange(String s) { return false; } }); MenuItemCompat.setOnActionExpandListener(searchItem, new MenuItemCompat.OnActionExpandListener() { @Override public boolean onMenuItemActionExpand(MenuItem item) { return true; } @Override public boolean onMenuItemActionCollapse(MenuItem item) { if(searchResults != null) { searchResults = null; updateData(topList); } return true; } }); } private void loadToplist() { if (subscription != null) { subscription.unsubscribe(); } gridView.setVisibility(View.GONE); txtvError.setVisibility(View.GONE); butRetry.setVisibility(View.GONE); txtvEmpty.setVisibility(View.GONE); progressBar.setVisibility(View.VISIBLE); subscription = rx.Observable.create((Observable.OnSubscribe<List<Podcast>>) subscriber -> { String lang = Locale.getDefault().getLanguage(); String url = "https://itunes.apple.com/" + lang + "/rss/toppodcasts/limit=25/explicit=true/json"; OkHttpClient client = AntennapodHttpClient.getHttpClient(); Request.Builder httpReq = new Request.Builder() .url(url) .header("User-Agent", ClientConfig.USER_AGENT); List<Podcast> results = new ArrayList<>(); try { Response response = client.newCall(httpReq.build()).execute(); if(!response.isSuccessful()) { // toplist for language does not exist, fall back to united states url = "https://itunes.apple.com/us/rss/toppodcasts/limit=25/explicit=true/json"; httpReq = new Request.Builder() .url(url) .header("User-Agent", ClientConfig.USER_AGENT); response = client.newCall(httpReq.build()).execute(); } if(response.isSuccessful()) { String resultString = response.body().string(); JSONObject result = new JSONObject(resultString); JSONObject feed = result.getJSONObject("feed"); JSONArray entries = feed.getJSONArray("entry"); for(int i=0; i < entries.length(); i++) { JSONObject json = entries.getJSONObject(i); Podcast podcast = Podcast.fromToplist(json); results.add(podcast); } } else { String prefix = getString(R.string.error_msg_prefix); subscriber.onError(new IOException(prefix + response)); } } catch (IOException | JSONException e) { subscriber.onError(e); } subscriber.onNext(results); subscriber.onCompleted(); }) .subscribeOn(Schedulers.newThread()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(podcasts -> { progressBar.setVisibility(View.GONE); topList = podcasts; updateData(topList); }, error -> { Log.e(TAG, Log.getStackTraceString(error)); progressBar.setVisibility(View.GONE); txtvError.setText(error.toString()); txtvError.setVisibility(View.VISIBLE); butRetry.setOnClickListener(v -> loadToplist()); butRetry.setVisibility(View.VISIBLE); }); } private void search(String query) { if (subscription != null) { subscription.unsubscribe(); } gridView.setVisibility(View.GONE); txtvError.setVisibility(View.GONE); butRetry.setVisibility(View.GONE); txtvEmpty.setVisibility(View.GONE); progressBar.setVisibility(View.VISIBLE); subscription = rx.Observable.create((Observable.OnSubscribe<List<Podcast>>) subscriber -> { String 
encodedQuery = null; try { encodedQuery = URLEncoder.encode(query, "UTF-8"); } catch (UnsupportedEncodingException e) { // this won't ever be thrown } if (encodedQuery == null) { encodedQuery = query; // failsafe } //Spaces in the query need to be replaced with '+' character. String formattedUrl = String.format(API_URL, query).replace(' ', '+'); OkHttpClient client = AntennapodHttpClient.getHttpClient(); Request.Builder httpReq = new Request.Builder() .url(formattedUrl) .header("User-Agent", ClientConfig.USER_AGENT); List<Podcast> podcasts = new ArrayList<>(); try { Response response = client.newCall(httpReq.build()).execute(); if(response.isSuccessful()) { String resultString = response.body().string(); JSONObject result = new JSONObject(resultString); JSONArray j = result.getJSONArray("results"); for (int i = 0; i < j.length(); i++) { JSONObject podcastJson = j.getJSONObject(i); Podcast podcast = Podcast.fromSearch(podcastJson); podcasts.add(podcast); } } else { String prefix = getString(R.string.error_msg_prefix); subscriber.onError(new IOException(prefix + response)); } } catch (IOException | JSONException e) { subscriber.onError(e); } subscriber.onNext(podcasts); subscriber.onCompleted(); }) .subscribeOn(Schedulers.newThread()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(podcasts -> { progressBar.setVisibility(View.GONE); updateData(podcasts); }, error -> { Log.e(TAG, Log.getStackTraceString(error)); progressBar.setVisibility(View.GONE); txtvError.setText(error.toString()); txtvError.setVisibility(View.VISIBLE); butRetry.setOnClickListener(v -> search(query)); butRetry.setVisibility(View.VISIBLE); }); } }
1
13,041
Call requires API level 11 (we are on 10)
AntennaPod-AntennaPod
java
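The comment above refers to ArrayAdapter.addAll(Collection), which only exists from Android API level 11, so with a minimum of API 10 the safe equivalent is to keep adding items one at a time, as the removed loop did. A minimal sketch with a stand-in adapter class so it runs off-device; only add(), available since API level 1, is assumed on the real adapter:

import java.util.Arrays;
import java.util.List;

// Stand-in for android.widget.ArrayAdapter so the sketch is self-contained.
class AdapterStandIn<T> {
    void add(T item) {
        System.out.println("added " + item);
    }
}

public class AddAllCompatSketch {
    // API-10-safe equivalent of adapter.addAll(result):
    // ArrayAdapter.addAll(Collection) was only introduced in API level 11.
    static <T> void addAllCompat(AdapterStandIn<T> adapter, List<T> items) {
        for (T item : items) {
            adapter.add(item);
        }
    }

    public static void main(String[] args) {
        addAllCompat(new AdapterStandIn<String>(), Arrays.asList("podcast A", "podcast B"));
    }
}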
@@ -97,7 +97,7 @@ namespace Nethermind.AuRa.Test.Contract var gasLimitContract = new BlockGasLimitContract(new AbiEncoder(), blockGasLimitContractTransition.Value, blockGasLimitContractTransition.Key, new ReadOnlyTxProcessingEnv( DbProvider, - new TrieStore(DbProvider.StateDb, LimboLogs.Instance), + new TrieStore(DbProvider.StateDb, LimboLogs.Instance).AsReadOnly(DbProvider.StateDb), BlockTree, SpecProvider, LimboLogs.Instance)); GasLimitOverrideCache = new AuRaContractGasLimitOverride.Cache();
1
// Copyright (c) 2021 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. // using System.Collections.Generic; using System.Linq; using System.Threading.Tasks; using FluentAssertions; using Nethermind.Abi; using Nethermind.Blockchain.Processing; using Nethermind.Blockchain.Rewards; using Nethermind.Blockchain.Validators; using Nethermind.Consensus; using Nethermind.Consensus.AuRa; using Nethermind.Consensus.AuRa.Contracts; using Nethermind.Core; using Nethermind.Logging; using Nethermind.Trie.Pruning; using NUnit.Framework; namespace Nethermind.AuRa.Test.Contract { public class AuRaContractGasLimitOverrideTests { private const int CorrectHeadGasLimit = 100000000; // TestContract: // pragma solidity ^0.5.0; // contract TestValidatorSet { // function blockGasLimit() public view returns(uint256) { // return 100000000; // } // } [Test] public async Task can_read_block_gas_limit_from_contract() { using var chain = await TestContractBlockchain.ForTest<TestGasLimitContractBlockchain, AuRaContractGasLimitOverrideTests>(); var gasLimit = chain.GasLimitCalculator.GetGasLimit(chain.BlockTree.Head.Header); gasLimit.Should().Be(CorrectHeadGasLimit); } [Test] public async Task caches_read_block_gas_limit() { using var chain = await TestContractBlockchain.ForTest<TestGasLimitContractBlockchain, AuRaContractGasLimitOverrideTests>(); chain.GasLimitCalculator.GetGasLimit(chain.BlockTree.Head.Header); var gasLimit = chain.GasLimitOverrideCache.GasLimitCache.Get(chain.BlockTree.Head.Hash); gasLimit.Should().Be(CorrectHeadGasLimit); } [Test] public async Task can_validate_gas_limit_correct() { using var chain = await TestContractBlockchain.ForTest<TestGasLimitContractBlockchain, AuRaContractGasLimitOverrideTests>(); var isValid = ((AuRaContractGasLimitOverride) chain.GasLimitCalculator).IsGasLimitValid(chain.BlockTree.Head.Header, CorrectHeadGasLimit, out _); isValid.Should().BeTrue(); } [Test] public async Task can_validate_gas_limit_incorrect() { using var chain = await TestContractBlockchain.ForTest<TestGasLimitContractBlockchain, AuRaContractGasLimitOverrideTests>(); var isValid = ((AuRaContractGasLimitOverride) chain.GasLimitCalculator).IsGasLimitValid(chain.BlockTree.Head.Header, 100000001, out long? 
expectedGasLimit); isValid.Should().BeFalse(); expectedGasLimit.Should().Be(CorrectHeadGasLimit); } [Test] public async Task skip_validate_gas_limit_before_enabled() { using var chain = await TestContractBlockchain.ForTest<TestGasLimitContractBlockchainLateBlockGasLimit, AuRaContractGasLimitOverrideTests>(); var isValid = ((AuRaContractGasLimitOverride) chain.GasLimitCalculator).IsGasLimitValid(chain.BlockTree.Genesis, 100000001, out _); isValid.Should().BeTrue(); } public class TestGasLimitContractBlockchain : TestContractBlockchain { public IGasLimitCalculator GasLimitCalculator { get; private set; } public AuRaContractGasLimitOverride.Cache GasLimitOverrideCache { get; private set; } protected override BlockProcessor CreateBlockProcessor() { var blockGasLimitContractTransition = ChainSpec.AuRa.BlockGasLimitContractTransitions.First(); var gasLimitContract = new BlockGasLimitContract(new AbiEncoder(), blockGasLimitContractTransition.Value, blockGasLimitContractTransition.Key, new ReadOnlyTxProcessingEnv( DbProvider, new TrieStore(DbProvider.StateDb, LimboLogs.Instance), BlockTree, SpecProvider, LimboLogs.Instance)); GasLimitOverrideCache = new AuRaContractGasLimitOverride.Cache(); GasLimitCalculator = new AuRaContractGasLimitOverride(new[] {gasLimitContract}, GasLimitOverrideCache, false, FollowOtherMiners.Instance, LimboLogs.Instance); return new AuRaBlockProcessor( SpecProvider, Always.Valid, new RewardCalculator(SpecProvider), TxProcessor, State, Storage, TxPool, ReceiptStorage, LimboLogs.Instance, BlockTree, null, GasLimitCalculator as AuRaContractGasLimitOverride); } protected override Task AddBlocksOnStart() => Task.CompletedTask; } public class TestGasLimitContractBlockchainLateBlockGasLimit : TestGasLimitContractBlockchain { protected override BlockProcessor CreateBlockProcessor() { var blockGasLimitContractTransition = ChainSpec.AuRa.BlockGasLimitContractTransitions.First(); ChainSpec.AuRa.BlockGasLimitContractTransitions = new Dictionary<long, Address>() {{10, blockGasLimitContractTransition.Value}}; return base.CreateBlockProcessor(); } } } }
1
25,088
Do we need to pass the DB if it is the same?
NethermindEth-nethermind
.cs
@@ -300,6 +300,14 @@ func (ccs *crChains) addOp(ptr BlockPointer, op op) error { } func (ccs *crChains) makeChainForOp(op op) error { + // Ignore gc ops -- their unref semantics differ from the other + // ops. Note that this only matters for old gcOps: new gcOps + // only unref the block ID, and not the whole pointer, so they + // wouldn't confuse chain creation. + if _, isGCOp := op.(*gcOp); isGCOp { + return nil + } + // First set the pointers for all updates, and track what's been // created and destroyed. for _, update := range op.AllUpdates() {
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "fmt" "sort" "github.com/keybase/client/go/logger" "golang.org/x/net/context" ) // crChain represents the set of operations that happened to a // particular KBFS node (e.g., individual file or directory) over a // given set of MD updates. It also tracks the starting and ending // block pointers for the node. type crChain struct { ops []op original, mostRecent BlockPointer file bool } // collapse finds complementary pairs of operations that cancel each // other out, and remove the relevant operations from the chain. // Examples include: // * A create followed by a remove for the same name (delete both ops) // * A create followed by a create (renamed == true) for the same name // (delete the create op) func (cc *crChain) collapse() { createsSeen := make(map[string]int) indicesToRemove := make(map[int]bool) for i, op := range cc.ops { switch realOp := op.(type) { case *createOp: if prevCreateIndex, ok := createsSeen[realOp.NewName]; realOp.renamed && ok { // A rename has papered over the first create, so // just drop it. indicesToRemove[prevCreateIndex] = true } createsSeen[realOp.NewName] = i case *rmOp: if prevCreateIndex, ok := createsSeen[realOp.OldName]; ok { delete(createsSeen, realOp.OldName) // The rm cancels out the create, so remove both. indicesToRemove[prevCreateIndex] = true indicesToRemove[i] = true } case *setAttrOp: // TODO: Collapse opposite setex pairs default: // ignore other op types } } if len(indicesToRemove) > 0 { ops := make([]op, 0, len(cc.ops)-len(indicesToRemove)) for i, op := range cc.ops { if !indicesToRemove[i] { ops = append(ops, op) } } cc.ops = ops } } func (cc *crChain) getCollapsedWriteRange() []WriteRange { if !cc.isFile() { return nil } var wr []WriteRange for _, op := range cc.ops { syncOp, ok := op.(*syncOp) if !ok { continue } wr = syncOp.collapseWriteRange(wr) } return wr } func (cc *crChain) getActionsToMerge(renamer ConflictRenamer, mergedPath path, mergedChain *crChain) (crActionList, error) { var actions crActionList // If this is a file, determine whether the unmerged chain // could actually have changed the file in some way that it // hasn't already been changed. For example, if they both // truncate the file to the same length, and there are no // other writes, we can just drop the unmerged syncs. toSkip := make(map[int]bool) if cc.isFile() && mergedChain != nil { myWriteRange := cc.getCollapsedWriteRange() mergedWriteRange := mergedChain.getCollapsedWriteRange() // If both branches contain no writes, and their truncation // points are the same, then there are no unmerged actions to // take. // // TODO: In the future we may be able to do smarter merging // here if the write ranges don't overlap, though maybe only // for certain file types? if len(myWriteRange) == 1 && myWriteRange[0].isTruncate() && len(mergedWriteRange) == 1 && mergedWriteRange[0].isTruncate() && myWriteRange[0].Off == mergedWriteRange[0].Off { // drop all sync ops for i, op := range cc.ops { if _, ok := op.(*syncOp); ok { actions = append(actions, &dropUnmergedAction{op}) toSkip[i] = true } } } } // Check each op against all ops in the corresponding merged // chain, looking for conflicts. If there is a conflict, return // it as part of the action list. If there are no conflicts for // that op, return the op's default actions. 
for i, unmergedOp := range cc.ops { if toSkip[i] { continue } conflict := false if mergedChain != nil { for _, mergedOp := range mergedChain.ops { action, err := unmergedOp.CheckConflict(renamer, mergedOp, cc.isFile()) if err != nil { return nil, err } if action != nil { conflict = true actions = append(actions, action) } } } // no conflicts! if !conflict { actions = append(actions, unmergedOp.GetDefaultAction(mergedPath)) } } return actions, nil } func (cc *crChain) isFile() bool { return cc.file } // identifyType figures out whether this chain represents a file or // directory. It tries to figure it out based purely on operation // state, but setAttr(mtime) can apply to either type; in that case, // we need to fetch the block to figure out the type. func (cc *crChain) identifyType(ctx context.Context, fbo *folderBlockOps, md ImmutableRootMetadata, chains *crChains) error { if len(cc.ops) == 0 { return nil } // If any op is setAttr (ex or size) or sync, this is a file // chain. If it only has a setAttr/mtime, we don't know what it // is, so fall through and fetch the block unless we come across // another op that can determine the type. var parentDir BlockPointer for _, op := range cc.ops { switch realOp := op.(type) { case *syncOp: cc.file = true return nil case *setAttrOp: if realOp.Attr != mtimeAttr { cc.file = true return nil } // We can't tell the file type from an mtimeAttr, so we // may have to actually fetch the block to figure it out. parentDir = realOp.Dir.Ref default: return nil } } parentOriginal, ok := chains.originals[parentDir] if !ok { return NoChainFoundError{parentDir} } // We have to find the current parent directory block. If the // file has been renamed, that might be different from parentDir // above. if newParent, _, ok := chains.renamedParentAndName(cc.original); ok { parentOriginal = newParent } parentMostRecent, err := chains.mostRecentFromOriginalOrSame(parentOriginal) if err != nil { return err } // If we get down here, we have an ambiguity, and need to fetch // the block to figure out the file type. dblock, err := fbo.GetDirBlockForReading(ctx, makeFBOLockState(), md.ReadOnly(), parentMostRecent, fbo.folderBranch.Branch, path{}) if err != nil { return err } // We don't have the file name handy, so search for the pointer. found := false for _, entry := range dblock.Children { if entry.BlockPointer != cc.mostRecent { continue } switch entry.Type { case Dir: cc.file = false case File: cc.file = true case Exec: cc.file = true default: return fmt.Errorf("Unexpected chain type: %s", entry.Type) } found = true break } if !found { // Give up nicely if the node has been deleted, since quota // reclamation has probably already happened and there won't // be any conflicts to resolve anyway. if chains.isDeleted(cc.original) { return nil } return fmt.Errorf("Couldn't find directory entry for %v", cc.mostRecent) } return nil } type renameInfo struct { originalOldParent BlockPointer oldName string originalNewParent BlockPointer newName string } func (ri renameInfo) String() string { return fmt.Sprintf( "renameInfo{originalOldParent: %s, oldName: %s, originalNewParent: %s, newName: %s}", ri.originalOldParent, ri.oldName, ri.originalNewParent, ri.newName) } // crChains contains a crChain for every KBFS node affected by the // operations over a given set of MD updates. The chains are indexed // by both the starting (original) and ending (most recent) pointers. // It also keeps track of which chain points to the root of the folder. 
type crChains struct { byOriginal map[BlockPointer]*crChain byMostRecent map[BlockPointer]*crChain originalRoot BlockPointer // The original blockpointers for nodes that have been // unreferenced or initially referenced during this chain. deletedOriginals map[BlockPointer]bool createdOriginals map[BlockPointer]bool // A map from original blockpointer to the full rename operation // of the node (from the original location of the node to the // final locations). renamedOriginals map[BlockPointer]renameInfo // Separately track pointers for unembedded block changes. blockChangePointers map[BlockPointer]bool // Pointers that should be explicitly cleaned up in the resolution. toUnrefPointers map[BlockPointer]bool // Also keep a reference to the most recent MD that's part of this // chain. mostRecentMD ImmutableRootMetadata // We need to be able to track ANY BlockPointer, at any point in // the chain, back to its original. originals map[BlockPointer]BlockPointer } func (ccs *crChains) addOp(ptr BlockPointer, op op) error { currChain, ok := ccs.byMostRecent[ptr] if !ok { return fmt.Errorf("Could not find chain for most recent ptr %v", ptr) } currChain.ops = append(currChain.ops, op) return nil } func (ccs *crChains) makeChainForOp(op op) error { // First set the pointers for all updates, and track what's been // created and destroyed. for _, update := range op.AllUpdates() { chain, ok := ccs.byMostRecent[update.Unref] if !ok { // No matching chain means it's time to start a new chain chain = &crChain{original: update.Unref} ccs.byOriginal[update.Unref] = chain } if chain.mostRecent.IsInitialized() { // delete the old most recent pointer, it's no longer needed delete(ccs.byMostRecent, chain.mostRecent) } chain.mostRecent = update.Ref ccs.byMostRecent[update.Ref] = chain if chain.original != update.Ref { // Always be able to track this one back to its original. ccs.originals[update.Ref] = chain.original } } for _, ptr := range op.Refs() { ccs.createdOriginals[ptr] = true } for _, ptr := range op.Unrefs() { // Look up the original pointer corresponding to this most // recent one. original := ptr if ptrChain, ok := ccs.byMostRecent[ptr]; ok { original = ptrChain.original } ccs.deletedOriginals[original] = true } // then set the op depending on the actual op type switch realOp := op.(type) { default: panic(fmt.Sprintf("Unrecognized operation: %v", op)) case *createOp: err := ccs.addOp(realOp.Dir.Ref, op) if err != nil { return err } case *rmOp: err := ccs.addOp(realOp.Dir.Ref, op) if err != nil { return err } case *renameOp: // split rename op into two separate operations, one for // remove and one for create ro, err := newRmOp(realOp.OldName, realOp.OldDir.Unref) if err != nil { return err } ro.setWriterInfo(realOp.getWriterInfo()) // realOp.OldDir.Ref may be zero if this is a // post-resolution chain, so set ro.Dir.Ref manually. ro.Dir.Ref = realOp.OldDir.Ref err = ccs.addOp(realOp.OldDir.Ref, ro) if err != nil { return err } ndu := realOp.NewDir.Unref ndr := realOp.NewDir.Ref if realOp.NewDir == (blockUpdate{}) { // this is a rename within the same directory ndu = realOp.OldDir.Unref ndr = realOp.OldDir.Ref } if len(realOp.Unrefs()) > 0 { // Something was overwritten; make an explicit rm for it // so we can check for conflicts. 
roOverwrite, err := newRmOp(realOp.NewName, ndu) if err != nil { return err } roOverwrite.setWriterInfo(realOp.getWriterInfo()) err = roOverwrite.Dir.setRef(ndr) if err != nil { return err } err = ccs.addOp(ndr, roOverwrite) if err != nil { return err } // Transfer any unrefs over. for _, ptr := range realOp.Unrefs() { roOverwrite.AddUnrefBlock(ptr) } } co, err := newCreateOp(realOp.NewName, ndu, realOp.RenamedType) if err != nil { return err } co.setWriterInfo(realOp.getWriterInfo()) co.renamed = true // ndr may be zero if this is a post-resolution chain, // so set co.Dir.Ref manually. co.Dir.Ref = ndr err = ccs.addOp(ndr, co) if err != nil { return err } // also keep track of the new parent for the renamed node if realOp.Renamed.IsInitialized() { newParentChain, ok := ccs.byMostRecent[ndr] if !ok { return fmt.Errorf("While renaming, couldn't find the chain "+ "for the new parent %v", ndr) } oldParentChain, ok := ccs.byMostRecent[realOp.OldDir.Ref] if !ok { return fmt.Errorf("While renaming, couldn't find the chain "+ "for the old parent %v", ndr) } renamedOriginal := realOp.Renamed if renamedChain, ok := ccs.byMostRecent[realOp.Renamed]; ok { renamedOriginal = renamedChain.original } // Use the previous old info if there is one already, // in case this node has been renamed multiple times. ri, ok := ccs.renamedOriginals[renamedOriginal] if !ok { // Otherwise make a new one. ri = renameInfo{ originalOldParent: oldParentChain.original, oldName: realOp.OldName, } } ri.originalNewParent = newParentChain.original ri.newName = realOp.NewName ccs.renamedOriginals[renamedOriginal] = ri // Remember what you create, in case we need to merge // directories after a rename. co.AddRefBlock(renamedOriginal) } case *syncOp: err := ccs.addOp(realOp.File.Ref, op) if err != nil { return err } case *setAttrOp: // Because the attributes apply to the file, which doesn't // actually have an updated pointer, we may need to create a // new chain. _, ok := ccs.byMostRecent[realOp.File] if !ok { // pointer didn't change, so most recent is the same: chain := &crChain{original: realOp.File, mostRecent: realOp.File} ccs.byOriginal[realOp.File] = chain ccs.byMostRecent[realOp.File] = chain } err := ccs.addOp(realOp.File, op) if err != nil { return err } case *resolutionOp: // ignore resolution op case *rekeyOp: // ignore rekey op case *gcOp: // ignore gc op } return nil } func (ccs *crChains) makeChainForNewOpWithUpdate( targetPtr BlockPointer, newOp op, update *blockUpdate) error { oldUpdate := *update // so that most recent == original var err error *update, err = makeBlockUpdate(targetPtr, update.Unref) if err != nil { return err } defer func() { // reset the update to its original state before returning. *update = oldUpdate }() err = ccs.makeChainForOp(newOp) if err != nil { return err } return nil } // makeChainForNewOp makes a new chain for an op that does not yet // have its pointers initialized. It does so by setting Unref and Ref // to be the same for the duration of this function, and calling the // usual makeChainForOp method. This function is not goroutine-safe // with respect to newOp. Also note that rename ops will not be split // into two ops; they will be placed only in the new directory chain. 
func (ccs *crChains) makeChainForNewOp(targetPtr BlockPointer, newOp op) error { switch realOp := newOp.(type) { case *createOp: return ccs.makeChainForNewOpWithUpdate(targetPtr, newOp, &realOp.Dir) case *rmOp: return ccs.makeChainForNewOpWithUpdate(targetPtr, newOp, &realOp.Dir) case *renameOp: // In this case, we don't want to split the rename chain, so // just make up a new operation and later overwrite it with // the rename op. co, err := newCreateOp(realOp.NewName, realOp.NewDir.Unref, File) if err != nil { return err } err = ccs.makeChainForNewOpWithUpdate(targetPtr, co, &co.Dir) if err != nil { return err } chain, ok := ccs.byMostRecent[targetPtr] if !ok { return fmt.Errorf("Couldn't find chain for %v after making it", targetPtr) } if len(chain.ops) != 1 { return fmt.Errorf("Chain of unexpected length for %v after "+ "making it", targetPtr) } chain.ops[0] = realOp return nil case *setAttrOp: return ccs.makeChainForNewOpWithUpdate(targetPtr, newOp, &realOp.Dir) case *syncOp: return ccs.makeChainForNewOpWithUpdate(targetPtr, newOp, &realOp.File) default: return fmt.Errorf("Couldn't make chain with unknown operation %s", newOp) } } func (ccs *crChains) mostRecentFromOriginal(original BlockPointer) ( BlockPointer, error) { chain, ok := ccs.byOriginal[original] if !ok { return BlockPointer{}, NoChainFoundError{original} } return chain.mostRecent, nil } func (ccs *crChains) mostRecentFromOriginalOrSame(original BlockPointer) ( BlockPointer, error) { ptr, err := ccs.mostRecentFromOriginal(original) if err == nil { // A satisfactory chain was found. return ptr, nil } else if _, ok := err.(NoChainFoundError); !ok { // An unexpected error! return BlockPointer{}, err } return original, nil } func (ccs *crChains) originalFromMostRecent(mostRecent BlockPointer) ( BlockPointer, error) { chain, ok := ccs.byMostRecent[mostRecent] if !ok { return BlockPointer{}, NoChainFoundError{mostRecent} } return chain.original, nil } func (ccs *crChains) originalFromMostRecentOrSame(mostRecent BlockPointer) ( BlockPointer, error) { ptr, err := ccs.originalFromMostRecent(mostRecent) if err == nil { // A satisfactory chain was found. return ptr, nil } else if _, ok := err.(NoChainFoundError); !ok { // An unexpected error! return BlockPointer{}, err } return mostRecent, nil } func (ccs *crChains) isCreated(original BlockPointer) bool { return ccs.createdOriginals[original] } func (ccs *crChains) isDeleted(original BlockPointer) bool { return ccs.deletedOriginals[original] } func (ccs *crChains) renamedParentAndName(original BlockPointer) ( BlockPointer, string, bool) { info, ok := ccs.renamedOriginals[original] if !ok { return BlockPointer{}, "", false } return info.originalNewParent, info.newName, true } func newCRChainsEmpty() *crChains { return &crChains{ byOriginal: make(map[BlockPointer]*crChain), byMostRecent: make(map[BlockPointer]*crChain), deletedOriginals: make(map[BlockPointer]bool), createdOriginals: make(map[BlockPointer]bool), renamedOriginals: make(map[BlockPointer]renameInfo), blockChangePointers: make(map[BlockPointer]bool), toUnrefPointers: make(map[BlockPointer]bool), originals: make(map[BlockPointer]BlockPointer), } } func newCRChains(ctx context.Context, cfg Config, rmds []ImmutableRootMetadata, fbo *folderBlockOps, identifyTypes bool) ( ccs *crChains, err error) { ccs = newCRChainsEmpty() // For each MD update, turn each update in each op into map // entries and create chains for the BlockPointers that are // affected directly by the operation. 
for _, rmd := range rmds { // No new operations in these. if rmd.IsWriterMetadataCopiedSet() { continue } winfo, err := newWriterInfo(ctx, cfg, rmd.LastModifyingWriter, rmd.writerKID()) if err != nil { return nil, err } if ptr := rmd.data.cachedChanges.Info.BlockPointer; ptr != zeroPtr { ccs.blockChangePointers[ptr] = true } // Copy the ops since CR will change them. ops := make(opsList, len(rmd.data.Changes.Ops)) err = CodecUpdate(cfg.Codec(), &ops, rmd.data.Changes.Ops) if err != nil { return nil, err } for _, op := range ops { op.setWriterInfo(winfo) op.setLocalTimestamp(rmd.localTimestamp) err := ccs.makeChainForOp(op) if err != nil { return nil, err } } if !ccs.originalRoot.IsInitialized() { // Find the original pointer for the root directory if rootChain, ok := ccs.byMostRecent[rmd.data.Dir.BlockPointer]; ok { ccs.originalRoot = rootChain.original } } } for _, chain := range ccs.byOriginal { chain.collapse() // NOTE: even if we've removed all its ops, still keep the // chain around so we can see the mapping between the original // and most recent pointers. // Figure out if this chain is a file or directory. We don't // need to do this for chains that represent a resolution in // progress, since in that case all actions are already // completed. if len(rmds) > 0 && identifyTypes { err := chain.identifyType(ctx, fbo, rmds[len(rmds)-1], ccs) if err != nil { return nil, err } } } if len(rmds) > 0 { ccs.mostRecentMD = rmds[len(rmds)-1] } return ccs, nil } type crChainSummary struct { Path string Ops []string } func (ccs *crChains) summary(identifyChains *crChains, nodeCache NodeCache) (res []*crChainSummary) { for _, chain := range ccs.byOriginal { summary := &crChainSummary{} res = append(res, summary) // first stringify all the ops so they are displayed even if // we can't find the path. for _, op := range chain.ops { summary.Ops = append(summary.Ops, op.String()) } // find the path name using the identified most recent pointer n := nodeCache.Get(chain.mostRecent.ref()) if n == nil { summary.Path = fmt.Sprintf("Unknown path: %v", chain.mostRecent) continue } path := nodeCache.PathFromNode(n) summary.Path = path.String() } return res } func (ccs *crChains) removeChain(ptr BlockPointer) { if chain, ok := ccs.byMostRecent[ptr]; ok { delete(ccs.byOriginal, chain.original) } else { delete(ccs.byOriginal, ptr) } delete(ccs.byMostRecent, ptr) } // copyOpAndRevertUnrefsToOriginals returns a shallow copy of the op, // modifying each custom BlockPointer field to reference the original // version of the corresponding blocks. 
func (ccs *crChains) copyOpAndRevertUnrefsToOriginals(currOp op) op { var unrefs []*BlockPointer var newOp op switch realOp := currOp.(type) { case *createOp: newCreateOp := *realOp unrefs = append(unrefs, &newCreateOp.Dir.Unref) newOp = &newCreateOp case *rmOp: newRmOp := *realOp unrefs = append(unrefs, &newRmOp.Dir.Unref) newOp = &newRmOp case *renameOp: newRenameOp := *realOp unrefs = append(unrefs, &newRenameOp.OldDir.Unref, &newRenameOp.NewDir.Unref, &newRenameOp.Renamed) newOp = &newRenameOp case *syncOp: newSyncOp := *realOp unrefs = append(unrefs, &newSyncOp.File.Unref) newOp = &newSyncOp case *setAttrOp: newSetAttrOp := *realOp unrefs = append(unrefs, &newSetAttrOp.Dir.Unref, &newSetAttrOp.File) newOp = &newSetAttrOp case *gcOp: // No need to copy a gcOp, it won't be modified newOp = realOp } for _, unref := range unrefs { original, ok := ccs.originals[*unref] if ok { *unref = original } } return newOp } // changeOriginal converts the original of a chain to a different original. func (ccs *crChains) changeOriginal(oldOriginal BlockPointer, newOriginal BlockPointer) error { chain, ok := ccs.byOriginal[oldOriginal] if !ok { return NoChainFoundError{oldOriginal} } if _, ok := ccs.byOriginal[newOriginal]; ok { return fmt.Errorf("crChains.changeOriginal: New original %v "+ "already exists", newOriginal) } delete(ccs.byOriginal, oldOriginal) chain.original = newOriginal ccs.byOriginal[newOriginal] = chain ccs.originals[oldOriginal] = newOriginal if chain.mostRecent == oldOriginal { chain.mostRecent = newOriginal delete(ccs.byMostRecent, oldOriginal) ccs.byMostRecent[newOriginal] = chain } if _, ok := ccs.deletedOriginals[oldOriginal]; ok { delete(ccs.deletedOriginals, oldOriginal) ccs.deletedOriginals[newOriginal] = true } if _, ok := ccs.createdOriginals[oldOriginal]; ok { delete(ccs.createdOriginals, oldOriginal) ccs.createdOriginals[newOriginal] = true } if ri, ok := ccs.renamedOriginals[oldOriginal]; ok { delete(ccs.renamedOriginals, oldOriginal) ccs.renamedOriginals[newOriginal] = ri } return nil } // getPaths returns a sorted slice of most recent paths to all the // nodes in the given CR chains that were directly modified during a // branch, and which existed at both the start and the end of the // branch. This represents the paths that need to be checked for // conflicts. The paths are sorted by descending path length. It // uses nodeCache when looking up paths, which must at least contain // the most recent root node of the branch. Note that if a path // cannot be found, the corresponding chain is completely removed from // the set of CR chains. Set includeCreates to true if the returned // paths should include the paths of newly-created nodes. func (ccs *crChains) getPaths(ctx context.Context, blocks *folderBlockOps, log logger.Logger, nodeCache NodeCache, includeCreates bool) ( []path, error) { newPtrs := make(map[BlockPointer]bool) var ptrs []BlockPointer for ptr, chain := range ccs.byMostRecent { newPtrs[ptr] = true // We only care about the paths for ptrs that are directly // affected by operations and were live through the entire // unmerged branch, or, if includeCreates was set, was created // and not deleted in the unmerged branch. 
if len(chain.ops) > 0 && (includeCreates || !ccs.isCreated(chain.original)) && !ccs.isDeleted(chain.original) { ptrs = append(ptrs, ptr) } } pathMap, err := blocks.SearchForPaths(ctx, nodeCache, ptrs, newPtrs, ccs.mostRecentMD.ReadOnly()) if err != nil { return nil, err } paths := make([]path, 0, len(pathMap)) for ptr, p := range pathMap { if len(p.path) == 0 { log.CDebugf(ctx, "Ignoring pointer with no found path: %v", ptr) ccs.removeChain(ptr) continue } paths = append(paths, p) // update the unmerged final paths chain, ok := ccs.byMostRecent[ptr] if !ok { log.CErrorf(ctx, "Couldn't find chain for found path: %v", ptr) continue } for _, op := range chain.ops { op.setFinalPath(p) } } // Order by descending path length. sort.Sort(crSortedPaths(paths)) return paths, nil }
1
12,719
Might be a good idea to rename the variable so it doesn't shadow the type.
keybase-kbfs
go
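The review message for this row ("rename the variable so it doesn't shadow the type") appears to refer to the local `path` variable in `crChains.summary` above (`path := nodeCache.PathFromNode(n)`), which shadows the `path` type that the surrounding code uses (for example `[]path` in `getPaths`). The sketch below only illustrates that shadowing pattern and the suggested rename; the `path` struct and `pathFromNode` helper are simplified stand-ins, not the real kbfs types.

```go
package main

import "fmt"

// path is a simplified stand-in for the kbfs path type referenced by the
// snippet above (in the real code it describes a path of BlockPointers).
type path struct {
	name string
}

func (p path) String() string { return p.name }

// pathFromNode stands in for nodeCache.PathFromNode.
func pathFromNode(node string) path {
	return path{name: "/keybase/private/alice/" + node}
}

// summaryShadowing mirrors the pattern the reviewer flagged: the local
// variable named path shadows the path type, so the type name can no longer
// be used below this point in the function (e.g. `var other path` would not
// compile here).
func summaryShadowing(node string) string {
	path := pathFromNode(node)
	return path.String()
}

// summaryRenamed applies the suggested rename: a short name such as p keeps
// the type name available inside the function body.
func summaryRenamed(node string) string {
	p := pathFromNode(node)
	var fallback path // the type name is still usable here
	_ = fallback
	return p.String()
}

func main() {
	fmt.Println(summaryShadowing("docs"))
	fmt.Println(summaryRenamed("docs"))
}
```

A short name like `p` also matches how the same file already iterates paths (`for ptr, p := range pathMap` in `getPaths`), so the rename keeps the code consistent with its surroundings.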
@@ -14,7 +14,13 @@ class Tag extends BaseItem { } static async noteIds(tagId) { - const rows = await this.db().selectAll('SELECT note_id FROM note_tags WHERE tag_id = ?', [tagId]); + // Get NoteIds of that are tagged with current tag or its descendants + const rows = await this.db().selectAll(`WITH RECURSIVE + parent_of(id, child_id) AS + (SELECT id, id FROM tags where id=? + UNION ALL + SELECT parent_of.id, tags2.id FROM parent_of JOIN tags AS tags2 ON parent_of.child_id=tags2.parent_id) + SELECT note_id FROM note_tags WHERE tag_id IN (SELECT child_id from parent_of)`, [tagId]); const output = []; for (let i = 0; i < rows.length; i++) { output.push(rows[i].note_id);
1
const BaseModel = require('lib/BaseModel.js'); const BaseItem = require('lib/models/BaseItem.js'); const NoteTag = require('lib/models/NoteTag.js'); const Note = require('lib/models/Note.js'); const { _ } = require('lib/locale'); class Tag extends BaseItem { static tableName() { return 'tags'; } static modelType() { return BaseModel.TYPE_TAG; } static async noteIds(tagId) { const rows = await this.db().selectAll('SELECT note_id FROM note_tags WHERE tag_id = ?', [tagId]); const output = []; for (let i = 0; i < rows.length; i++) { output.push(rows[i].note_id); } return output; } static async notes(tagId, options = null) { if (options === null) options = {}; const noteIds = await this.noteIds(tagId); if (!noteIds.length) return []; return Note.previews( null, Object.assign({}, options, { conditions: [`id IN ("${noteIds.join('","')}")`], }) ); } // Untag all the notes and delete tag static async untagAll(tagId) { const noteTags = await NoteTag.modelSelectAll('SELECT id FROM note_tags WHERE tag_id = ?', [tagId]); for (let i = 0; i < noteTags.length; i++) { await NoteTag.delete(noteTags[i].id); } await Tag.delete(tagId); } static async delete(id, options = null) { if (!options) options = {}; await super.delete(id, options); this.dispatch({ type: 'TAG_DELETE', id: id, }); } static async addNote(tagId, noteId) { const hasIt = await this.hasNote(tagId, noteId); if (hasIt) return; const output = await NoteTag.save({ tag_id: tagId, note_id: noteId, }); this.dispatch({ type: 'TAG_UPDATE_ONE', item: await Tag.loadWithCount(tagId), }); return output; } static async removeNote(tagId, noteId) { const noteTags = await NoteTag.modelSelectAll('SELECT id FROM note_tags WHERE tag_id = ? and note_id = ?', [tagId, noteId]); for (let i = 0; i < noteTags.length; i++) { await NoteTag.delete(noteTags[i].id); } this.dispatch({ type: 'NOTE_TAG_REMOVE', item: await Tag.load(tagId), }); } static loadWithCount(tagId) { const sql = 'SELECT * FROM tags_with_note_count WHERE id = ?'; return this.modelSelectOne(sql, [tagId]); } static async hasNote(tagId, noteId) { const r = await this.db().selectOne('SELECT note_id FROM note_tags WHERE tag_id = ? AND note_id = ? 
LIMIT 1', [tagId, noteId]); return !!r; } static async allWithNotes() { return await Tag.modelSelectAll('SELECT * FROM tags_with_note_count'); } static async searchAllWithNotes(options) { if (!options) options = {}; if (!options.conditions) options.conditions = []; options.conditions.push('id IN (SELECT distinct id FROM tags_with_note_count)'); return this.search(options); } static async tagsByNoteId(noteId) { const tagIds = await NoteTag.tagIdsByNoteId(noteId); return this.modelSelectAll(`SELECT * FROM tags WHERE id IN ("${tagIds.join('","')}")`); } static async commonTagsByNoteIds(noteIds) { if (!noteIds || noteIds.length === 0) { return []; } let commonTagIds = await NoteTag.tagIdsByNoteId(noteIds[0]); for (let i = 1; i < noteIds.length; i++) { const tagIds = await NoteTag.tagIdsByNoteId(noteIds[i]); commonTagIds = commonTagIds.filter(value => tagIds.includes(value)); if (commonTagIds.length === 0) { break; } } return this.modelSelectAll(`SELECT * FROM tags WHERE id IN ("${commonTagIds.join('","')}")`); } static async loadByTitle(title) { return this.loadByField('title', title, { caseInsensitive: true }); } static async addNoteTagByTitle(noteId, tagTitle) { let tag = await this.loadByTitle(tagTitle); if (!tag) tag = await Tag.save({ title: tagTitle }, { userSideValidation: true }); return await this.addNote(tag.id, noteId); } static async setNoteTagsByTitles(noteId, tagTitles) { const previousTags = await this.tagsByNoteId(noteId); const addedTitles = []; for (let i = 0; i < tagTitles.length; i++) { const title = tagTitles[i].trim().toLowerCase(); if (!title) continue; let tag = await this.loadByTitle(title); if (!tag) tag = await Tag.save({ title: title }, { userSideValidation: true }); await this.addNote(tag.id, noteId); addedTitles.push(title); } for (let i = 0; i < previousTags.length; i++) { if (addedTitles.indexOf(previousTags[i].title.toLowerCase()) < 0) { await this.removeNote(previousTags[i].id, noteId); } } } static async setNoteTagsByIds(noteId, tagIds) { const previousTags = await this.tagsByNoteId(noteId); const addedIds = []; for (let i = 0; i < tagIds.length; i++) { const tagId = tagIds[i]; await this.addNote(tagId, noteId); addedIds.push(tagId); } for (let i = 0; i < previousTags.length; i++) { if (addedIds.indexOf(previousTags[i].id) < 0) { await this.removeNote(previousTags[i].id, noteId); } } } static async save(o, options = null) { if (options && options.userSideValidation) { if ('title' in o) { o.title = o.title.trim().toLowerCase(); const existingTag = await Tag.loadByTitle(o.title); if (existingTag && existingTag.id !== o.id) throw new Error(_('The tag "%s" already exists. Please choose a different name.', o.title)); } } return super.save(o, options).then(tag => { this.dispatch({ type: 'TAG_UPDATE_ONE', item: tag, }); return tag; }); } } module.exports = Tag;
1
12,215
Again I'd prefer if this is done in JavaScript rather than in SQL.
laurent22-joplin
js
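The reviewer for this row prefers that the descendant-tag expansion from the patch be done in JavaScript rather than in a recursive SQL CTE. Below is a rough sketch of that alternative, not Joplin's actual implementation: it assumes the same `selectAll(sql, params)` database helper used by `Tag.noteIds` above and the `parent_id` column on `tags` that the patch relies on; the function name and the breadth-first walk are illustrative.

```js
// Sketch only: collect tagId plus all of its descendant tag ids in
// JavaScript, then fetch the note ids with a plain, non-recursive query.
// `db` is assumed to expose the selectAll(sql, params) helper shown above.
async function noteIdsIncludingDescendants(db, tagId) {
	const tagIds = [tagId];
	let frontier = [tagId];
	while (frontier.length) {
		// Expand one level of children per round (breadth-first walk).
		const placeholders = frontier.map(() => '?').join(',');
		const children = await db.selectAll(
			`SELECT id FROM tags WHERE parent_id IN (${placeholders})`,
			frontier
		);
		// Skip ids we have already seen so a malformed parent_id cycle
		// cannot loop forever.
		frontier = children.map(row => row.id).filter(id => !tagIds.includes(id));
		tagIds.push(...frontier);
	}
	const rows = await db.selectAll(
		`SELECT note_id FROM note_tags WHERE tag_id IN ("${tagIds.join('","')}")`
	);
	return rows.map(row => row.note_id);
}

module.exports = { noteIdsIncludingDescendants };
```

Walking the tree one level per round keeps the number of queries proportional to the depth of the tag hierarchy rather than the number of tags, and the `includes` check guards against accidental cycles in `parent_id`.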
@@ -1848,7 +1848,7 @@ function fullyResolveKeys(obj) { * If there are no listeners registered with the flow, the error will be * rethrown to the global error handler. * - * Refer to the {@link ./promise} module documentation fora detailed + * Refer to the {@link ./promise} module documentation for a detailed * explanation of how the ControlFlow coordinates task execution. * * @final
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. /** * @fileoverview * The promise module is centered around the {@linkplain ControlFlow}, a class * that coordinates the execution of asynchronous tasks. The ControlFlow allows * users to focus on the imperative commands for their script without worrying * about chaining together every single asynchronous action, which can be * tedious and verbose. APIs may be layered on top of the control flow to read * as if they were synchronous. For instance, the core * {@linkplain ./webdriver.WebDriver WebDriver} API is built on top of the * control flow, allowing users to write * * driver.get('http://www.google.com/ncr'); * driver.findElement({name: 'q'}).sendKeys('webdriver'); * driver.findElement({name: 'btnGn'}).click(); * * instead of * * driver.get('http://www.google.com/ncr') * .then(function() { * return driver.findElement({name: 'q'}); * }) * .then(function(q) { * return q.sendKeys('webdriver'); * }) * .then(function() { * return driver.findElement({name: 'btnG'}); * }) * .then(function(btnG) { * return btnG.click(); * }); * * ## Tasks and Task Queues * * The control flow is based on the concept of tasks and task queues. Tasks are * functions that define the basic unit of work for the control flow to execute. * Each task is scheduled via {@link ControlFlow#execute()}, which will return * a {@link ManagedPromise ManagedPromise} that will be resolved with the task's * result. * * A task queue contains all of the tasks scheduled within a single turn of the * [JavaScript event loop][JSEL]. The control flow will create a new task queue * the first time a task is scheduled within an event loop. * * var flow = promise.controlFlow(); * flow.execute(foo); // Creates a new task queue and inserts foo. * flow.execute(bar); // Inserts bar into the same queue as foo. * setTimeout(function() { * flow.execute(baz); // Creates a new task queue and inserts baz. * }, 0); * * Whenever the control flow creates a new task queue, it will automatically * begin executing tasks in the next available turn of the event loop. This * execution is scheduled using a "micro-task" timer, such as a (native) * `ManagedPromise.then()` callback. * * setTimeout(() => console.log('a')); * ManagedPromise.resolve().then(() => console.log('b')); // A native promise. * flow.execute(() => console.log('c')); * ManagedPromise.resolve().then(() => console.log('d')); * setTimeout(() => console.log('fin')); * // b * // c * // d * // a * // fin * * In the example above, b/c/d is logged before a/fin because native promises * and this module use "micro-task" timers, which have a higher priority than * "macro-tasks" like `setTimeout`. 
* * ## Task Execution * * Upon creating a task queue, and whenever an exisiting queue completes a task, * the control flow will schedule a micro-task timer to process any scheduled * tasks. This ensures no task is ever started within the same turn of the * JavaScript event loop in which it was scheduled, nor is a task ever started * within the same turn that another finishes. * * When the execution timer fires, a single task will be dequeued and executed. * There are several important events that may occur while executing a task * function: * * 1. A new task queue is created by a call to {@link ControlFlow#execute()}. * Any tasks scheduled within this task queue are considered subtasks of the * current task. * 2. The task function throws an error. Any scheduled tasks are immediately * discarded and the task's promised result (previously returned by * {@link ControlFlow#execute()}) is immediately rejected with the thrown * error. * 3. The task function returns sucessfully. * * If a task function created a new task queue, the control flow will wait for * that queue to complete before processing the task result. If the queue * completes without error, the flow will settle the task's promise with the * value originaly returned by the task function. On the other hand, if the task * queue termintes with an error, the task's promise will be rejected with that * error. * * flow.execute(function() { * flow.execute(() => console.log('a')); * flow.execute(() => console.log('b')); * }); * flow.execute(() => console.log('c')); * // a * // b * // c * * ## ManagedPromise Integration * * In addition to the {@link ControlFlow} class, the promise module also exports * a [ManagedPromise/A+] {@linkplain ManagedPromise implementation} that is deeply * integrated with the ControlFlow. First and foremost, each promise * {@linkplain ManagedPromise#then() callback} is scheduled with the * control flow as a task. As a result, each callback is invoked in its own turn * of the JavaScript event loop with its own task queue. If any tasks are * scheduled within a callback, the callback's promised result will not be * settled until the task queue has completed. * * promise.fulfilled().then(function() { * flow.execute(function() { * console.log('b'); * }); * }).then(() => console.log('a')); * // b * // a * * ### Scheduling ManagedPromise Callbacks <a id="scheduling_callbacks"></a> * * How callbacks are scheduled in the control flow depends on when they are * attached to the promise. Callbacks attached to a _previously_ resolved * promise are immediately enqueued as subtasks of the currently running task. 
* * var p = promise.fulfilled(); * flow.execute(function() { * flow.execute(() => console.log('A')); * p.then( () => console.log('B')); * flow.execute(() => console.log('C')); * p.then( () => console.log('D')); * }).then(function() { * console.log('fin'); * }); * // A * // B * // C * // D * // fin * * When a promise is resolved while a task function is on the call stack, any * callbacks also registered in that stack frame are scheduled as if the promise * were already resolved: * * var d = promise.defer(); * flow.execute(function() { * flow.execute( () => console.log('A')); * d.promise.then(() => console.log('B')); * flow.execute( () => console.log('C')); * d.promise.then(() => console.log('D')); * * d.fulfill(); * }).then(function() { * console.log('fin'); * }); * // A * // B * // C * // D * // fin * * Callbacks attached to an _unresolved_ promise within a task function are * only weakly scheduled as subtasks and will be dropped if they reach the * front of the queue before the promise is resolved. In the example below, the * callbacks for `B` & `D` are dropped as sub-tasks since they are attached to * an unresolved promise when they reach the front of the task queue. * * var d = promise.defer(); * flow.execute(function() { * flow.execute( () => console.log('A')); * d.promise.then(() => console.log('B')); * flow.execute( () => console.log('C')); * d.promise.then(() => console.log('D')); * * setTimeout(d.fulfill, 20); * }).then(function() { * console.log('fin') * }); * // A * // C * // fin * // B * // D * * If a promise is resolved while a task function is on the call stack, any * previously registered and unqueued callbacks (i.e. either attached while no * task was on the call stack, or previously dropped as described above) act as * _interrupts_ and are inserted at the front of the task queue. If multiple * promises are fulfilled, their interrupts are enqueued in the order the * promises are resolved. * * var d1 = promise.defer(); * d1.promise.then(() => console.log('A')); * * var d2 = promise.defer(); * d2.promise.then(() => console.log('B')); * * flow.execute(function() { * d1.promise.then(() => console.log('C')); * flow.execute(() => console.log('D')); * }); * flow.execute(function() { * flow.execute(() => console.log('E')); * flow.execute(() => console.log('F')); * d1.fulfill(); * d2.fulfill(); * }).then(function() { * console.log('fin'); * }); * // D * // A * // C * // B * // E * // F * // fin * * Within a task function (or callback), each step of a promise chain acts as * an interrupt on the task queue: * * var d = promise.defer(); * flow.execute(function() { * d.promise. * then(() => console.log('A')). * then(() => console.log('B')). * then(() => console.log('C')). * then(() => console.log('D')); * * flow.execute(() => console.log('E')); * d.fulfill(); * }).then(function() { * console.log('fin'); * }); * // A * // B * // C * // D * // E * // fin * * If there are multiple promise chains derived from a single promise, they are * processed in the order created: * * var d = promise.defer(); * flow.execute(function() { * var chain = d.promise.then(() => console.log('A')); * * chain.then(() => console.log('B')). * then(() => console.log('C')); * * chain.then(() => console.log('D')). 
* then(() => console.log('E')); * * flow.execute(() => console.log('F')); * * d.fulfill(); * }).then(function() { * console.log('fin'); * }); * // A * // B * // C * // D * // E * // F * // fin * * Even though a subtask's promised result will never resolve while the task * function is on the stack, it will be treated as a promise resolved within the * task. In all other scenarios, a task's promise behaves just like a normal * promise. In the sample below, `C/D` is loggged before `B` because the * resolution of `subtask1` interrupts the flow of the enclosing task. Within * the final subtask, `E/F` is logged in order because `subtask1` is a resolved * promise when that task runs. * * flow.execute(function() { * var subtask1 = flow.execute(() => console.log('A')); * var subtask2 = flow.execute(() => console.log('B')); * * subtask1.then(() => console.log('C')); * subtask1.then(() => console.log('D')); * * flow.execute(function() { * flow.execute(() => console.log('E')); * subtask1.then(() => console.log('F')); * }); * }).then(function() { * console.log('fin'); * }); * // A * // C * // D * // B * // E * // F * // fin * * Finally, consider the following: * * var d = promise.defer(); * d.promise.then(() => console.log('A')); * d.promise.then(() => console.log('B')); * * flow.execute(function() { * flow.execute( () => console.log('C')); * d.promise.then(() => console.log('D')); * * flow.execute( () => console.log('E')); * d.promise.then(() => console.log('F')); * * d.fulfill(); * * flow.execute( () => console.log('G')); * d.promise.then(() => console.log('H')); * }).then(function() { * console.log('fin'); * }); * // A * // B * // C * // D * // E * // F * // G * // H * // fin * * In this example, callbacks are registered on `d.promise` both before and * during the invocation of the task function. When `d.fulfill()` is called, * the callbacks registered before the task (`A` & `B`) are registered as * interrupts. The remaining callbacks were all attached within the task and * are scheduled in the flow as standard tasks. * * ## Generator Support * * [Generators][GF] may be scheduled as tasks within a control flow or attached * as callbacks to a promise. Each time the generator yields a promise, the * control flow will wait for that promise to settle before executing the next * iteration of the generator. The yielded promise's fulfilled value will be * passed back into the generator: * * flow.execute(function* () { * var d = promise.defer(); * * setTimeout(() => console.log('...waiting...'), 25); * setTimeout(() => d.fulfill(123), 50); * * console.log('start: ' + Date.now()); * * var value = yield d.promise; * console.log('mid: %d; value = %d', Date.now(), value); * * yield promise.delayed(10); * console.log('end: ' + Date.now()); * }).then(function() { * console.log('fin'); * }); * // start: 0 * // ...waiting... * // mid: 50; value = 123 * // end: 60 * // fin * * Yielding the result of a promise chain will wait for the entire chain to * complete: * * promise.fulfilled().then(function* () { * console.log('start: ' + Date.now()); * * var value = yield flow. * execute(() => console.log('A')). * then( () => console.log('B')). 
* then( () => 123); * * console.log('mid: %s; value = %d', Date.now(), value); * * yield flow.execute(() => console.log('C')); * }).then(function() { * console.log('fin'); * }); * // start: 0 * // A * // B * // mid: 2; value = 123 * // C * // fin * * Yielding a _rejected_ promise will cause the rejected value to be thrown * within the generator function: * * flow.execute(function* () { * console.log('start: ' + Date.now()); * try { * yield promise.delayed(10).then(function() { * throw Error('boom'); * }); * } catch (ex) { * console.log('caught time: ' + Date.now()); * console.log(ex.message); * } * }); * // start: 0 * // caught time: 10 * // boom * * # Error Handling * * ES6 promises do not require users to handle a promise rejections. This can * result in subtle bugs as the rejections are silently "swallowed" by the * ManagedPromise class. * * ManagedPromise.reject(Error('boom')); * // ... *crickets* ... * * Selenium's promise module, on the other hand, requires that every rejection * be explicitly handled. When a {@linkplain ManagedPromise ManagedPromise} is * rejected and no callbacks are defined on that promise, it is considered an * _unhandled rejection_ and reproted to the active task queue. If the rejection * remains unhandled after a single turn of the [event loop][JSEL] (scheduled * with a micro-task), it will propagate up the stack. * * ## Error Propagation * * If an unhandled rejection occurs within a task function, that task's promised * result is rejected and all remaining subtasks are discarded: * * flow.execute(function() { * // No callbacks registered on promise -> unhandled rejection * promise.rejected(Error('boom')); * flow.execute(function() { console.log('this will never run'); }); * }).catch(function(e) { * console.log(e.message); * }); * // boom * * The promised results for discarded tasks are silently rejected with a * cancellation error and existing callback chains will never fire. * * flow.execute(function() { * promise.rejected(Error('boom')); * flow.execute(function() { console.log('a'); }). * then(function() { console.log('b'); }); * }).catch(function(e) { * console.log(e.message); * }); * // boom * * An unhandled rejection takes precedence over a task function's returned * result, even if that value is another promise: * * flow.execute(function() { * promise.rejected(Error('boom')); * return flow.execute(someOtherTask); * }).catch(function(e) { * console.log(e.message); * }); * // boom * * If there are multiple unhandled rejections within a task, they are packaged * in a {@link MultipleUnhandledRejectionError}, which has an `errors` property * that is a `Set` of the recorded unhandled rejections: * * flow.execute(function() { * promise.rejected(Error('boom1')); * promise.rejected(Error('boom2')); * }).catch(function(ex) { * console.log(ex instanceof MultipleUnhandledRejectionError); * for (var e of ex.errors) { * console.log(e.message); * } * }); * // boom1 * // boom2 * * When a subtask is discarded due to an unreported rejection in its parent * frame, the existing callbacks on that task will never settle and the * callbacks will not be invoked. If a new callback is attached ot the subtask * _after_ it has been discarded, it is handled the same as adding a callback * to a cancelled promise: the error-callback path is invoked. This behavior is * intended to handle cases where the user saves a reference to a task promise, * as illustrated below. 
* * var subTask; * flow.execute(function() { * promise.rejected(Error('boom')); * subTask = flow.execute(function() {}); * }).catch(function(e) { * console.log(e.message); * }).then(function() { * return subTask.then( * () => console.log('subtask success!'), * (e) => console.log('subtask failed:\n' + e)); * }); * // boom * // subtask failed: * // DiscardedTaskError: Task was discarded due to a previous failure: boom * * When a subtask fails, its promised result is treated the same as any other * promise: it must be handled within one turn of the rejection or the unhandled * rejection is propagated to the parent task. This means users can catch errors * from complex flows from the top level task: * * flow.execute(function() { * flow.execute(function() { * flow.execute(function() { * throw Error('fail!'); * }); * }); * }).catch(function(e) { * console.log(e.message); * }); * // fail! * * ## Unhandled Rejection Events * * When an unhandled rejection propagates to the root of the control flow, the * flow will emit an __uncaughtException__ event. If no listeners are registered * on the flow, the error will be rethrown to the global error handler: an * __uncaughtException__ event from the * [`process`](https://nodejs.org/api/process.html) object in node, or * `window.onerror` when running in a browser. * * Bottom line: you __*must*__ handle rejected promises. * * # ManagedPromise/A+ Compatibility * * This `promise` module is compliant with the [ManagedPromise/A+][] specification * except for sections `2.2.6.1` and `2.2.6.2`: * * > * > - `then` may be called multiple times on the same promise. * > - If/when `promise` is fulfilled, all respective `onFulfilled` callbacks * > must execute in the order of their originating calls to `then`. * > - If/when `promise` is rejected, all respective `onRejected` callbacks * > must execute in the order of their originating calls to `then`. * > * * Specifically, the conformance tests contains the following scenario (for * brevity, only the fulfillment version is shown): * * var p1 = ManagedPromise.resolve(); * p1.then(function() { * console.log('A'); * p1.then(() => console.log('B')); * }); * p1.then(() => console.log('C')); * // A * // C * // B * * Since the [ControlFlow](#scheduling_callbacks) executes promise callbacks as * tasks, with this module, the result would be * * var p2 = promise.fulfilled(); * p2.then(function() { * console.log('A'); * p2.then(() => console.log('B'); * }); * p2.then(() => console.log('C')); * // A * // B * // C * * [JSEL]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/EventLoop * [GF]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/function* * [ManagedPromise/A+]: https://promisesaplus.com/ */ 'use strict'; const events = require('./events'); const logging = require('./logging'); /** * Alias to help with readability and differentiate types. * @const */ const NativePromise = Promise; /** * Whether to append traces of `then` to rejection errors. * @type {boolean} */ var LONG_STACK_TRACES = false; // TODO: this should not be CONSTANT_CASE /** @const */ const LOG = logging.getLogger('promise'); const UNIQUE_IDS = new WeakMap; let nextId = 1; function getUid(obj) { let id = UNIQUE_IDS.get(obj); if (!id) { id = nextId; nextId += 1; UNIQUE_IDS.set(obj, id); } return id; } /** * Runs the given function after a micro-task yield. * @param {function()} fn The function to run. */ function asyncRun(fn) { NativePromise.resolve().then(function() { try { fn(); } catch (ignored) { // Do nothing. 
} }); } /** * @param {number} level What level of verbosity to log with. * @param {(string|function(this: T): string)} loggable The message to log. * @param {T=} opt_self The object in whose context to run the loggable * function. * @template T */ function vlog(level, loggable, opt_self) { var logLevel = logging.Level.FINE; if (level > 1) { logLevel = logging.Level.FINEST; } else if (level > 0) { logLevel = logging.Level.FINER; } if (typeof loggable === 'function') { loggable = loggable.bind(opt_self); } LOG.log(logLevel, loggable); } /** * Generates an error to capture the current stack trace. * @param {string} name Error name for this stack trace. * @param {string} msg Message to record. * @param {Function=} opt_topFn The function that should appear at the top of * the stack; only applicable in V8. * @return {!Error} The generated error. */ function captureStackTrace(name, msg, opt_topFn) { var e = Error(msg); e.name = name; if (Error.captureStackTrace) { Error.captureStackTrace(e, opt_topFn); } else { var stack = Error().stack; if (stack) { e.stack = e.toString(); e.stack += '\n' + stack; } } return e; } /** * Error used when the computation of a promise is cancelled. */ class CancellationError extends Error { /** * @param {string=} opt_msg The cancellation message. */ constructor(opt_msg) { super(opt_msg); /** @override */ this.name = this.constructor.name; /** @private {boolean} */ this.silent_ = false; } /** * Wraps the given error in a CancellationError. * * @param {*} error The error to wrap. * @param {string=} opt_msg The prefix message to use. * @return {!CancellationError} A cancellation error. */ static wrap(error, opt_msg) { var message; if (error instanceof CancellationError) { return new CancellationError( opt_msg ? (opt_msg + ': ' + error.message) : error.message); } else if (opt_msg) { message = opt_msg; if (error) { message += ': ' + error; } return new CancellationError(message); } if (error) { message = error + ''; } return new CancellationError(message); } } /** * Error used to cancel tasks when a control flow is reset. * @final */ class FlowResetError extends CancellationError { constructor() { super('ControlFlow was reset'); this.silent_ = true; } } /** * Error used to cancel tasks that have been discarded due to an uncaught error * reported earlier in the control flow. * @final */ class DiscardedTaskError extends CancellationError { /** @param {*} error The original error. */ constructor(error) { if (error instanceof DiscardedTaskError) { return /** @type {!DiscardedTaskError} */(error); } var msg = ''; if (error) { msg = ': ' + ( typeof error.message === 'string' ? error.message : error); } super('Task was discarded due to a previous failure' + msg); this.silent_ = true; } } /** * Error used when there are multiple unhandled promise rejections detected * within a task or callback. * * @final */ class MultipleUnhandledRejectionError extends Error { /** * @param {!(Set<*>)} errors The errors to report. */ constructor(errors) { super('Multiple unhandled promise rejections reported'); /** @override */ this.name = this.constructor.name; /** @type {!Set<*>} */ this.errors = errors; } } /** * Property used to flag constructor's as implementing the Thenable interface * for runtime type checking. * @const */ const IMPLEMENTED_BY_SYMBOL = Symbol('promise.Thenable'); /** * Thenable is a promise-like object with a {@code then} method which may be * used to schedule callbacks on a promised value. 
* * @interface * @extends {IThenable<T>} * @template T */ class Thenable { /** * Adds a property to a class prototype to allow runtime checks of whether * instances of that class implement the Thenable interface. This function * will also ensure the prototype's {@code then} function is exported from * compiled code. * @param {function(new: Thenable, ...?)} ctor The * constructor whose prototype to modify. */ static addImplementation(ctor) { ctor.prototype['then'] = ctor.prototype.then; try { ctor.prototype[IMPLEMENTED_BY_SYMBOL] = true; } catch (ignored) { // Property access denied? } } /** * Checks if an object has been tagged for implementing the Thenable * interface as defined by {@link Thenable.addImplementation}. * @param {*} object The object to test. * @return {boolean} Whether the object is an implementation of the Thenable * interface. */ static isImplementation(object) { if (!object) { return false; } try { return !!object[IMPLEMENTED_BY_SYMBOL]; } catch (e) { return false; // Property access seems to be forbidden. } } /** * Cancels the computation of this promise's value, rejecting the promise in * the process. This method is a no-op if the promise has already been * resolved. * * @param {(string|Error)=} opt_reason The reason this promise is being * cancelled. This value will be wrapped in a {@link CancellationError}. */ cancel(opt_reason) {} /** @return {boolean} Whether this promise's value is still being computed. */ isPending() {} /** * Registers listeners for when this instance is resolved. * * @param {?(function(T): (R|IThenable<R>))=} opt_callback The * function to call if this promise is successfully resolved. The function * should expect a single argument: the promise's resolved value. * @param {?(function(*): (R|IThenable<R>))=} opt_errback * The function to call if this promise is rejected. The function should * expect a single argument: the rejection reason. * @return {!ManagedPromise<R>} A new promise which will be * resolved with the result of the invoked callback. * @template R */ then(opt_callback, opt_errback) {} /** * Registers a listener for when this promise is rejected. This is synonymous * with the {@code catch} clause in a synchronous API: * * // Synchronous API: * try { * doSynchronousWork(); * } catch (ex) { * console.error(ex); * } * * // Asynchronous promise API: * doAsynchronousWork().catch(function(ex) { * console.error(ex); * }); * * @param {function(*): (R|IThenable<R>)} errback The * function to call if this promise is rejected. The function should * expect a single argument: the rejection reason. * @return {!ManagedPromise<R>} A new promise which will be * resolved with the result of the invoked callback. * @template R */ catch(errback) {} /** * Registers a listener to invoke when this promise is resolved, regardless * of whether the promise's value was successfully computed. 
This function * is synonymous with the {@code finally} clause in a synchronous API: * * // Synchronous API: * try { * doSynchronousWork(); * } finally { * cleanUp(); * } * * // Asynchronous promise API: * doAsynchronousWork().finally(cleanUp); * * __Note:__ similar to the {@code finally} clause, if the registered * callback returns a rejected promise or throws an error, it will silently * replace the rejection error (if any) from this promise: * * try { * throw Error('one'); * } finally { * throw Error('two'); // Hides Error: one * } * * promise.rejected(Error('one')) * .finally(function() { * throw Error('two'); // Hides Error: one * }); * * @param {function(): (R|IThenable<R>)} callback The function to call when * this promise is resolved. * @return {!ManagedPromise<R>} A promise that will be fulfilled * with the callback result. * @template R */ finally(callback) {} } /** * @enum {string} */ const PromiseState = { PENDING: 'pending', BLOCKED: 'blocked', REJECTED: 'rejected', FULFILLED: 'fulfilled' }; /** * Internal map used to store cancellation handlers for {@link ManagedPromise} * objects. This is an internal implementation detail used by the * {@link TaskQueue} class to monitor for when a promise is cancelled without * generating an extra promise via then(). * * @const {!WeakMap<!ManagedPromise, function(!CancellationError)>} */ const ON_CANCEL_HANDLER = new WeakMap; /** * Represents the eventual value of a completed operation. Each promise may be * in one of three states: pending, fulfilled, or rejected. Each promise starts * in the pending state and may make a single transition to either a * fulfilled or rejected state, at which point the promise is considered * resolved. * * @implements {Thenable<T>} * @template T * @see http://promises-aplus.github.io/promises-spec/ */ class ManagedPromise { /** * @param {function( * function((T|IThenable<T>|Thenable)=), * function(*=))} resolver * Function that is invoked immediately to begin computation of this * promise's value. The function should accept a pair of callback * functions, one for fulfilling the promise and another for rejecting it. * @param {ControlFlow=} opt_flow The control flow * this instance was created under. Defaults to the currently active flow. */ constructor(resolver, opt_flow) { getUid(this); /** @private {!ControlFlow} */ this.flow_ = opt_flow || controlFlow(); /** @private {Error} */ this.stack_ = null; if (LONG_STACK_TRACES) { this.stack_ = captureStackTrace('ManagedPromise', 'new', this.constructor); } /** @private {Thenable<?>} */ this.parent_ = null; /** @private {Array<!Task>} */ this.callbacks_ = null; /** @private {PromiseState} */ this.state_ = PromiseState.PENDING; /** @private {boolean} */ this.handled_ = false; /** @private {*} */ this.value_ = undefined; /** @private {TaskQueue} */ this.queue_ = null; try { var self = this; resolver(function(value) { self.resolve_(PromiseState.FULFILLED, value); }, function(reason) { self.resolve_(PromiseState.REJECTED, reason); }); } catch (ex) { this.resolve_(PromiseState.REJECTED, ex); } } /** @override */ toString() { return 'ManagedPromise::' + getUid(this) + ' {[[PromiseStatus]]: "' + this.state_ + '"}'; } /** * Resolves this promise. If the new value is itself a promise, this function * will wait for it to be resolved before notifying the registered listeners. * @param {PromiseState} newState The promise's new state. * @param {*} newValue The promise's new value. * @throws {TypeError} If {@code newValue === this}. 
* @private */ resolve_(newState, newValue) { if (PromiseState.PENDING !== this.state_) { return; } if (newValue === this) { // See promise a+, 2.3.1 // http://promises-aplus.github.io/promises-spec/#point-48 newValue = new TypeError('A promise may not resolve to itself'); newState = PromiseState.REJECTED; } this.parent_ = null; this.state_ = PromiseState.BLOCKED; if (newState !== PromiseState.REJECTED) { if (Thenable.isImplementation(newValue)) { // 2.3.2 newValue = /** @type {!Thenable} */(newValue); this.parent_ = newValue; newValue.then( this.unblockAndResolve_.bind(this, PromiseState.FULFILLED), this.unblockAndResolve_.bind(this, PromiseState.REJECTED)); return; } else if (newValue && (typeof newValue === 'object' || typeof newValue === 'function')) { // 2.3.3 try { // 2.3.3.1 var then = newValue['then']; } catch (e) { // 2.3.3.2 this.state_ = PromiseState.REJECTED; this.value_ = e; this.scheduleNotifications_(); return; } if (typeof then === 'function') { // 2.3.3.3 this.invokeThen_(/** @type {!Object} */(newValue), then); return; } } } if (newState === PromiseState.REJECTED && isError(newValue) && newValue.stack && this.stack_) { newValue.stack += '\nFrom: ' + (this.stack_.stack || this.stack_); } // 2.3.3.4 and 2.3.4 this.state_ = newState; this.value_ = newValue; this.scheduleNotifications_(); } /** * Invokes a thenable's "then" method according to 2.3.3.3 of the promise * A+ spec. * @param {!Object} x The thenable object. * @param {!Function} then The "then" function to invoke. * @private */ invokeThen_(x, then) { var called = false; var self = this; var resolvePromise = function(value) { if (!called) { // 2.3.3.3.3 called = true; // 2.3.3.3.1 self.unblockAndResolve_(PromiseState.FULFILLED, value); } }; var rejectPromise = function(reason) { if (!called) { // 2.3.3.3.3 called = true; // 2.3.3.3.2 self.unblockAndResolve_(PromiseState.REJECTED, reason); } }; try { // 2.3.3.3 then.call(x, resolvePromise, rejectPromise); } catch (e) { // 2.3.3.3.4.2 rejectPromise(e); } } /** * @param {PromiseState} newState The promise's new state. * @param {*} newValue The promise's new value. 
* @private */ unblockAndResolve_(newState, newValue) { if (this.state_ === PromiseState.BLOCKED) { this.state_ = PromiseState.PENDING; this.resolve_(newState, newValue); } } /** * @private */ scheduleNotifications_() { vlog(2, () => this + ' scheduling notifications', this); ON_CANCEL_HANDLER.delete(this); if (this.value_ instanceof CancellationError && this.value_.silent_) { this.callbacks_ = null; } if (!this.queue_) { this.queue_ = this.flow_.getActiveQueue_(); } if (!this.handled_ && this.state_ === PromiseState.REJECTED && !(this.value_ instanceof CancellationError)) { this.queue_.addUnhandledRejection(this); } this.queue_.scheduleCallbacks(this); } /** @override */ cancel(opt_reason) { if (!canCancel(this)) { return; } if (this.parent_ && canCancel(this.parent_)) { this.parent_.cancel(opt_reason); } else { var reason = CancellationError.wrap(opt_reason); let onCancel = ON_CANCEL_HANDLER.get(this); if (onCancel) { onCancel(reason); ON_CANCEL_HANDLER.delete(this); } if (this.state_ === PromiseState.BLOCKED) { this.unblockAndResolve_(PromiseState.REJECTED, reason); } else { this.resolve_(PromiseState.REJECTED, reason); } } function canCancel(promise) { if (!(promise instanceof ManagedPromise)) { return Thenable.isImplementation(promise); } return promise.state_ === PromiseState.PENDING || promise.state_ === PromiseState.BLOCKED; } } /** @override */ isPending() { return this.state_ === PromiseState.PENDING; } /** @override */ then(opt_callback, opt_errback) { return this.addCallback_( opt_callback, opt_errback, 'then', ManagedPromise.prototype.then); } /** @override */ catch(errback) { return this.addCallback_( null, errback, 'catch', ManagedPromise.prototype.catch); } /** @override */ finally(callback) { var error; var mustThrow = false; return this.then(function() { return callback(); }, function(err) { error = err; mustThrow = true; return callback(); }).then(function() { if (mustThrow) { throw error; } }); } /** * Registers a new callback with this promise * @param {(function(T): (R|IThenable<R>)|null|undefined)} callback The * fulfillment callback. * @param {(function(*): (R|IThenable<R>)|null|undefined)} errback The * rejection callback. * @param {string} name The callback name. * @param {!Function} fn The function to use as the top of the stack when * recording the callback's creation point. * @return {!ManagedPromise<R>} A new promise which will be resolved with the * esult of the invoked callback. * @template R * @private */ addCallback_(callback, errback, name, fn) { if (typeof callback !== 'function' && typeof errback !== 'function') { return this; } this.handled_ = true; if (this.queue_) { this.queue_.clearUnhandledRejection(this); } var cb = new Task( this.flow_, this.invokeCallback_.bind(this, callback, errback), name, LONG_STACK_TRACES ? {name: 'Promise', top: fn} : undefined); cb.promise.parent_ = this; if (this.state_ !== PromiseState.PENDING && this.state_ !== PromiseState.BLOCKED) { this.flow_.getActiveQueue_().enqueue(cb); } else { if (!this.callbacks_) { this.callbacks_ = []; } this.callbacks_.push(cb); cb.blocked = true; this.flow_.getActiveQueue_().enqueue(cb); } return cb.promise; } /** * Invokes a callback function attached to this promise. * @param {(function(T): (R|IThenable<R>)|null|undefined)} callback The * fulfillment callback. * @param {(function(*): (R|IThenable<R>)|null|undefined)} errback The * rejection callback. 
* @template R * @private */ invokeCallback_(callback, errback) { var callbackFn = callback; if (this.state_ === PromiseState.REJECTED) { callbackFn = errback; } if (typeof callbackFn === 'function') { if (isGenerator(callbackFn)) { return consume(callbackFn, null, this.value_); } return callbackFn(this.value_); } else if (this.state_ === PromiseState.REJECTED) { throw this.value_; } else { return this.value_; } } } Thenable.addImplementation(ManagedPromise); /** * Represents a value that will be resolved at some point in the future. This * class represents the protected "producer" half of a ManagedPromise - each Deferred * has a {@code promise} property that may be returned to consumers for * registering callbacks, reserving the ability to resolve the deferred to the * producer. * * If this Deferred is rejected and there are no listeners registered before * the next turn of the event loop, the rejection will be passed to the * {@link ControlFlow} as an unhandled failure. * * @template T */ class Deferred { /** * @param {ControlFlow=} opt_flow The control flow this instance was * created under. This should only be provided during unit tests. */ constructor(opt_flow) { var fulfill, reject; /** @type {!ManagedPromise<T>} */ this.promise = new ManagedPromise(function(f, r) { fulfill = f; reject = r; }, opt_flow); var self = this; var checkNotSelf = function(value) { if (value === self) { throw new TypeError('May not resolve a Deferred with itself'); } }; /** * Resolves this deferred with the given value. It is safe to call this as a * normal function (with no bound "this"). * @param {(T|IThenable<T>|Thenable)=} opt_value The fulfilled value. */ this.fulfill = function(opt_value) { checkNotSelf(opt_value); fulfill(opt_value); }; /** * Rejects this promise with the given reason. It is safe to call this as a * normal function (with no bound "this"). * @param {*=} opt_reason The rejection reason. */ this.reject = function(opt_reason) { checkNotSelf(opt_reason); reject(opt_reason); }; } } /** * Tests if a value is an Error-like object. This is more than an straight * instanceof check since the value may originate from another context. * @param {*} value The value to test. * @return {boolean} Whether the value is an error. */ function isError(value) { return value instanceof Error || (!!value && typeof value === 'object' && typeof value.message === 'string'); } /** * Determines whether a {@code value} should be treated as a promise. * Any object whose "then" property is a function will be considered a promise. * * @param {?} value The value to test. * @return {boolean} Whether the value is a promise. */ function isPromise(value) { try { // Use array notation so the Closure compiler does not obfuscate away our // contract. return value && (typeof value === 'object' || typeof value === 'function') && typeof value['then'] === 'function'; } catch (ex) { return false; } } /** * Creates a promise that will be resolved at a set time in the future. * @param {number} ms The amount of time, in milliseconds, to wait before * resolving the promise. * @return {!ManagedPromise} The promise. */ function delayed(ms) { var key; return new ManagedPromise(function(fulfill) { key = setTimeout(function() { key = null; fulfill(); }, ms); }).catch(function(e) { clearTimeout(key); key = null; throw e; }); } /** * Creates a new deferred object. * @return {!Deferred<T>} The new deferred object. * @template T */ function defer() { return new Deferred(); } /** * Creates a promise that has been resolved with the given value. 
* @param {T=} opt_value The resolved value. * @return {!ManagedPromise<T>} The resolved promise. * @template T */ function fulfilled(opt_value) { if (opt_value instanceof ManagedPromise) { return opt_value; } return new ManagedPromise(function(fulfill) { fulfill(opt_value); }); } /** * Creates a promise that has been rejected with the given reason. * @param {*=} opt_reason The rejection reason; may be any value, but is * usually an Error or a string. * @return {!ManagedPromise<T>} The rejected promise. * @template T */ function rejected(opt_reason) { if (opt_reason instanceof ManagedPromise) { return opt_reason; } return new ManagedPromise(function(_, reject) { reject(opt_reason); }); } /** * Wraps a function that expects a node-style callback as its final * argument. This callback expects two arguments: an error value (which will be * null if the call succeeded), and the success value as the second argument. * The callback will the resolve or reject the returned promise, based on its * arguments. * @param {!Function} fn The function to wrap. * @param {...?} var_args The arguments to apply to the function, excluding the * final callback. * @return {!ManagedPromise} A promise that will be resolved with the * result of the provided function's callback. */ function checkedNodeCall(fn, var_args) { let args = Array.prototype.slice.call(arguments, 1); return new ManagedPromise(function(fulfill, reject) { try { args.push(function(error, value) { error ? reject(error) : fulfill(value); }); fn.apply(undefined, args); } catch (ex) { reject(ex); } }); } /** * Registers an observer on a promised {@code value}, returning a new promise * that will be resolved when the value is. If {@code value} is not a promise, * then the return promise will be immediately resolved. * @param {*} value The value to observe. * @param {Function=} opt_callback The function to call when the value is * resolved successfully. * @param {Function=} opt_errback The function to call when the value is * rejected. * @return {!ManagedPromise} A new promise. */ function when(value, opt_callback, opt_errback) { if (Thenable.isImplementation(value)) { return value.then(opt_callback, opt_errback); } return new ManagedPromise(function(fulfill) { fulfill(value); }).then(opt_callback, opt_errback); } /** * Invokes the appropriate callback function as soon as a promised `value` is * resolved. This function is similar to `when()`, except it does not return * a new promise. * @param {*} value The value to observe. * @param {Function} callback The function to call when the value is * resolved successfully. * @param {Function=} opt_errback The function to call when the value is * rejected. */ function asap(value, callback, opt_errback) { if (isPromise(value)) { value.then(callback, opt_errback); } else if (callback) { callback(value); } } /** * Given an array of promises, will return a promise that will be fulfilled * with the fulfillment values of the input array's values. If any of the * input array's promises are rejected, the returned promise will be rejected * with the same reason. * * @param {!Array<(T|!ManagedPromise<T>)>} arr An array of * promises to wait on. * @return {!ManagedPromise<!Array<T>>} A promise that is * fulfilled with an array containing the fulfilled values of the * input array, or rejected with the same reason as the first * rejected value. 
* @template T */ function all(arr) { return new ManagedPromise(function(fulfill, reject) { var n = arr.length; var values = []; if (!n) { fulfill(values); return; } var toFulfill = n; var onFulfilled = function(index, value) { values[index] = value; toFulfill--; if (toFulfill == 0) { fulfill(values); } }; function processPromise(index) { asap(arr[index], function(value) { onFulfilled(index, value); }, reject); } for (var i = 0; i < n; ++i) { processPromise(i); } }); } /** * Calls a function for each element in an array and inserts the result into a * new array, which is used as the fulfillment value of the promise returned * by this function. * * If the return value of the mapping function is a promise, this function * will wait for it to be fulfilled before inserting it into the new array. * * If the mapping function throws or returns a rejected promise, the * promise returned by this function will be rejected with the same reason. * Only the first failure will be reported; all subsequent errors will be * silently ignored. * * @param {!(Array<TYPE>|ManagedPromise<!Array<TYPE>>)} arr The * array to iterator over, or a promise that will resolve to said array. * @param {function(this: SELF, TYPE, number, !Array<TYPE>): ?} fn The * function to call for each element in the array. This function should * expect three arguments (the element, the index, and the array itself. * @param {SELF=} opt_self The object to be used as the value of 'this' within * {@code fn}. * @template TYPE, SELF */ function map(arr, fn, opt_self) { return fulfilled(arr).then(function(v) { if (!Array.isArray(v)) { throw TypeError('not an array'); } var arr = /** @type {!Array} */(v); return new ManagedPromise(function(fulfill, reject) { var n = arr.length; var values = new Array(n); (function processNext(i) { for (; i < n; i++) { if (i in arr) { break; } } if (i >= n) { fulfill(values); return; } try { asap( fn.call(opt_self, arr[i], i, /** @type {!Array} */(arr)), function(value) { values[i] = value; processNext(i + 1); }, reject); } catch (ex) { reject(ex); } })(0); }); }); } /** * Calls a function for each element in an array, and if the function returns * true adds the element to a new array. * * If the return value of the filter function is a promise, this function * will wait for it to be fulfilled before determining whether to insert the * element into the new array. * * If the filter function throws or returns a rejected promise, the promise * returned by this function will be rejected with the same reason. Only the * first failure will be reported; all subsequent errors will be silently * ignored. * * @param {!(Array<TYPE>|ManagedPromise<!Array<TYPE>>)} arr The * array to iterator over, or a promise that will resolve to said array. * @param {function(this: SELF, TYPE, number, !Array<TYPE>): ( * boolean|ManagedPromise<boolean>)} fn The function * to call for each element in the array. * @param {SELF=} opt_self The object to be used as the value of 'this' within * {@code fn}. 
* @template TYPE, SELF */ function filter(arr, fn, opt_self) { return fulfilled(arr).then(function(v) { if (!Array.isArray(v)) { throw TypeError('not an array'); } var arr = /** @type {!Array} */(v); return new ManagedPromise(function(fulfill, reject) { var n = arr.length; var values = []; var valuesLength = 0; (function processNext(i) { for (; i < n; i++) { if (i in arr) { break; } } if (i >= n) { fulfill(values); return; } try { var value = arr[i]; var include = fn.call(opt_self, value, i, /** @type {!Array} */(arr)); asap(include, function(include) { if (include) { values[valuesLength++] = value; } processNext(i + 1); }, reject); } catch (ex) { reject(ex); } })(0); }); }); } /** * Returns a promise that will be resolved with the input value in a * fully-resolved state. If the value is an array, each element will be fully * resolved. Likewise, if the value is an object, all keys will be fully * resolved. In both cases, all nested arrays and objects will also be * fully resolved. All fields are resolved in place; the returned promise will * resolve on {@code value} and not a copy. * * Warning: This function makes no checks against objects that contain * cyclical references: * * var value = {}; * value['self'] = value; * promise.fullyResolved(value); // Stack overflow. * * @param {*} value The value to fully resolve. * @return {!ManagedPromise} A promise for a fully resolved version * of the input value. */ function fullyResolved(value) { if (isPromise(value)) { return when(value, fullyResolveValue); } return fullyResolveValue(value); } /** * @param {*} value The value to fully resolve. If a promise, assumed to * already be resolved. * @return {!ManagedPromise} A promise for a fully resolved version * of the input value. */ function fullyResolveValue(value) { if (Array.isArray(value)) { return fullyResolveKeys(/** @type {!Array} */ (value)); } if (isPromise(value)) { if (isPromise(value)) { // We get here when the original input value is a promise that // resolves to itself. When the user provides us with such a promise, // trust that it counts as a "fully resolved" value and return it. // Of course, since it's already a promise, we can just return it // to the user instead of wrapping it in another promise. return /** @type {!ManagedPromise} */ (value); } } if (value && typeof value === 'object') { return fullyResolveKeys(/** @type {!Object} */ (value)); } if (typeof value === 'function') { return fullyResolveKeys(/** @type {!Object} */ (value)); } return fulfilled(value); } /** * @param {!(Array|Object)} obj the object to resolve. * @return {!ManagedPromise} A promise that will be resolved with the * input object once all of its values have been fully resolved. */ function fullyResolveKeys(obj) { var isArray = Array.isArray(obj); var numKeys = isArray ? obj.length : (function() { let n = 0; for (let key in obj) { n += 1; } return n; })(); if (!numKeys) { return fulfilled(obj); } function forEachProperty(obj, fn) { for (let key in obj) { fn.call(null, obj[key], key, obj); } } function forEachElement(arr, fn) { arr.forEach(fn); } var numResolved = 0; return new ManagedPromise(function(fulfill, reject) { var forEachKey = isArray ? 
forEachElement: forEachProperty; forEachKey(obj, function(partialValue, key) { if (!Array.isArray(partialValue) && (!partialValue || typeof partialValue !== 'object')) { maybeResolveValue(); return; } fullyResolved(partialValue).then( function(resolvedValue) { obj[key] = resolvedValue; maybeResolveValue(); }, reject); }); function maybeResolveValue() { if (++numResolved == numKeys) { fulfill(obj); } } }); } ////////////////////////////////////////////////////////////////////////////// // // ControlFlow // ////////////////////////////////////////////////////////////////////////////// /** * Handles the execution of scheduled tasks, each of which may be an * asynchronous operation. The control flow will ensure tasks are executed in * the ordered scheduled, starting each task only once those before it have * completed. * * Each task scheduled within this flow may return a {@link ManagedPromise} to * indicate it is an asynchronous operation. The ControlFlow will wait for such * promises to be resolved before marking the task as completed. * * Tasks and each callback registered on a {@link ManagedPromise} will be run * in their own ControlFlow frame. Any tasks scheduled within a frame will take * priority over previously scheduled tasks. Furthermore, if any of the tasks in * the frame fail, the remainder of the tasks in that frame will be discarded * and the failure will be propagated to the user through the callback/task's * promised result. * * Each time a ControlFlow empties its task queue, it will fire an * {@link ControlFlow.EventType.IDLE IDLE} event. Conversely, * whenever the flow terminates due to an unhandled error, it will remove all * remaining tasks in its queue and fire an * {@link ControlFlow.EventType.UNCAUGHT_EXCEPTION UNCAUGHT_EXCEPTION} event. * If there are no listeners registered with the flow, the error will be * rethrown to the global error handler. * * Refer to the {@link ./promise} module documentation fora detailed * explanation of how the ControlFlow coordinates task execution. * * @final */ class ControlFlow extends events.EventEmitter { constructor() { super(); /** @private {boolean} */ this.propagateUnhandledRejections_ = true; /** @private {TaskQueue} */ this.activeQueue_ = null; /** @private {Set<TaskQueue>} */ this.taskQueues_ = null; /** * Micro task that controls shutting down the control flow. Upon shut down, * the flow will emit an * {@link ControlFlow.EventType.IDLE} event. Idle events * always follow a brief timeout in order to catch latent errors from the * last completed task. If this task had a callback registered, but no * errback, and the task fails, the unhandled failure would not be reported * by the promise system until the next turn of the event loop: * * // Schedule 1 task that fails. * var result = promise.controlFlow().schedule('example', * function() { return promise.rejected('failed'); }); * // Set a callback on the result. This delays reporting the unhandled * // failure for 1 turn of the event loop. * result.then(function() {}); * * @private {MicroTask} */ this.shutdownTask_ = null; /** * ID for a long running interval used to keep a Node.js process running * while a control flow's event loop is still working. This is a cheap hack * required since JS events are only scheduled to run when there is * _actually_ something to run. When a control flow is waiting on a task, * there will be nothing in the JS event loop and the process would * terminate without this. 
* @private */ this.hold_ = null; } /** * Returns a string representation of this control flow, which is its current * {@linkplain #getSchedule() schedule}, sans task stack traces. * @return {string} The string representation of this contorl flow. * @override */ toString() { return this.getSchedule(); } /** * Sets whether any unhandled rejections should propagate up through the * control flow stack and cause rejections within parent tasks. If error * propagation is disabled, tasks will not be aborted when an unhandled * promise rejection is detected, but the rejection _will_ trigger an * {@link ControlFlow.EventType.UNCAUGHT_EXCEPTION} * event. * * The default behavior is to propagate all unhandled rejections. _The use * of this option is highly discouraged._ * * @param {boolean} propagate whether to propagate errors. */ setPropagateUnhandledRejections(propagate) { this.propagateUnhandledRejections_ = propagate; } /** * @return {boolean} Whether this flow is currently idle. */ isIdle() { return !this.shutdownTask_ && (!this.taskQueues_ || !this.taskQueues_.size); } /** * Resets this instance, clearing its queue and removing all event listeners. */ reset() { this.cancelQueues_(new FlowResetError); this.emit(ControlFlow.EventType.RESET); this.removeAllListeners(); this.cancelShutdown_(); } /** * Generates an annotated string describing the internal state of this control * flow, including the currently executing as well as pending tasks. If * {@code opt_includeStackTraces === true}, the string will include the * stack trace from when each task was scheduled. * @param {string=} opt_includeStackTraces Whether to include the stack traces * from when each task was scheduled. Defaults to false. * @return {string} String representation of this flow's internal state. */ getSchedule(opt_includeStackTraces) { var ret = 'ControlFlow::' + getUid(this); var activeQueue = this.activeQueue_; if (!this.taskQueues_ || !this.taskQueues_.size) { return ret; } var childIndent = '| '; for (var q of this.taskQueues_) { ret += '\n' + printQ(q, childIndent); } return ret; function printQ(q, indent) { var ret = q.toString(); if (q === activeQueue) { ret = '(active) ' + ret; } var prefix = indent + childIndent; if (q.pending_) { if (q.pending_.q.state_ !== TaskQueueState.FINISHED) { ret += '\n' + prefix + '(pending) ' + q.pending_.task; ret += '\n' + printQ(q.pending_.q, prefix + childIndent); } else { ret += '\n' + prefix + '(blocked) ' + q.pending_.task; } } if (q.interrupts_) { q.interrupts_.forEach((task) => { ret += '\n' + prefix + task; }); } if (q.tasks_) { q.tasks_.forEach((task) => ret += printTask(task, '\n' + prefix)); } return indent + ret; } function printTask(task, prefix) { var ret = prefix + task; if (opt_includeStackTraces && task.promise.stack_) { ret += prefix + childIndent + (task.promise.stack_.stack || task.promise.stack_) .replace(/\n/g, prefix); } return ret; } } /** * Returns the currently actively task queue for this flow. If there is no * active queue, one will be created. * @return {!TaskQueue} the currently active task queue for this flow. * @private */ getActiveQueue_() { if (this.activeQueue_) { return this.activeQueue_; } this.activeQueue_ = new TaskQueue(this); if (!this.taskQueues_) { this.taskQueues_ = new Set(); } this.taskQueues_.add(this.activeQueue_); this.activeQueue_ .once('end', this.onQueueEnd_, this) .once('error', this.onQueueError_, this); asyncRun(() => this.activeQueue_ = null); this.activeQueue_.start(); return this.activeQueue_; } /** * Schedules a task for execution. 
If there is nothing currently in the * queue, the task will be executed in the next turn of the event loop. If * the task function is a generator, the task will be executed using * {@link ./promise.consume consume()}. * * @param {function(): (T|IThenable<T>)} fn The function to * call to start the task. If the function returns a promise, * this instance will wait for it to be resolved before starting the * next task. * @param {string=} opt_description A description of the task. * @return {!Thenable<T>} A promise that will be resolved * with the result of the action. * @template T */ execute(fn, opt_description) { if (isGenerator(fn)) { let original = fn; fn = () => consume(original); } if (!this.hold_) { var holdIntervalMs = 2147483647; // 2^31-1; max timer length for Node.js this.hold_ = setInterval(function() {}, holdIntervalMs); } var task = new Task( this, fn, opt_description || '<anonymous>', {name: 'Task', top: ControlFlow.prototype.execute}); var q = this.getActiveQueue_(); q.enqueue(task); this.emit(ControlFlow.EventType.SCHEDULE_TASK, task.description); return task.promise; } /** * Inserts a {@code setTimeout} into the command queue. This is equivalent to * a thread sleep in a synchronous programming language. * * @param {number} ms The timeout delay, in milliseconds. * @param {string=} opt_description A description to accompany the timeout. * @return {!Thenable} A promise that will be resolved with * the result of the action. */ timeout(ms, opt_description) { return this.execute(function() { return delayed(ms); }, opt_description); } /** * Schedules a task that shall wait for a condition to hold. Each condition * function may return any value, but it will always be evaluated as a * boolean. * * Condition functions may schedule sub-tasks with this instance, however, * their execution time will be factored into whether a wait has timed out. * * In the event a condition returns a ManagedPromise, the polling loop will wait for * it to be resolved before evaluating whether the condition has been * satisfied. The resolution time for a promise is factored into whether a * wait has timed out. * * If the condition function throws, or returns a rejected promise, the * wait task will fail. * * If the condition is defined as a promise, the flow will wait for it to * settle. If the timeout expires before the promise settles, the promise * returned by this function will be rejected. * * If this function is invoked with `timeout === 0`, or the timeout is * omitted, the flow will wait indefinitely for the condition to be satisfied. * * @param {(!IThenable<T>|function())} condition The condition to poll, * or a promise to wait on. * @param {number=} opt_timeout How long to wait, in milliseconds, for the * condition to hold before timing out. If omitted, the flow will wait * indefinitely. * @param {string=} opt_message An optional error message to include if the * wait times out; defaults to the empty string. * @return {!Thenable<T>} A promise that will be fulfilled * when the condition has been satisified. The promise shall be rejected * if the wait times out waiting for the condition. * @throws {TypeError} If condition is not a function or promise or if timeout * is not a number >= 0. 
* @template T */ wait(condition, opt_timeout, opt_message) { var timeout = opt_timeout || 0; if (typeof timeout !== 'number' || timeout < 0) { throw TypeError('timeout must be a number >= 0: ' + timeout); } if (isPromise(condition)) { return this.execute(function() { if (!timeout) { return condition; } return new ManagedPromise(function(fulfill, reject) { var start = Date.now(); var timer = setTimeout(function() { timer = null; reject(Error((opt_message ? opt_message + '\n' : '') + 'Timed out waiting for promise to resolve after ' + (Date.now() - start) + 'ms')); }, timeout); /** @type {Thenable} */(condition).then( function(value) { timer && clearTimeout(timer); fulfill(value); }, function(error) { timer && clearTimeout(timer); reject(error); }); }); }, opt_message || '<anonymous wait: promise resolution>'); } if (typeof condition !== 'function') { throw TypeError('Invalid condition; must be a function or promise: ' + typeof condition); } if (isGenerator(condition)) { let original = condition; condition = () => consume(original); } var self = this; return this.execute(function() { var startTime = Date.now(); return new ManagedPromise(function(fulfill, reject) { pollCondition(); function pollCondition() { var conditionFn = /** @type {function()} */(condition); self.execute(conditionFn).then(function(value) { var elapsed = Date.now() - startTime; if (!!value) { fulfill(value); } else if (timeout && elapsed >= timeout) { reject(new Error((opt_message ? opt_message + '\n' : '') + 'Wait timed out after ' + elapsed + 'ms')); } else { // Do not use asyncRun here because we need a non-micro yield // here so the UI thread is given a chance when running in a // browser. setTimeout(pollCondition, 0); } }, reject); } }); }, opt_message || '<anonymous wait>'); } /** * Executes a function in the next available turn of the JavaScript event * loop. This ensures the function runs with its own task queue and any * scheduled tasks will run in "parallel" to those scheduled in the current * function. * * flow.execute(() => console.log('a')); * flow.execute(() => console.log('b')); * flow.execute(() => console.log('c')); * flow.async(() => { * flow.execute(() => console.log('d')); * flow.execute(() => console.log('e')); * }); * flow.async(() => { * flow.execute(() => console.log('f')); * flow.execute(() => console.log('g')); * }); * flow.once('idle', () => console.log('fin')); * // a * // d * // f * // b * // e * // g * // c * // fin * * If the function itself throws, the error will be treated the same as an * unhandled rejection within the control flow. * * __NOTE__: This function is considered _unstable_. * * @param {!Function} fn The function to execute. * @param {Object=} opt_self The object in whose context to run the function. * @param {...*} var_args Any arguments to pass to the function. */ async(fn, opt_self, var_args) { asyncRun(() => { // Clear any lingering queues, forces getActiveQueue_ to create a new one. this.activeQueue_ = null; var q = this.getActiveQueue_(); try { q.execute_(fn.bind(opt_self, var_args)); } catch (ex) { var cancellationError = CancellationError.wrap(ex, 'Function passed to ControlFlow.async() threw'); cancellationError.silent_ = true; q.abort_(cancellationError); } finally { this.activeQueue_ = null; } }); } /** * Event handler for when a task queue is exhausted. 
This starts the shutdown * sequence for this instance if there are no remaining task queues: after * one turn of the event loop, this object will emit the * {@link ControlFlow.EventType.IDLE IDLE} event to signal * listeners that it has completed. During this wait, if another task is * scheduled, the shutdown will be aborted. * * @param {!TaskQueue} q the completed task queue. * @private */ onQueueEnd_(q) { if (!this.taskQueues_) { return; } this.taskQueues_.delete(q); vlog(1, () => q + ' has finished'); vlog(1, () => this.taskQueues_.size + ' queues remain\n' + this, this); if (!this.taskQueues_.size) { if (this.shutdownTask_) { throw Error('Already have a shutdown task??'); } vlog(1, () => 'Scheduling shutdown\n' + this); this.shutdownTask_ = new MicroTask(() => this.shutdown_()); } } /** * Event handler for when a task queue terminates with an error. This triggers * the cancellation of all other task queues and a * {@link ControlFlow.EventType.UNCAUGHT_EXCEPTION} event. * If there are no error event listeners registered with this instance, the * error will be rethrown to the global error handler. * * @param {*} error the error that caused the task queue to terminate. * @param {!TaskQueue} q the task queue. * @private */ onQueueError_(error, q) { if (this.taskQueues_) { this.taskQueues_.delete(q); } this.cancelQueues_(CancellationError.wrap( error, 'There was an uncaught error in the control flow')); this.cancelShutdown_(); this.cancelHold_(); setTimeout(() => { let listeners = this.listeners(ControlFlow.EventType.UNCAUGHT_EXCEPTION); if (!listeners.size) { throw error; } else { this.reportUncaughtException_(error); } }, 0); } /** * Cancels all remaining task queues. * @param {!CancellationError} reason The cancellation reason. * @private */ cancelQueues_(reason) { reason.silent_ = true; if (this.taskQueues_) { for (var q of this.taskQueues_) { q.removeAllListeners(); q.abort_(reason); } this.taskQueues_.clear(); this.taskQueues_ = null; } } /** * Reports an uncaught exception using a * {@link ControlFlow.EventType.UNCAUGHT_EXCEPTION} event. * * @param {*} e the error to report. * @private */ reportUncaughtException_(e) { this.emit(ControlFlow.EventType.UNCAUGHT_EXCEPTION, e); } /** @private */ cancelHold_() { if (this.hold_) { clearInterval(this.hold_); this.hold_ = null; } } /** @private */ shutdown_() { vlog(1, () => 'Going idle: ' + this); this.cancelHold_(); this.shutdownTask_ = null; this.emit(ControlFlow.EventType.IDLE); } /** * Cancels the shutdown sequence if it is currently scheduled. * @private */ cancelShutdown_() { if (this.shutdownTask_) { this.shutdownTask_.cancel(); this.shutdownTask_ = null; } } } /** * Events that may be emitted by an {@link ControlFlow}. * @enum {string} */ ControlFlow.EventType = { /** Emitted when all tasks have been successfully executed. */ IDLE: 'idle', /** Emitted when a ControlFlow has been reset. */ RESET: 'reset', /** Emitted whenever a new task has been scheduled. */ SCHEDULE_TASK: 'scheduleTask', /** * Emitted whenever a control flow aborts due to an unhandled promise * rejection. This event will be emitted along with the offending rejection * reason. Upon emitting this event, the control flow will empty its task * queue and revert to its initial state. */ UNCAUGHT_EXCEPTION: 'uncaughtException' }; /** * Wraps a function to execute as a cancellable micro task. * @final */ class MicroTask { /** * @param {function()} fn The function to run as a micro task. 
*/ constructor(fn) { /** @private {boolean} */ this.cancelled_ = false; asyncRun(() => { if (!this.cancelled_) { fn(); } }); } /** * Runs the given function after a micro-task yield. * @param {function()} fn The function to run. */ static run(fn) { NativePromise.resolve().then(function() { try { fn(); } catch (ignored) { // Do nothing. } }); } /** * Cancels the execution of this task. Note: this will not prevent the task * timer from firing, just the invocation of the wrapped function. */ cancel() { this.cancelled_ = true; } } /** * A task to be executed by a {@link ControlFlow}. * * @template T * @final */ class Task extends Deferred { /** * @param {!ControlFlow} flow The flow this instances belongs * to. * @param {function(): (T|!ManagedPromise<T>)} fn The function to * call when the task executes. If it returns a * {@link ManagedPromise}, the flow will wait for it to be * resolved before starting the next task. * @param {string} description A description of the task for debugging. * @param {{name: string, top: !Function}=} opt_stackOptions Options to use * when capturing the stacktrace for when this task was created. */ constructor(flow, fn, description, opt_stackOptions) { super(flow); getUid(this); /** @type {function(): (T|!ManagedPromise<T>)} */ this.execute = fn; /** @type {string} */ this.description = description; /** @type {TaskQueue} */ this.queue = null; /** * Whether this task is considered block. A blocked task may be registered * in a task queue, but will be dropped if it is still blocked when it * reaches the front of the queue. A dropped task may always be rescheduled. * * Blocked tasks are used when a callback is attached to an unsettled * promise to reserve a spot in line (in a manner of speaking). If the * promise is not settled before the callback reaches the front of the * of the queue, it will be dropped. Once the promise is settled, the * dropped task will be rescheduled as an interrupt on the currently task * queue. * * @type {boolean} */ this.blocked = false; if (opt_stackOptions) { this.promise.stack_ = captureStackTrace( opt_stackOptions.name, this.description, opt_stackOptions.top); } } /** @override */ toString() { return 'Task::' + getUid(this) + '<' + this.description + '>'; } } /** @enum {string} */ const TaskQueueState = { NEW: 'new', STARTED: 'started', FINISHED: 'finished' }; /** * @final */ class TaskQueue extends events.EventEmitter { /** @param {!ControlFlow} flow . */ constructor(flow) { super(); /** @private {string} */ this.name_ = 'TaskQueue::' + getUid(this); /** @private {!ControlFlow} */ this.flow_ = flow; /** @private {!Array<!Task>} */ this.tasks_ = []; /** @private {Array<!Task>} */ this.interrupts_ = null; /** @private {({task: !Task, q: !TaskQueue}|null)} */ this.pending_ = null; /** @private {TaskQueueState} */ this.state_ = TaskQueueState.NEW; /** @private {!Set<!ManagedPromise>} */ this.unhandledRejections_ = new Set(); } /** @override */ toString() { return 'TaskQueue::' + getUid(this); } /** * @param {!ManagedPromise} promise . */ addUnhandledRejection(promise) { // TODO: node 4.0.0+ vlog(2, () => this + ' registering unhandled rejection: ' + promise, this); this.unhandledRejections_.add(promise); } /** * @param {!ManagedPromise} promise . */ clearUnhandledRejection(promise) { var deleted = this.unhandledRejections_.delete(promise); if (deleted) { // TODO: node 4.0.0+ vlog(2, () => this + ' clearing unhandled rejection: ' + promise, this); } } /** * Enqueues a new task for execution. * @param {!Task} task The task to enqueue. 
* @throws {Error} If this instance has already started execution. */ enqueue(task) { if (this.state_ !== TaskQueueState.NEW) { throw Error('TaskQueue has started: ' + this); } if (task.queue) { throw Error('Task is already scheduled in another queue'); } this.tasks_.push(task); task.queue = this; ON_CANCEL_HANDLER.set( task.promise, (e) => this.onTaskCancelled_(task, e)); vlog(1, () => this + '.enqueue(' + task + ')', this); vlog(2, () => this.flow_.toString(), this); } /** * Schedules the callbacks registered on the given promise in this queue. * * @param {!ManagedPromise} promise the promise whose callbacks should be * registered as interrupts in this task queue. * @throws {Error} if this queue has already finished. */ scheduleCallbacks(promise) { if (this.state_ === TaskQueueState.FINISHED) { throw new Error('cannot interrupt a finished q(' + this + ')'); } if (this.pending_ && this.pending_.task.promise === promise) { this.pending_.task.promise.queue_ = null; this.pending_ = null; asyncRun(() => this.executeNext_()); } if (!promise.callbacks_) { return; } promise.callbacks_.forEach(function(cb) { cb.blocked = false; if (cb.queue) { return; } ON_CANCEL_HANDLER.set( cb.promise, (e) => this.onTaskCancelled_(cb, e)); if (cb.queue === this && this.tasks_.indexOf(cb) !== -1) { return; } if (cb.queue) { cb.queue.dropTask_(cb); } cb.queue = this; if (!this.interrupts_) { this.interrupts_ = []; } this.interrupts_.push(cb); }, this); promise.callbacks_ = null; vlog(2, () => this + ' interrupted\n' + this.flow_, this); } /** * Starts executing tasks in this queue. Once called, no further tasks may * be {@linkplain #enqueue() enqueued} with this instance. * * @throws {Error} if this queue has already been started. */ start() { if (this.state_ !== TaskQueueState.NEW) { throw new Error('TaskQueue has already started'); } // Always asynchronously execute next, even if there doesn't look like // there is anything in the queue. This will catch pending unhandled // rejections that were registered before start was called. asyncRun(() => this.executeNext_()); } /** * Aborts this task queue. If there are any scheduled tasks, they are silently * cancelled and discarded (their callbacks will never fire). If this queue * has a _pending_ task, the abortion error is used to cancel that task. * Otherwise, this queue will emit an error event. * * @param {*} error The abortion reason. * @private */ abort_(error) { var cancellation; if (error instanceof FlowResetError) { cancellation = error; } else { cancellation = new DiscardedTaskError(error); } if (this.interrupts_ && this.interrupts_.length) { this.interrupts_.forEach((t) => t.reject(cancellation)); this.interrupts_ = []; } if (this.tasks_ && this.tasks_.length) { this.tasks_.forEach((t) => t.reject(cancellation)); this.tasks_ = []; } // Now that all of the remaining tasks have been silently cancelled (e.g. no // exisitng callbacks on those tasks will fire), clear the silence bit on // the cancellation error. This ensures additional callbacks registered in // the future will actually execute. 
cancellation.silent_ = false; if (this.pending_) { vlog(2, () => this + '.abort(); cancelling pending task', this); this.pending_.task.promise.cancel( /** @type {!CancellationError} */(error)); } else { vlog(2, () => this + '.abort(); emitting error event', this); this.emit('error', error, this); } } /** @private */ executeNext_() { if (this.state_ === TaskQueueState.FINISHED) { return; } this.state_ = TaskQueueState.STARTED; if (this.pending_ !== null || this.processUnhandledRejections_()) { return; } var task; do { task = this.getNextTask_(); } while (task && !task.promise.isPending()); if (!task) { this.state_ = TaskQueueState.FINISHED; this.tasks_ = []; this.interrupts_ = null; vlog(2, () => this + '.emit(end)', this); this.emit('end', this); return; } var self = this; var subQ = new TaskQueue(this.flow_); subQ.once('end', () => self.onTaskComplete_(result)) .once('error', (e) => self.onTaskFailure_(result, e)); vlog(2, () => self + ' created ' + subQ + ' for ' + task); var result = undefined; try { this.pending_ = {task: task, q: subQ}; task.promise.queue_ = this; result = subQ.execute_(task.execute); subQ.start(); } catch (ex) { subQ.abort_(ex); } } /** * @param {!Function} fn . * @return {T} . * @template T * @private */ execute_(fn) { try { activeFlows.push(this.flow_); this.flow_.activeQueue_ = this; return fn(); } finally { this.flow_.activeQueue_ = null; activeFlows.pop(); } } /** * Process any unhandled rejections registered with this task queue. If there * is a rejection, this queue will be aborted with the rejection error. If * there are multiple rejections registered, this queue will be aborted with * a {@link MultipleUnhandledRejectionError}. * @return {boolean} whether there was an unhandled rejection. * @private */ processUnhandledRejections_() { if (!this.unhandledRejections_.size) { return false; } var errors = new Set(); for (var rejection of this.unhandledRejections_) { errors.add(rejection.value_); } this.unhandledRejections_.clear(); var errorToReport = errors.size === 1 ? errors.values().next().value : new MultipleUnhandledRejectionError(errors); vlog(1, () => this + ' aborting due to unhandled rejections', this); if (this.flow_.propagateUnhandledRejections_) { this.abort_(errorToReport); return true; } else { vlog(1, 'error propagation disabled; reporting to control flow'); this.flow_.reportUncaughtException_(errorToReport); return false; } } /** * @param {!Task} task The task to drop. * @private */ dropTask_(task) { var index; if (this.interrupts_) { index = this.interrupts_.indexOf(task); if (index != -1) { task.queue = null; this.interrupts_.splice(index, 1); return; } } index = this.tasks_.indexOf(task); if (index != -1) { task.queue = null; this.tasks_.splice(index, 1); } } /** * @param {!Task} task The task that was cancelled. * @param {!CancellationError} reason The cancellation reason. * @private */ onTaskCancelled_(task, reason) { if (this.pending_ && this.pending_.task === task) { this.pending_.q.abort_(reason); } else { this.dropTask_(task); } } /** * @param {*} value the value originally returned by the task function. * @private */ onTaskComplete_(value) { if (this.pending_) { this.pending_.task.fulfill(value); } } /** * @param {*} taskFnResult the value originally returned by the task function. * @param {*} error the error that caused the task function to terminate. 
* @private */ onTaskFailure_(taskFnResult, error) { if (Thenable.isImplementation(taskFnResult)) { taskFnResult.cancel(CancellationError.wrap(error)); } this.pending_.task.reject(error); } /** * @return {(Task|undefined)} the next task scheduled within this queue, * if any. * @private */ getNextTask_() { var task = undefined; while (true) { if (this.interrupts_) { task = this.interrupts_.shift(); } if (!task && this.tasks_) { task = this.tasks_.shift(); } if (task && task.blocked) { vlog(2, () => this + ' skipping blocked task ' + task, this); task.queue = null; task = null; // TODO: recurse when tail-call optimization is available in node. } else { break; } } return task; } }; /** * The default flow to use if no others are active. * @type {!ControlFlow} */ var defaultFlow = new ControlFlow(); /** * A stack of active control flows, with the top of the stack used to schedule * commands. When there are multiple flows on the stack, the flow at index N * represents a callback triggered within a task owned by the flow at index * N-1. * @type {!Array<!ControlFlow>} */ var activeFlows = []; /** * Changes the default flow to use when no others are active. * @param {!ControlFlow} flow The new default flow. * @throws {Error} If the default flow is not currently active. */ function setDefaultFlow(flow) { if (activeFlows.length) { throw Error('You may only change the default flow while it is active'); } defaultFlow = flow; } /** * @return {!ControlFlow} The currently active control flow. */ function controlFlow() { return /** @type {!ControlFlow} */ ( activeFlows.length ? activeFlows[activeFlows.length - 1] : defaultFlow); } /** * Creates a new control flow. The provided callback will be invoked as the * first task within the new flow, with the flow as its sole argument. Returns * a promise that resolves to the callback result. * @param {function(!ControlFlow)} callback The entry point * to the newly created flow. * @return {!Thenable} A promise that resolves to the callback result. */ function createFlow(callback) { var flow = new ControlFlow; return flow.execute(function() { return callback(flow); }); } /** * Tests is a function is a generator. * @param {!Function} fn The function to test. * @return {boolean} Whether the function is a generator. */ function isGenerator(fn) { return fn.constructor.name === 'GeneratorFunction'; } /** * Consumes a {@code GeneratorFunction}. Each time the generator yields a * promise, this function will wait for it to be fulfilled before feeding the * fulfilled value back into {@code next}. Likewise, if a yielded promise is * rejected, the rejection error will be passed to {@code throw}. * * __Example 1:__ the Fibonacci Sequence. * * promise.consume(function* fibonacci() { * var n1 = 1, n2 = 1; * for (var i = 0; i < 4; ++i) { * var tmp = yield n1 + n2; * n1 = n2; * n2 = tmp; * } * return n1 + n2; * }).then(function(result) { * console.log(result); // 13 * }); * * __Example 2:__ a generator that throws. * * promise.consume(function* () { * yield promise.delayed(250).then(function() { * throw Error('boom'); * }); * }).catch(function(e) { * console.log(e.toString()); // Error: boom * }); * * @param {!Function} generatorFn The generator function to execute. * @param {Object=} opt_self The object to use as "this" when invoking the * initial generator. * @param {...*} var_args Any arguments to pass to the initial generator. * @return {!ManagedPromise<?>} A promise that will resolve to the * generator's final result. 
* @throws {TypeError} If the given function is not a generator. */ function consume(generatorFn, opt_self, var_args) { if (!isGenerator(generatorFn)) { throw new TypeError('Input is not a GeneratorFunction: ' + generatorFn.constructor.name); } var deferred = defer(); var generator = generatorFn.apply( opt_self, Array.prototype.slice.call(arguments, 2)); callNext(); return deferred.promise; /** @param {*=} opt_value . */ function callNext(opt_value) { pump(generator.next, opt_value); } /** @param {*=} opt_error . */ function callThrow(opt_error) { // Dictionary lookup required because Closure compiler's built-in // externs does not include GeneratorFunction.prototype.throw. pump(generator['throw'], opt_error); } function pump(fn, opt_arg) { if (!deferred.promise.isPending()) { return; // Defererd was cancelled; silently abort. } try { var result = fn.call(generator, opt_arg); } catch (ex) { deferred.reject(ex); return; } if (result.done) { deferred.fulfill(result.value); return; } asap(result.value, callNext, callThrow); } } // PUBLIC API module.exports = { CancellationError: CancellationError, ControlFlow: ControlFlow, Deferred: Deferred, MultipleUnhandledRejectionError: MultipleUnhandledRejectionError, Thenable: Thenable, Promise: ManagedPromise, all: all, asap: asap, captureStackTrace: captureStackTrace, checkedNodeCall: checkedNodeCall, consume: consume, controlFlow: controlFlow, createFlow: createFlow, defer: defer, delayed: delayed, filter: filter, fulfilled: fulfilled, fullyResolved: fullyResolved, isGenerator: isGenerator, isPromise: isPromise, map: map, rejected: rejected, setDefaultFlow: setDefaultFlow, when: when, get LONG_STACK_TRACES() { return LONG_STACK_TRACES; }, set LONG_STACK_TRACES(v) { LONG_STACK_TRACES = v; }, };
1
13,750
there are still `2` spaces after `a` here.. :P
SeleniumHQ-selenium
java
@@ -25,6 +25,17 @@ import ( const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV +// setupDev returns true if /dev needs to be set up. +func needsSetupDev(config *configs.Config) (bool, error) { + setupDev := true + for _, m := range config.Mounts { + if m.Device == "bind" && m.Destination == "/dev" { + setupDev = false + } + } + return setupDev, nil +} + // setupRootfs sets up the devices, mount points, and filesystems for use inside a // new mount namespace. func setupRootfs(config *configs.Config, console *linuxConsole, pipe io.ReadWriter) (err error) {
1
// +build linux package libcontainer import ( "fmt" "io" "io/ioutil" "os" "os/exec" "path" "path/filepath" "strings" "syscall" "time" "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/symlink" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/label" "github.com/opencontainers/runc/libcontainer/system" libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils" ) const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV // setupRootfs sets up the devices, mount points, and filesystems for use inside a // new mount namespace. func setupRootfs(config *configs.Config, console *linuxConsole, pipe io.ReadWriter) (err error) { if err := prepareRoot(config); err != nil { return newSystemError(err) } setupDev := len(config.Devices) != 0 for _, m := range config.Mounts { for _, precmd := range m.PremountCmds { if err := mountCmd(precmd); err != nil { return newSystemError(err) } } if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil { return newSystemError(err) } for _, postcmd := range m.PostmountCmds { if err := mountCmd(postcmd); err != nil { return newSystemError(err) } } } if setupDev { if err := createDevices(config); err != nil { return newSystemError(err) } if err := setupPtmx(config, console); err != nil { return newSystemError(err) } if err := setupDevSymlinks(config.Rootfs); err != nil { return newSystemError(err) } } // Signal the parent to run the pre-start hooks. // The hooks are run after the mounts are setup, but before we switch to the new // root, so that the old root is still available in the hooks for any mount // manipulations. if err := syncParentHooks(pipe); err != nil { return err } if err := syscall.Chdir(config.Rootfs); err != nil { return newSystemError(err) } if config.NoPivotRoot { err = msMoveRoot(config.Rootfs) } else { err = pivotRoot(config.Rootfs, config.PivotDir) } if err != nil { return newSystemError(err) } if setupDev { if err := reOpenDevNull(); err != nil { return newSystemError(err) } } // remount dev as ro if specifed for _, m := range config.Mounts { if m.Destination == "/dev" { if m.Flags&syscall.MS_RDONLY != 0 { if err := remountReadonly(m.Destination); err != nil { return newSystemError(err) } } break } } // set rootfs ( / ) as readonly if config.Readonlyfs { if err := setReadonly(); err != nil { return newSystemError(err) } } syscall.Umask(0022) return nil } func mountCmd(cmd configs.Command) error { command := exec.Command(cmd.Path, cmd.Args[:]...) 
command.Env = cmd.Env command.Dir = cmd.Dir if out, err := command.CombinedOutput(); err != nil { return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err) } return nil } func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error { var ( dest = m.Destination ) if !strings.HasPrefix(dest, rootfs) { dest = filepath.Join(rootfs, dest) } switch m.Device { case "proc", "sysfs": if err := os.MkdirAll(dest, 0755); err != nil { return err } // Selinux kernels do not support labeling of /proc or /sys return mountPropagate(m, rootfs, "") case "mqueue": if err := os.MkdirAll(dest, 0755); err != nil { return err } if err := mountPropagate(m, rootfs, mountLabel); err != nil { // older kernels do not support labeling of /dev/mqueue if err := mountPropagate(m, rootfs, ""); err != nil { return err } return label.SetFileLabel(dest, mountLabel) } return nil case "tmpfs": stat, err := os.Stat(dest) if err != nil { if err := os.MkdirAll(dest, 0755); err != nil { return err } } if err := mountPropagate(m, rootfs, mountLabel); err != nil { return err } if stat != nil { if err = os.Chmod(dest, stat.Mode()); err != nil { return err } } return nil case "bind": stat, err := os.Stat(m.Source) if err != nil { // error out if the source of a bind mount does not exist as we will be // unable to bind anything to it. return err } // ensure that the destination of the bind mount is resolved of symlinks at mount time because // any previous mounts can invalidate the next mount's destination. // this can happen when a user specifies mounts within other mounts to cause breakouts or other // evil stuff to try to escape the container's rootfs. if dest, err = symlink.FollowSymlinkInScope(filepath.Join(rootfs, m.Destination), rootfs); err != nil { return err } if err := checkMountDestination(rootfs, dest); err != nil { return err } // update the mount with the correct dest after symlinks are resolved. m.Destination = dest if err := createIfNotExists(dest, stat.IsDir()); err != nil { return err } if err := mountPropagate(m, rootfs, mountLabel); err != nil { return err } // bind mount won't change mount options, we need remount to make mount options effective. 
// first check that we have non-default options required before attempting a remount if m.Flags&^(syscall.MS_REC|syscall.MS_REMOUNT|syscall.MS_BIND) != 0 { // only remount if unique mount options are set if err := remount(m, rootfs); err != nil { return err } } if m.Relabel != "" { if err := label.Validate(m.Relabel); err != nil { return err } shared := label.IsShared(m.Relabel) if err := label.Relabel(m.Source, mountLabel, shared); err != nil { return err } } case "cgroup": binds, err := getCgroupMounts(m) if err != nil { return err } var merged []string for _, b := range binds { ss := filepath.Base(b.Destination) if strings.Contains(ss, ",") { merged = append(merged, ss) } } tmpfs := &configs.Mount{ Source: "tmpfs", Device: "tmpfs", Destination: m.Destination, Flags: defaultMountFlags, Data: "mode=755", PropagationFlags: m.PropagationFlags, } if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil { return err } for _, b := range binds { if err := mountToRootfs(b, rootfs, mountLabel); err != nil { return err } } // create symlinks for merged cgroups cwd, err := os.Getwd() if err != nil { return err } if err := os.Chdir(filepath.Join(rootfs, m.Destination)); err != nil { return err } for _, mc := range merged { for _, ss := range strings.Split(mc, ",") { if err := os.Symlink(mc, ss); err != nil { // if cgroup already exists, then okay(it could have been created before) if os.IsExist(err) { continue } os.Chdir(cwd) return err } } } if err := os.Chdir(cwd); err != nil { return err } if m.Flags&syscall.MS_RDONLY != 0 { // remount cgroup root as readonly mcgrouproot := &configs.Mount{ Destination: m.Destination, Flags: defaultMountFlags | syscall.MS_RDONLY, } if err := remount(mcgrouproot, rootfs); err != nil { return err } } default: if err := os.MkdirAll(dest, 0755); err != nil { return err } return mountPropagate(m, rootfs, mountLabel) } return nil } func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) { mounts, err := cgroups.GetCgroupMounts() if err != nil { return nil, err } cgroupPaths, err := cgroups.ParseCgroupFile("/proc/self/cgroup") if err != nil { return nil, err } var binds []*configs.Mount for _, mm := range mounts { dir, err := mm.GetThisCgroupDir(cgroupPaths) if err != nil { return nil, err } relDir, err := filepath.Rel(mm.Root, dir) if err != nil { return nil, err } binds = append(binds, &configs.Mount{ Device: "bind", Source: filepath.Join(mm.Mountpoint, relDir), Destination: filepath.Join(m.Destination, strings.Join(mm.Subsystems, ",")), Flags: syscall.MS_BIND | syscall.MS_REC | m.Flags, PropagationFlags: m.PropagationFlags, }) } return binds, nil } // checkMountDestination checks to ensure that the mount destination is not over the top of /proc. // dest is required to be an abs path and have any symlinks resolved before calling this function. func checkMountDestination(rootfs, dest string) error { if libcontainerUtils.CleanPath(rootfs) == libcontainerUtils.CleanPath(dest) { return fmt.Errorf("mounting into / is prohibited") } invalidDestinations := []string{ "/proc", } // White list, it should be sub directories of invalid destinations validDestinations := []string{ // These entries can be bind mounted by files emulated by fuse, // so commands like top, free displays stats in container. "/proc/cpuinfo", "/proc/diskstats", "/proc/meminfo", "/proc/stat", "/proc/net/dev", } for _, valid := range validDestinations { path, err := filepath.Rel(filepath.Join(rootfs, valid), dest) if err != nil { return err } if path == "." 
{ return nil } } for _, invalid := range invalidDestinations { path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest) if err != nil { return err } if path == "." || !strings.HasPrefix(path, "..") { return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid) } } return nil } func setupDevSymlinks(rootfs string) error { var links = [][2]string{ {"/proc/self/fd", "/dev/fd"}, {"/proc/self/fd/0", "/dev/stdin"}, {"/proc/self/fd/1", "/dev/stdout"}, {"/proc/self/fd/2", "/dev/stderr"}, } // kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink // in /dev if it exists in /proc. if _, err := os.Stat("/proc/kcore"); err == nil { links = append(links, [2]string{"/proc/kcore", "/dev/core"}) } for _, link := range links { var ( src = link[0] dst = filepath.Join(rootfs, link[1]) ) if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) { return fmt.Errorf("symlink %s %s %s", src, dst, err) } } return nil } // If stdin, stdout, and/or stderr are pointing to `/dev/null` in the parent's rootfs // this method will make them point to `/dev/null` in this container's rootfs. This // needs to be called after we chroot/pivot into the container's rootfs so that any // symlinks are resolved locally. func reOpenDevNull() error { var stat, devNullStat syscall.Stat_t file, err := os.OpenFile("/dev/null", os.O_RDWR, 0) if err != nil { return fmt.Errorf("Failed to open /dev/null - %s", err) } defer file.Close() if err := syscall.Fstat(int(file.Fd()), &devNullStat); err != nil { return err } for fd := 0; fd < 3; fd++ { if err := syscall.Fstat(fd, &stat); err != nil { return err } if stat.Rdev == devNullStat.Rdev { // Close and re-open the fd. if err := syscall.Dup3(int(file.Fd()), fd, 0); err != nil { return err } } } return nil } // Create the device nodes in the container. func createDevices(config *configs.Config) error { useBindMount := system.RunningInUserNS() || config.Namespaces.Contains(configs.NEWUSER) oldMask := syscall.Umask(0000) for _, node := range config.Devices { // containers running in a user namespace are not allowed to mknod // devices so we can just bind mount it from the host. if err := createDeviceNode(config.Rootfs, node, useBindMount); err != nil { syscall.Umask(oldMask) return err } } syscall.Umask(oldMask) return nil } func bindMountDeviceNode(dest string, node *configs.Device) error { f, err := os.Create(dest) if err != nil && !os.IsExist(err) { return err } if f != nil { f.Close() } return syscall.Mount(node.Path, dest, "bind", syscall.MS_BIND, "") } // Creates the device node in the rootfs of the container. 
func createDeviceNode(rootfs string, node *configs.Device, bind bool) error { dest := filepath.Join(rootfs, node.Path) if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil { return err } if bind { return bindMountDeviceNode(dest, node) } if err := mknodDevice(dest, node); err != nil { if os.IsExist(err) { return nil } else if os.IsPermission(err) { return bindMountDeviceNode(dest, node) } return err } return nil } func mknodDevice(dest string, node *configs.Device) error { fileMode := node.FileMode switch node.Type { case 'c': fileMode |= syscall.S_IFCHR case 'b': fileMode |= syscall.S_IFBLK default: return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path) } if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil { return err } return syscall.Chown(dest, int(node.Uid), int(node.Gid)) } func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { for _, m := range mountinfo { if m.Mountpoint == dir { return m } } return nil } // Get the parent mount point of directory passed in as argument. Also return // optional fields. func getParentMount(rootfs string) (string, string, error) { var path string mountinfos, err := mount.GetMounts() if err != nil { return "", "", err } mountinfo := getMountInfo(mountinfos, rootfs) if mountinfo != nil { return rootfs, mountinfo.Optional, nil } path = rootfs for { path = filepath.Dir(path) mountinfo = getMountInfo(mountinfos, path) if mountinfo != nil { return path, mountinfo.Optional, nil } if path == "/" { break } } // If we are here, we did not find parent mount. Something is wrong. return "", "", fmt.Errorf("Could not find parent mount of %s", rootfs) } // Make parent mount private if it was shared func rootfsParentMountPrivate(config *configs.Config) error { sharedMount := false parentMount, optionalOpts, err := getParentMount(config.Rootfs) if err != nil { return err } optsSplit := strings.Split(optionalOpts, " ") for _, opt := range optsSplit { if strings.HasPrefix(opt, "shared:") { sharedMount = true break } } // Make parent mount PRIVATE if it was shared. It is needed for two // reasons. First of all pivot_root() will fail if parent mount is // shared. Secondly when we bind mount rootfs it will propagate to // parent namespace and we don't want that to happen. 
if sharedMount { return syscall.Mount("", parentMount, "", syscall.MS_PRIVATE, "") } return nil } func prepareRoot(config *configs.Config) error { flag := syscall.MS_SLAVE | syscall.MS_REC if config.RootPropagation != 0 { flag = config.RootPropagation } if err := syscall.Mount("", "/", "", uintptr(flag), ""); err != nil { return err } if err := rootfsParentMountPrivate(config); err != nil { return err } return syscall.Mount(config.Rootfs, config.Rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, "") } func setReadonly() error { return syscall.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "") } func setupPtmx(config *configs.Config, console *linuxConsole) error { ptmx := filepath.Join(config.Rootfs, "dev/ptmx") if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) { return err } if err := os.Symlink("pts/ptmx", ptmx); err != nil { return fmt.Errorf("symlink dev ptmx %s", err) } if console != nil { return console.mount(config.Rootfs, config.MountLabel) } return nil } func pivotRoot(rootfs, pivotBaseDir string) (err error) { if pivotBaseDir == "" { pivotBaseDir = "/" } tmpDir := filepath.Join(rootfs, pivotBaseDir) if err := os.MkdirAll(tmpDir, 0755); err != nil { return fmt.Errorf("can't create tmp dir %s, error %v", tmpDir, err) } pivotDir, err := ioutil.TempDir(tmpDir, ".pivot_root") if err != nil { return fmt.Errorf("can't create pivot_root dir %s, error %v", pivotDir, err) } defer func() { errVal := os.Remove(pivotDir) if err == nil { err = errVal } }() if err := syscall.PivotRoot(rootfs, pivotDir); err != nil { return fmt.Errorf("pivot_root %s", err) } if err := syscall.Chdir("/"); err != nil { return fmt.Errorf("chdir / %s", err) } // path to pivot dir now changed, update pivotDir = filepath.Join(pivotBaseDir, filepath.Base(pivotDir)) // Make pivotDir rprivate to make sure any of the unmounts don't // propagate to parent. if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { return err } if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { return fmt.Errorf("unmount pivot_root dir %s", err) } return nil } func msMoveRoot(rootfs string) error { if err := syscall.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil { return err } if err := syscall.Chroot("."); err != nil { return err } return syscall.Chdir("/") } // createIfNotExists creates a file or a directory only if it does not already exist. func createIfNotExists(path string, isDir bool) error { if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { if isDir { return os.MkdirAll(path, 0755) } if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { return err } f, err := os.OpenFile(path, os.O_CREATE, 0755) if err != nil { return err } f.Close() } } return nil } // remountReadonly will bind over the top of an existing path and ensure that it is read-only. 
func remountReadonly(path string) error { for i := 0; i < 5; i++ { if err := syscall.Mount("", path, "", syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil && !os.IsNotExist(err) { switch err { case syscall.EINVAL: // Probably not a mountpoint, use bind-mount if err := syscall.Mount(path, path, "", syscall.MS_BIND, ""); err != nil { return err } return syscall.Mount(path, path, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC|defaultMountFlags, "") case syscall.EBUSY: time.Sleep(100 * time.Millisecond) continue default: return err } } return nil } return fmt.Errorf("unable to mount %s as readonly max retries reached", path) } // maskFile bind mounts /dev/null over the top of the specified path inside a container // to avoid security issues from processes reading information from non-namespace aware mounts ( proc/kcore ). func maskFile(path string) error { if err := syscall.Mount("/dev/null", path, "", syscall.MS_BIND, ""); err != nil && !os.IsNotExist(err) { return err } return nil } // writeSystemProperty writes the value to a path under /proc/sys as determined from the key. // For e.g. net.ipv4.ip_forward translated to /proc/sys/net/ipv4/ip_forward. func writeSystemProperty(key, value string) error { keyPath := strings.Replace(key, ".", "/", -1) return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644) } func remount(m *configs.Mount, rootfs string) error { var ( dest = m.Destination ) if !strings.HasPrefix(dest, rootfs) { dest = filepath.Join(rootfs, dest) } if err := syscall.Mount(m.Source, dest, m.Device, uintptr(m.Flags|syscall.MS_REMOUNT), ""); err != nil { return err } return nil } // Do the mount operation followed by additional mounts required to take care // of propagation flags. func mountPropagate(m *configs.Mount, rootfs string, mountLabel string) error { var ( dest = m.Destination data = label.FormatMountLabel(m.Data, mountLabel) flags = m.Flags ) if dest == "/dev" { flags &= ^syscall.MS_RDONLY } if !strings.HasPrefix(dest, rootfs) { dest = filepath.Join(rootfs, dest) } if err := syscall.Mount(m.Source, dest, m.Device, uintptr(flags), data); err != nil { return err } for _, pflag := range m.PropagationFlags { if err := syscall.Mount("", dest, "", uintptr(pflag), ""); err != nil { return err } } return nil }
1
10,683
What we did in docker before, when working directly with libcontainer, is check if the source is `/dev` and the destination has a `/dev/` prefix. This implementation is probably ok. If someone is mounting their own `/dev` I would expect it to be pre-configured and not need the extra check for whether it's the host's `/dev`. (that said, I've not tested yet, just reading through the code)
opencontainers-runc
go
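The runc review above suggests a broader test than the patch's needsSetupDev, which only matches a bind mount whose destination is exactly /dev: Docker's earlier approach also looked at whether the mount source is the host's /dev or the destination sits under /dev/. The sketch below puts the two predicates side by side. It is only an illustration of that idea; the Mount type is a cut-down stand-in for configs.Mount, and the bindsHostDev helper is invented for this example rather than taken from the runc or Docker trees.

package main

import (
	"fmt"
	"strings"
)

// Mount is a cut-down stand-in for configs.Mount, carrying only the fields
// the two checks below look at.
type Mount struct {
	Device      string
	Source      string
	Destination string
}

// needsSetupDev mirrors the check added in the patch above: /dev only needs
// to be populated by the runtime when no bind mount targets exactly "/dev".
func needsSetupDev(mounts []Mount) bool {
	for _, m := range mounts {
		if m.Device == "bind" && m.Destination == "/dev" {
			return false
		}
	}
	return true
}

// bindsHostDev is one reading of the reviewer's suggestion: treat a bind
// mount as touching /dev when its source is the host's /dev, its destination
// is /dev itself, or its destination lives under /dev/.
func bindsHostDev(m Mount) bool {
	if m.Device != "bind" {
		return false
	}
	return m.Source == "/dev" || m.Destination == "/dev" || strings.HasPrefix(m.Destination, "/dev/")
}

func main() {
	mounts := []Mount{
		{Device: "bind", Source: "/dev", Destination: "/dev"},
		{Device: "bind", Source: "/host/shm", Destination: "/dev/shm"},
	}
	fmt.Println("needs /dev setup:", needsSetupDev(mounts)) // false: the first mount binds /dev
	for _, m := range mounts {
		fmt.Printf("%s -> %s touches /dev: %v\n", m.Source, m.Destination, bindsHostDev(m))
	}
}

Whether the extra source check is worth carrying depends on the point the reviewer concedes: a user who bind mounts their own /dev is expected to have it pre-configured, so matching the destination alone may be enough.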
@@ -501,6 +501,10 @@ type Configuration struct { PleaseLocation string // buildEnvStored is a cached form of BuildEnv. buildEnvStored *storedBuildEnv + + FeatureFlags struct { + JavaBinaryExecutableByDefault bool `help:"Makes java_binary rules self executable by default. Target release version 16." var:"FF_JAVA_SELF_EXEC"` + } } // An Alias represents aliases in the config.
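The diff above adds a FeatureFlags sub-struct to Please's Configuration, with a gcfg-style help tag and a var:"FF_JAVA_SELF_EXEC" name for overriding the flag by environment variable. As a rough standalone sketch of that kind of flag, not Please's actual loader, the envOverrideBool helper and the hard-coded variable name below are assumptions made for this illustration only:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// FeatureFlags mirrors the shape of the struct added in the diff above; the
// tags follow the same help/var convention, but the loader here is a
// simplified stand-in, not the gcfg/flags machinery Please uses.
type FeatureFlags struct {
	JavaBinaryExecutableByDefault bool `help:"Makes java_binary rules self executable by default. Target release version 16." var:"FF_JAVA_SELF_EXEC"`
}

// envOverrideBool returns the named environment variable parsed as a bool,
// falling back to the provided default when it is unset or unparsable.
func envOverrideBool(name string, def bool) bool {
	raw, ok := os.LookupEnv(name)
	if !ok {
		return def
	}
	v, err := strconv.ParseBool(raw)
	if err != nil {
		return def
	}
	return v
}

func main() {
	flags := FeatureFlags{} // config-file default: false
	// Apply the override named in the struct tag (hard-coded here for brevity;
	// a real loader would read the tag via reflection).
	flags.JavaBinaryExecutableByDefault = envOverrideBool("FF_JAVA_SELF_EXEC", flags.JavaBinaryExecutableByDefault)
	fmt.Println("java_binary self-executable:", flags.JavaBinaryExecutableByDefault)
}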
1
// Utilities for reading the Please config files. package core import ( "crypto/sha1" "fmt" "io" "os" "path" "path/filepath" "reflect" "runtime" "sort" "strconv" "strings" "sync" "time" "github.com/google/shlex" "github.com/jessevdk/go-flags" "github.com/peterebden/gcfg" "github.com/thought-machine/please/src/cli" "github.com/thought-machine/please/src/fs" ) // OsArch is the os/arch pair, like linux_amd64 etc. const OsArch = runtime.GOOS + "_" + runtime.GOARCH // ConfigFileName is the file name for the typical repo config - this is normally checked in const ConfigFileName string = ".plzconfig" // ArchConfigFileName is the architecture-specific config file which overrides the repo one. // Also normally checked in if needed. const ArchConfigFileName string = ".plzconfig_" + OsArch // LocalConfigFileName is the file name for the local repo config - this is not normally checked // in and used to override settings on the local machine. const LocalConfigFileName string = ".plzconfig.local" // MachineConfigFileName is the file name for the machine-level config - can use this to override // things for a particular machine (eg. build machine with different caching behaviour). const MachineConfigFileName = "/etc/please/plzconfig" // UserConfigFileName is the file name for user-specific config (for all their repos). const UserConfigFileName = "~/.config/please/plzconfig" func readConfigFile(config *Configuration, filename string) error { log.Debug("Attempting to read config from %s...", filename) if err := gcfg.ReadFileInto(config, filename); err != nil && os.IsNotExist(err) { return nil // It's not an error to not have the file at all. } else if gcfg.FatalOnly(err) != nil { return err } else if err != nil { log.Warning("Error in config file: %s", err) } else { log.Debug("Read config from %s", filename) } return nil } // ReadDefaultConfigFiles reads all the config files from the default locations and // merges them into a config object. // The repo root must have already have been set before calling this. func ReadDefaultConfigFiles(profiles []string) (*Configuration, error) { return ReadConfigFiles(defaultConfigFiles(), profiles) } // defaultConfigFiles returns the set of default config file names. func defaultConfigFiles() []string { return []string{ MachineConfigFileName, ExpandHomePath(UserConfigFileName), path.Join(RepoRoot, ConfigFileName), path.Join(RepoRoot, ArchConfigFileName), path.Join(RepoRoot, LocalConfigFileName), } } // ReadConfigFiles reads all the config locations, in order, and merges them into a config object. // Values are filled in by defaults initially and then overridden by each file in turn. func ReadConfigFiles(filenames []string, profiles []string) (*Configuration, error) { config := DefaultConfiguration() for _, filename := range filenames { if err := readConfigFile(config, filename); err != nil { return config, err } for _, profile := range profiles { if err := readConfigFile(config, filename+"."+profile); err != nil { return config, err } } } // Set default values for slices. These add rather than overwriting so we can't set // them upfront as we would with other config values. 
if usingBazelWorkspace { setDefault(&config.Parse.BuildFileName, "BUILD.bazel", "BUILD", "BUILD.plz") } else { setDefault(&config.Parse.BuildFileName, "BUILD", "BUILD.plz") } setBuildPath(&config.Build.Path, config.Build.PassEnv, config.Build.PassUnsafeEnv) setDefault(&config.Build.PassUnsafeEnv) setDefault(&config.Build.PassEnv) setDefault(&config.Cover.FileExtension, ".go", ".py", ".java", ".tsx", ".ts", ".js", ".cc", ".h", ".c") setDefault(&config.Cover.ExcludeExtension, ".pb.go", "_pb2.py", ".spec.tsx", ".spec.ts", ".spec.js", ".pb.cc", ".pb.h", "_test.py", "_test.go", "_pb.go", "_bindata.go", "_test_main.cc") setDefault(&config.Proto.Language, "cc", "py", "java", "go", "js") setDefault(&config.Parse.BuildDefsDir, "build_defs") // Default values for these guys depend on config.Java.JavaHome if that's been set. if config.Java.JavaHome != "" { defaultPathIfExists(&config.Java.JlinkTool, config.Java.JavaHome, "bin/jlink") } if config.Colours == nil { config.Colours = map[string]string{ "py": "${GREEN}", "java": "${RED}", "go": "${YELLOW}", "js": "${BLUE}", } } else { // You are allowed to just write "yellow" but we map that to a pseudo-variable thing. for k, v := range config.Colours { if v[0] != '$' { config.Colours[k] = "${" + strings.ToUpper(v) + "}" } } } // In a few versions we will deprecate Cpp.Coverage completely in favour of this more generic scheme. if !config.Cpp.Coverage { config.Test.DisableCoverage = append(config.Test.DisableCoverage, "cc") } if len(config.Size) == 0 { config.Size = map[string]*Size{ "small": { Timeout: cli.Duration(1 * time.Minute), TimeoutName: "short", }, "medium": { Timeout: cli.Duration(5 * time.Minute), TimeoutName: "moderate", }, "large": { Timeout: cli.Duration(15 * time.Minute), TimeoutName: "long", }, "enormous": { TimeoutName: "eternal", }, } } // Dump the timeout names back in so we can look them up later for _, size := range config.Size { if size.TimeoutName != "" { config.Size[size.TimeoutName] = size } } if config.Please.Location == "" { // Determine the location based off where we're running from. if exec, err := os.Executable(); err != nil { log.Warning("Can't determine current executable: %s", err) config.Please.Location = "~/.please" } else if strings.HasPrefix(exec, ExpandHomePath("~/.please")) { // Paths within ~/.please are managed by us and have symlinks to subdirectories // that we don't want to follow. config.Please.Location = "~/.please" } else if deref, err := filepath.EvalSymlinks(exec); err != nil { log.Warning("Can't dereference %s: %s", exec, err) config.Please.Location = "~/.please" } else { config.Please.Location = path.Dir(deref) } } config.HomeDir = os.Getenv("HOME") config.PleaseLocation = fs.ExpandHomePathTo(config.Please.Location, config.HomeDir) // We can only verify options by reflection (we need struct tags) so run them quickly through this. return config, config.ApplyOverrides(map[string]string{ "build.hashfunction": config.Build.HashFunction, }) } // setDefault sets a slice of strings in the config if the set one is empty. func setDefault(conf *[]string, def ...string) { if len(*conf) == 0 { *conf = def } } // setDefault checks if "PATH" is in passEnv, if it is set config.build.Path to use the environment variable. 
func setBuildPath(conf *[]string, passEnv []string, passUnsafeEnv []string) { pathVal := []string{"/usr/local/bin", "/usr/bin", "/bin"} for _, i := range passUnsafeEnv { if i == "PATH" { pathVal = strings.Split(os.Getenv("PATH"), ":") } } for _, i := range passEnv { if i == "PATH" { pathVal = strings.Split(os.Getenv("PATH"), ":") } } setDefault(conf, pathVal...) } // defaultPathIfExists sets a variable to a location in a directory if it's not already set and if the location exists. func defaultPathIfExists(conf *string, dir, file string) { if *conf == "" { location := path.Join(dir, file) // check that the location is valid if _, err := os.Stat(location); err == nil { *conf = location } } } // DefaultConfiguration returns the default configuration object with no overrides. // N.B. Slice fields are not populated by this (since it interferes with reading them) func DefaultConfiguration() *Configuration { config := Configuration{buildEnvStored: &storedBuildEnv{}} config.Please.SelfUpdate = true config.Please.Autoclean = true config.Please.DownloadLocation = "https://get.please.build" config.Please.NumOldVersions = 10 config.Please.NumThreads = runtime.NumCPU() + 2 config.Parse.NumThreads = config.Please.NumThreads config.Parse.BuiltinPleasings = true config.Parse.GitFunctions = true config.Build.Arch = cli.NewArch(runtime.GOOS, runtime.GOARCH) config.Build.Lang = "en_GB.UTF-8" // Not the language of the UI, the language passed to rules. config.Build.Nonce = "1402" // Arbitrary nonce to invalidate config when needed. config.Build.Timeout = cli.Duration(10 * time.Minute) config.Build.Config = "opt" // Optimised builds by default config.Build.FallbackConfig = "opt" // Optimised builds as a fallback on any target that doesn't have a matching one set config.Build.PleaseSandboxTool = "please_sandbox" config.Build.Xattrs = true config.Build.HashFunction = "sha256" config.BuildConfig = map[string]string{} config.BuildEnv = map[string]string{} config.Cache.HTTPWriteable = true config.Cache.HTTPTimeout = cli.Duration(25 * time.Second) config.Cache.HTTPConcurrentRequestLimit = 20 config.Cache.HTTPRetry = 4 if dir, err := os.UserCacheDir(); err == nil { config.Cache.Dir = path.Join(dir, "please") } config.Cache.DirCacheHighWaterMark = 10 * cli.GiByte config.Cache.DirCacheLowWaterMark = 8 * cli.GiByte config.Cache.DirClean = true config.Cache.Workers = runtime.NumCPU() + 2 // Mirrors the number of workers in please.go. config.Test.Timeout = cli.Duration(10 * time.Minute) config.Display.SystemStats = true config.Display.MaxWorkers = 40 config.Remote.NumExecutors = 20 // kind of arbitrary config.Remote.HomeDir = "~" config.Remote.Secure = true config.Remote.Gzip = true config.Remote.VerifyOutputs = true config.Remote.CacheDuration = cli.Duration(10000 * 24 * time.Hour) // Effectively forever. 
config.Go.GoTool = "go" config.Go.CgoCCTool = "gcc" config.Go.TestTool = "please_go_test" config.Go.FilterTool = "please_go_filter" config.Python.PipTool = "pip3" config.Python.PexTool = "please_pex" config.Python.DefaultInterpreter = "python3" config.Python.TestRunner = "unittest" config.Python.TestRunnerBootstrap = "" config.Python.UsePyPI = true config.Python.InterpreterOptions = "" config.Python.PipFlags = "" config.Java.DefaultTestPackage = "" config.Java.SourceLevel = "8" config.Java.TargetLevel = "8" config.Java.ReleaseLevel = "" config.Java.DefaultMavenRepo = []cli.URL{"https://repo1.maven.org/maven2", "https://jcenter.bintray.com/"} config.Java.JavacFlags = "-Werror -Xlint:-options" // bootstrap class path warnings are pervasive without this. config.Java.JlinkTool = "jlink" config.Java.JavacWorker = "javac_worker" config.Java.JarCatTool = "jarcat" config.Java.JUnitRunner = "junit_runner.jar" config.Java.JavaHome = "" config.Cpp.CCTool = "gcc" config.Cpp.CppTool = "g++" config.Cpp.LdTool = "ld" config.Cpp.ArTool = "ar" config.Cpp.DefaultOptCflags = "--std=c99 -O3 -pipe -DNDEBUG -Wall -Werror" config.Cpp.DefaultDbgCflags = "--std=c99 -g3 -pipe -DDEBUG -Wall -Werror" config.Cpp.DefaultOptCppflags = "--std=c++11 -O3 -pipe -DNDEBUG -Wall -Werror" config.Cpp.DefaultDbgCppflags = "--std=c++11 -g3 -pipe -DDEBUG -Wall -Werror" config.Cpp.Coverage = true config.Cpp.ClangModules = true // At some point in the future it might make sense to remove UnitTest++ as the default // test runner - but for now it's still the default for compatibility. config.Cpp.TestMain = BuildLabel{ Subrepo: "pleasings", PackageName: "cc", Name: "unittest_main", } config.Proto.ProtocTool = "protoc" // We're using the most common names for these; typically gRPC installs the builtin plugins // as grpc_python_plugin etc. config.Proto.ProtocGoPlugin = "protoc-gen-go" config.Proto.GrpcPythonPlugin = "grpc_python_plugin" config.Proto.GrpcJavaPlugin = "protoc-gen-grpc-java" config.Proto.GrpcCCPlugin = "grpc_cpp_plugin" config.Proto.PythonDep = "//third_party/python:protobuf" config.Proto.JavaDep = "//third_party/java:protobuf" config.Proto.GoDep = "//third_party/go:protobuf" config.Proto.JsDep = "" config.Proto.PythonGrpcDep = "//third_party/python:grpc" config.Proto.JavaGrpcDep = "//third_party/java:grpc-all" config.Proto.GoGrpcDep = "//third_party/go:grpc" config.Remote.Timeout = cli.Duration(2 * time.Minute) config.Bazel.Compatibility = usingBazelWorkspace return &config } // A Configuration contains all the settings that can be configured about Please. // This is parsed from .plzconfig etc; we also auto-generate help messages from its tags. type Configuration struct { Please struct { Version cli.Version `help:"Defines the version of plz that this repo is supposed to use currently. If it's not present or the version matches the currently running version no special action is taken; otherwise if SelfUpdate is set Please will attempt to download an appropriate version, otherwise it will issue a warning and continue.\n\nNote that if this is not set, you can run plz update to update to the latest version available on the server." var:"PLZ_VERSION"` VersionChecksum []string `help:"Defines a hex-encoded sha256 checksum that the downloaded version must match. Can be specified multiple times to support different architectures."` Location string `help:"Defines the directory Please is installed into.\nDefaults to ~/.please but you might want it to be somewhere else if you're installing via another method (e.g. 
the debs and install script still use /opt/please)."` SelfUpdate bool `help:"Sets whether plz will attempt to update itself when the version set in the config file is different."` DownloadLocation cli.URL `help:"Defines the location to download Please from when self-updating. Defaults to the Please web server, but you can point it to some location of your own if you prefer to keep traffic within your network or use home-grown versions."` NumOldVersions int `help:"Number of old versions to keep from autoupdates."` Autoclean bool `help:"Automatically clean stale versions without prompting"` NumThreads int `help:"Number of parallel build operations to run.\nIs overridden by the equivalent command-line flag, if that's passed." example:"6"` Motd []string `help:"Message of the day; is displayed once at the top during builds. If multiple are given, one is randomly chosen."` DefaultRepo string `help:"Location of the default repository; this is used if plz is invoked when not inside a repo, it changes to that directory then does its thing."` } `help:"The [please] section in the config contains non-language-specific settings defining how Please should operate."` Parse struct { ExperimentalDir []string `help:"Directory containing experimental code. This is subject to some extra restrictions:\n - Code in the experimental dir can override normal visibility constraints\n - Code outside the experimental dir can never depend on code inside it\n - Tests are excluded from general detection." example:"experimental"` BuildFileName []string `help:"Sets the names that Please uses instead of BUILD for its build files.\nFor clarity the documentation refers to them simply as BUILD files but you could reconfigure them here to be something else.\nOne case this can be particularly useful is in cases where you have a subdirectory named build on a case-insensitive file system like HFS+." var:"BUILD_FILE_NAMES"` BlacklistDirs []string `help:"Directories to blacklist when recursively searching for BUILD files (e.g. when using plz build ... or similar).\nThis is generally useful when you have large directories within your repo that don't need to be searched, especially things like node_modules that have come from external package managers."` PreloadBuildDefs []string `help:"Files to preload by the parser before loading any BUILD files.\nSince this is done before the first package is parsed they must be files in the repository, they cannot be subinclude() paths." example:"build_defs/go_bindata.build_defs"` BuildDefsDir []string `help:"Directory to look in when prompted for help topics that aren't known internally." example:"build_defs"` BuiltinPleasings bool `help:"Adds github.com/thought-machine/pleasings as a default subrepo named pleasings. This makes some builtin extensions available, but is not fully deterministic (it always uses the latest version). You may prefer to disable this and define your own subrepo for it (or not use it at all, of course)."` NumThreads int `help:"Number of parallel parse operations to run.\nIs overridden by the --num_threads command line flag." example:"6"` GitFunctions bool `help:"Activates built-in functions git_branch, git_commit, git_show and git_state. If disabled they will not be usable at parse time."` } `help:"The [parse] section in the config contains settings specific to parsing files."` Display struct { UpdateTitle bool `help:"Updates the title bar of the shell window Please is running in as the build progresses. 
This isn't on by default because not everyone's shell is configured to reset it again after and we don't want to alter it forever."` SystemStats bool `help:"Whether or not to show basic system resource usage in the interactive display. Has no effect without that configured."` MaxWorkers int `help:"Maximum number of worker rows to display at any one time."` } `help:"Please has an animated display mode which shows the currently building targets.\nBy default it will autodetect whether it is using an interactive TTY session and choose whether to use it or not, although you can force it on or off via flags.\n\nThe display is heavily inspired by Buck's SuperConsole."` Colours map[string]string `help:"Colour code overrides in interactive output. These correspond to requirements on each target."` Build struct { Arch cli.Arch `help:"Architecture to compile for. Defaults to the host architecture."` Timeout cli.Duration `help:"Default timeout for build actions. Default is ten minutes."` Path []string `help:"The PATH variable that will be passed to the build processes.\nDefaults to /usr/local/bin:/usr/bin:/bin but of course can be modified if you need to get binaries from other locations." example:"/usr/local/bin:/usr/bin:/bin"` Config string `help:"The build config to use when one is not chosen on the command line. Defaults to opt." example:"opt | dbg"` FallbackConfig string `help:"The build config to use when one is chosen and a required target does not have one by the same name. Also defaults to opt." example:"opt | dbg"` Lang string `help:"Sets the language passed to build rules when building. This can be important for some tools (although hopefully not many) - we've mostly observed it with Sass."` Sandbox bool `help:"True to sandbox individual build actions, which isolates them from network access and some aspects of the filesystem. Currently only works on Linux." var:"BUILD_SANDBOX"` Xattrs bool `help:"True (the default) to attempt to use xattrs to record file metadata. If false Please will fall back to using additional files where needed, which is more compatible but has slightly worse performance."` PleaseSandboxTool string `help:"The location of the please_sandbox tool to use."` Nonce string `help:"This is an arbitrary string that is added to the hash of every build target. It provides a way to force a rebuild of everything when it's changed.\nWe will bump the default of this whenever we think it's required - although it's been a pretty long time now and we hope that'll continue."` PassEnv []string `help:"A list of environment variables to pass from the current environment to build rules. For example\n\nPassEnv = HTTP_PROXY\n\nwould copy your HTTP_PROXY environment variable to the build env for any rules."` PassUnsafeEnv []string `help:"Similar to PassEnv, a list of environment variables to pass from the current environment to build rules. Unlike PassEnv, the environment variable values are not used when calculating build target hashes."` HTTPProxy cli.URL `help:"A URL to use as a proxy server for downloads. Only applies to internal ones - e.g. self-updates or remote_file rules."` HashFunction string `help:"The hash function to use internally for build actions." 
options:"sha1,sha256"` } `help:"A config section describing general settings related to building targets in Please.\nSince Please is by nature about building things, this only has the most generic properties; most of the more esoteric properties are configured in their own sections."` BuildConfig map[string]string `help:"A section of arbitrary key-value properties that are made available in the BUILD language. These are often useful for writing custom rules that need some configurable property.\n\n[buildconfig]\nandroid-tools-version = 23.0.2\n\nFor example, the above can be accessed as CONFIG.ANDROID_TOOLS_VERSION."` BuildEnv map[string]string `help:"A set of extra environment variables to define for build rules. For example:\n\n[buildenv]\nsecret-passphrase = 12345\n\nThis would become SECRET_PASSPHRASE for any rules. These can be useful for passing secrets into custom rules; any variables containing SECRET or PASSWORD won't be logged.\n\nIt's also useful if you'd like internal tools to honour some external variable."` Cache struct { Workers int `help:"Number of workers for uploading artifacts to remote caches, which is done asynchronously."` Dir string `help:"Sets the directory to use for the dir cache.\nThe default is 'please' under the user's cache dir (i.e. ~/.cache/please, ~/Library/Caches/please, etc), if set to the empty string the dir cache will be disabled." example:".plz-cache"` DirCacheHighWaterMark cli.ByteSize `help:"Starts cleaning the directory cache when it is over this number of bytes.\nCan also be given with human-readable suffixes like 10G, 200MB etc."` DirCacheLowWaterMark cli.ByteSize `help:"When cleaning the directory cache, it's reduced to at most this size."` DirClean bool `help:"Controls whether entries in the dir cache are cleaned or not. If disabled the cache will only grow."` DirCompress bool `help:"Compresses stored artifacts in the dir cache. They are slower to store & retrieve but more compact."` HTTPURL cli.URL `help:"Base URL of the HTTP cache.\nNot set to anything by default which means the cache will be disabled."` HTTPWriteable bool `help:"If True this plz instance will write content back to the HTTP cache.\nBy default it runs in read-only mode."` HTTPTimeout cli.Duration `help:"Timeout for operations contacting the HTTP cache, in seconds."` HTTPConcurrentRequestLimit int `help:"The maximum amount of concurrent requests that can be open. Default 20."` HTTPRetry int `help:"The maximum number of retries before a request will give up, if a request is retryable"` } `help:"Please has several built-in caches that can be configured in its config file.\n\nThe simplest one is the directory cache which by default is written into the .plz-cache directory. This allows for fast retrieval of code that has been built before (for example, when swapping Git branches).\n\nThere is also a remote RPC cache which allows using a centralised server to store artifacts. A typical pattern here is to have your CI system write artifacts into it and give developers read-only access so they can reuse its work.\n\nFinally there's a HTTP cache which is very similar, but a little obsolete now since the RPC cache outperforms it and has some extra features. Otherwise the two have similar semantics and share quite a bit of implementation.\n\nPlease has server implementations for both the RPC and HTTP caches."` Test struct { Timeout cli.Duration `help:"Default timeout applied to all tests. 
Can be overridden on a per-rule basis."` Sandbox bool `help:"True to sandbox individual tests, which isolates them from network access, IPC and some aspects of the filesystem. Currently only works on Linux." var:"TEST_SANDBOX"` DisableCoverage []string `help:"Disables coverage for tests that have any of these labels spcified."` Upload cli.URL `help:"URL to upload test results to (in XML format)"` } `help:"A config section describing settings related to testing in general."` Remote struct { URL string `help:"URL for the remote server."` CASURL string `help:"URL for the CAS service, if it is different to the main one."` AssetURL string `help:"URL for the remote asset server, if it is different to the main one."` NumExecutors int `help:"Maximum number of remote executors to use simultaneously."` Instance string `help:"Remote instance name to request; depending on the server this may be required."` Name string `help:"A name for this worker instance. This is attached to artifacts uploaded to remote storage." example:"agent-001"` DisplayURL string `help:"A URL to browse the remote server with (e.g. using buildbarn-browser). Only used when printing hashes."` TokenFile string `help:"A file containing a token that is attached to outgoing RPCs to authenticate them. This is somewhat bespoke; we are still investigating further options for authentication."` Timeout cli.Duration `help:"Timeout for connections made to the remote server."` Secure bool `help:"Whether to use TLS for communication or not."` Gzip bool `help:"Whether to use gzip compression for communication."` VerifyOutputs bool `help:"Whether to verify all outputs are present after a cached remote execution action. Depending on your server implementation, you may require this to ensure files are really present."` HomeDir string `help:"The home directory on the build machine."` Platform []string `help:"Platform properties to request from remote workers, in the format key=value."` CacheDuration cli.Duration `help:"Length of time before we re-check locally cached build actions. Default is unlimited."` } `help:"Settings related to remote execution & caching using the Google remote execution APIs. This section is still experimental and subject to change."` Size map[string]*Size `help:"Named sizes of targets; these are the definitions of what can be passed to the 'size' argument."` Cover struct { FileExtension []string `help:"Extensions of files to consider for coverage.\nDefaults to a reasonably obvious set for the builtin rules including .go, .py, .java, etc."` ExcludeExtension []string `help:"Extensions of files to exclude from coverage.\nTypically this is for generated code; the default is to exclude protobuf extensions like .pb.go, _pb2.py, etc."` } Gc struct { Keep []BuildLabel `help:"Marks targets that gc should always keep. Can include meta-targets such as //test/... and //docs:all."` KeepLabel []string `help:"Defines a target label to be kept; for example, if you set this to go, no Go targets would ever be considered for deletion." example:"go"` } `help:"Please supports a form of 'garbage collection', by which it means identifying targets that are not used for anything. By default binary targets and all their transitive dependencies are always considered non-garbage, as are any tests directly on those. 
The config options here allow tweaking this behaviour to retain more things.\n\nNote that it's a very good idea that your BUILD files are in the standard format when running this."` Go struct { GoTool string `help:"The binary to use to invoke Go & its subtools with." var:"GO_TOOL"` GoRoot string `help:"If set, will set the GOROOT environment variable appropriately during build actions."` TestTool string `help:"Sets the location of the please_go_test tool that is used to template the test main for go_test rules." var:"GO_TEST_TOOL"` GoPath string `help:"If set, will set the GOPATH environment variable appropriately during build actions." var:"GOPATH"` ImportPath string `help:"Sets the default Go import path at the root of this repository.\nFor example, in the Please repo, we might set it to github.com/thought-machine/please to allow imports from that package within the repo." var:"GO_IMPORT_PATH"` CgoCCTool string `help:"Sets the location of CC while building cgo_library and cgo_test rules. Defaults to gcc" var:"CGO_CC_TOOL"` FilterTool string `help:"Sets the location of the please_go_filter tool that is used to filter source files against build constraints." var:"GO_FILTER_TOOL"` DefaultStatic bool `help:"Sets Go binaries to default to static linking. Note that enabling this may have negative consequences for some code, including Go's DNS lookup code in the net module." var:"GO_DEFAULT_STATIC"` } `help:"Please has built-in support for compiling Go, and of course is written in Go itself.\nSee the config subfields or the Go rules themselves for more information.\n\nNote that Please is a bit more flexible than Go about directory layout - for example, it is possible to have multiple packages in a directory, but it's not a good idea to push this too far since Go's directory layout is inextricably linked with its import paths."` Python struct { PipTool string `help:"The tool that is invoked during pip_library rules." var:"PIP_TOOL"` PipFlags string `help:"Additional flags to pass to pip invocations in pip_library rules." var:"PIP_FLAGS"` PexTool string `help:"The tool that's invoked to build pexes. Defaults to please_pex in the install directory." var:"PEX_TOOL"` DefaultInterpreter string `help:"The interpreter used for python_binary and python_test rules when none is specified on the rule itself. Defaults to python but you could of course set it to, say, pypy." var:"DEFAULT_PYTHON_INTERPRETER"` TestRunner string `help:"The test runner used to discover & run Python tests; one of unittest, pytest or behave, or a custom import path to bring your own." var:"PYTHON_TEST_RUNNER"` TestRunnerBootstrap string `help:"Target providing test-runner library and its transitive dependencies. Injects plz-provided bootstraps if not given." var:"PYTHON_TEST_RUNNER_BOOTSTRAP"` ModuleDir string `help:"Defines a directory containing modules from which they can be imported at the top level.\nBy default this is empty but by convention we define our pip_library rules in third_party/python and set this appropriately. Hence any of those third-party libraries that try something like import six will have it work as they expect, even though it's actually in a different location within the .pex." var:"PYTHON_MODULE_DIR"` DefaultPipRepo cli.URL `help:"Defines a location for a pip repo to download wheels from.\nBy default pip_library uses PyPI (although see below on that) but you may well want to use this define another location to upload your own wheels to.\nIs overridden by the repo argument to pip_library." 
var:"PYTHON_DEFAULT_PIP_REPO"` WheelRepo cli.URL `help:"Defines a location for a remote repo that python_wheel rules will download from. See python_wheel for more information." var:"PYTHON_WHEEL_REPO"` UsePyPI bool `help:"Whether or not to use PyPI for pip_library rules or not. Defaults to true, if you disable this you will presumably want to set DefaultPipRepo to use one of your own.\nIs overridden by the use_pypi argument to pip_library." var:"USE_PYPI"` WheelNameScheme string `help:"Defines a custom templatized wheel naming scheme. Templatized variables should be surrounded in curly braces, and the available options are: url_base, package_name, and version. The default search pattern is '{url_base}/{package_name}-{version}-${{OS}}-${{ARCH}}.whl' along with a few common variants." var:"PYTHON_WHEEL_NAME_SCHEME"` InterpreterOptions string `help:"Options to pass to the python interpeter, when writing shebangs for pex executables." var:"PYTHON_INTERPRETER_OPTIONS"` } `help:"Please has built-in support for compiling Python.\nPlease's Python artifacts are pex files, which are essentially self-executable zip files containing all needed dependencies, bar the interpreter itself. This fits our aim of at least semi-static binaries for each language.\nSee https://github.com/pantsbuild/pex for more information.\nNote that due to differences between the environment inside a pex and outside some third-party code may not run unmodified (for example, it cannot simply open() files). It's possible to work around a lot of this, but if it all becomes too much it's possible to mark pexes as not zip-safe which typically resolves most of it at a modest speed penalty."` Java struct { JavacTool string `help:"Defines the tool used for the Java compiler. Defaults to javac." var:"JAVAC_TOOL"` JlinkTool string `help:"Defines the tool used for the Java linker. Defaults to jlink." var:"JLINK_TOOL"` JavaHome string `help:"Defines the path of the Java Home folder." var:"JAVA_HOME"` JavacWorker string `help:"Defines the tool used for the Java persistent compiler. This is significantly (approx 4x) faster for large Java trees than invoking javac separately each time. Default to javac_worker in the install directory, but can be switched off to fall back to javactool and separate invocation." var:"JAVAC_WORKER"` JarCatTool string `help:"Defines the tool used to concatenate .jar files which we use to build the output of java_binary, java_test and various other rules. Defaults to jarcat in the Please install directory." var:"JARCAT_TOOL"` JUnitRunner string `help:"Defines the .jar containing the JUnit runner. This is built into all java_test rules since it's necessary to make JUnit do anything useful.\nDefaults to junit_runner.jar in the Please install directory." var:"JUNIT_RUNNER"` DefaultTestPackage string `help:"The Java classpath to search for functions annotated with @Test. If not specified the compiled sources will be searched for files named *Test.java." var:"DEFAULT_TEST_PACKAGE"` ReleaseLevel string `help:"The default Java release level when compiling.\nSourceLevel and TargetLevel are ignored if this is set. Bear in mind that this flag is only supported in Java version 9+." var:"JAVA_RELEASE_LEVEL"` SourceLevel string `help:"The default Java source level when compiling. Defaults to 8." var:"JAVA_SOURCE_LEVEL"` TargetLevel string `help:"The default Java bytecode level to target. Defaults to 8." var:"JAVA_TARGET_LEVEL"` JavacFlags string `help:"Additional flags to pass to javac when compiling libraries." 
example:"-Xmx1200M" var:"JAVAC_FLAGS"` JavacTestFlags string `help:"Additional flags to pass to javac when compiling tests." example:"-Xmx1200M" var:"JAVAC_TEST_FLAGS"` DefaultMavenRepo []cli.URL `help:"Default location to load artifacts from in maven_jar rules. Can be overridden on a per-rule basis." var:"DEFAULT_MAVEN_REPO"` Toolchain string `help:"A label identifying a java_toolchain." var:"JAVA_TOOLCHAIN"` } `help:"Please has built-in support for compiling Java.\nIt builds uber-jars for binary and test rules which contain all dependencies and can be easily deployed, and with the help of some of Please's additional tools they are deterministic as well.\n\nWe've only tested support for Java 7 and 8, although it's likely newer versions will work with little or no change."` Cpp struct { CCTool string `help:"The tool invoked to compile C code. Defaults to gcc but you might want to set it to clang, for example." var:"CC_TOOL"` CppTool string `help:"The tool invoked to compile C++ code. Defaults to g++ but you might want to set it to clang++, for example." var:"CPP_TOOL"` LdTool string `help:"The tool invoked to link object files. Defaults to ld but you could also set it to gold, for example." var:"LD_TOOL"` ArTool string `help:"The tool invoked to archive static libraries. Defaults to ar." var:"AR_TOOL"` LinkWithLdTool bool `help:"If true, instructs Please to use the tool set earlier in ldtool to link binaries instead of cctool.\nThis is an esoteric setting that most people don't want; a vanilla ld will not perform all steps necessary here (you'll get lots of missing symbol messages from having no libc etc). Generally best to leave this disabled unless you have very specific requirements." var:"LINK_WITH_LD_TOOL"` DefaultOptCflags string `help:"Compiler flags passed to all C rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c99 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CFLAGS"` DefaultDbgCflags string `help:"Compiler rules passed to all C rules during dbg builds.\nDefaults to --std=c99 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CFLAGS"` DefaultOptCppflags string `help:"Compiler flags passed to all C++ rules during opt builds; these are typically pretty basic things like what language standard you want to target, warning flags, etc.\nDefaults to --std=c++11 -O3 -DNDEBUG -Wall -Wextra -Werror" var:"DEFAULT_OPT_CPPFLAGS"` DefaultDbgCppflags string `help:"Compiler rules passed to all C++ rules during dbg builds.\nDefaults to --std=c++11 -g3 -DDEBUG -Wall -Wextra -Werror." var:"DEFAULT_DBG_CPPFLAGS"` DefaultLdflags string `help:"Linker flags passed to all C++ rules.\nBy default this is empty." var:"DEFAULT_LDFLAGS"` PkgConfigPath string `help:"Custom PKG_CONFIG_PATH for pkg-config.\nBy default this is empty." var:"PKG_CONFIG_PATH"` Coverage bool `help:"If true (the default), coverage will be available for C and C++ build rules.\nThis is still a little experimental but should work for GCC. Right now it does not work for Clang (it likely will in Clang 4.0 which will likely support --fprofile-dir) and so this can be useful to disable it.\nIt's also useful in some cases for CI systems etc if you'd prefer to avoid the overhead, since the tests have to be compiled with extra instrumentation and without optimisation." var:"CPP_COVERAGE"` TestMain BuildLabel `help:"The build target to use for the default main for C++ test rules." 
example:"///pleasings//cc:unittest_main" var:"CC_TEST_MAIN"` ClangModules bool `help:"Uses Clang-style arguments for compiling cc_module rules. If disabled gcc-style arguments will be used instead. Experimental, expected to be removed at some point once module compilation methods are more consistent." var:"CC_MODULES_CLANG"` } `help:"Please has built-in support for compiling C and C++ code. We don't support every possible nuance of compilation for these languages, but aim to provide something fairly straightforward.\nTypically there is little problem compiling & linking against system libraries although Please has no insight into those libraries and when they change, so cannot rebuild targets appropriately.\n\nThe C and C++ rules are very similar and simply take a different set of tools and flags to facilitate side-by-side usage."` Proto struct { ProtocTool string `help:"The binary invoked to compile .proto files. Defaults to protoc." var:"PROTOC_TOOL"` ProtocGoPlugin string `help:"The binary passed to protoc as a plugin to generate Go code. Defaults to protoc-gen-go.\nWe've found this easier to manage with a go_get rule instead though, so you can also pass a build label here. See the Please repo for an example." var:"PROTOC_GO_PLUGIN"` GrpcPythonPlugin string `help:"The plugin invoked to compile Python code for grpc_library.\nDefaults to protoc-gen-grpc-python." var:"GRPC_PYTHON_PLUGIN"` GrpcJavaPlugin string `help:"The plugin invoked to compile Java code for grpc_library.\nDefaults to protoc-gen-grpc-java." var:"GRPC_JAVA_PLUGIN"` GrpcCCPlugin string `help:"The plugin invoked to compile C++ code for grpc_library.\nDefaults to grpc_cpp_plugin." var:"GRPC_CC_PLUGIN"` Language []string `help:"Sets the default set of languages that proto rules are built for.\nChosen from the set of {cc, java, go, py}.\nDefaults to all of them!" var:"PROTO_LANGUAGES"` PythonDep string `help:"An in-repo dependency that's applied to any Python proto libraries." var:"PROTO_PYTHON_DEP"` JavaDep string `help:"An in-repo dependency that's applied to any Java proto libraries." var:"PROTO_JAVA_DEP"` GoDep string `help:"An in-repo dependency that's applied to any Go proto libraries." var:"PROTO_GO_DEP"` JsDep string `help:"An in-repo dependency that's applied to any Javascript proto libraries." var:"PROTO_JS_DEP"` PythonGrpcDep string `help:"An in-repo dependency that's applied to any Python gRPC libraries." var:"GRPC_PYTHON_DEP"` JavaGrpcDep string `help:"An in-repo dependency that's applied to any Java gRPC libraries." var:"GRPC_JAVA_DEP"` GoGrpcDep string `help:"An in-repo dependency that's applied to any Go gRPC libraries." var:"GRPC_GO_DEP"` } `help:"Please has built-in support for compiling protocol buffers, which are a form of codegen to define common data types which can be serialised and communicated between different languages.\nSee https://developers.google.com/protocol-buffers/ for more information.\n\nThere is also support for gRPC, which is an implementation of protobuf's RPC framework. See http://www.grpc.io/ for more information.\n\nNote that you must have the protocol buffers compiler (and gRPC plugins, if needed) installed on your machine to make use of these rules."` Licences struct { Accept []string `help:"Licences that are accepted in this repository.\nWhen this is empty licences are ignored. 
As soon as it's set any licence detected or assigned must be accepted explicitly here.\nThere's no fuzzy matching, so some package managers (especially PyPI and Maven, but shockingly not npm which rather nicely uses SPDX) will generate a lot of slightly different spellings of the same thing, which will all have to be accepted here. We'd rather that than trying to 'cleverly' match them which might result in matching the wrong thing."` Reject []string `help:"Licences that are explicitly rejected in this repository.\nAn astute observer will notice that this is not very different to just not adding it to the accept section, but it does have the advantage of explicitly documenting things that the team aren't allowed to use."` } `help:"Please has some limited support for declaring acceptable licences and detecting them from some libraries. You should not rely on this for complete licence compliance, but it can be a useful check to try to ensure that unacceptable licences do not slip in."` Alias map[string]*Alias `help:"Allows defining alias replacements with more detail than the [aliases] section. Otherwise follows the same process, i.e. performs replacements of command strings."` Bazel struct { Compatibility bool `help:"Activates limited Bazel compatibility mode. When this is active several rule arguments are available under different names (e.g. compiler_flags -> copts etc), the WORKSPACE file is interpreted, Makefile-style replacements like $< and $@ are made in genrule commands, etc.\nNote that Skylark is not generally supported and many aspects of compatibility are fairly superficial; it's unlikely this will work for complex setups of either tool." var:"BAZEL_COMPATIBILITY"` } `help:"Bazel is an open-sourced version of Google's internal build tool. Please draws a lot of inspiration from the original tool although the two have now diverged in various ways.\nNonetheless, if you've used Bazel, you will likely find Please familiar."` // HomeDir is not a config setting but is used to construct the path. HomeDir string // Similarly this is a fully expanded form of Please.Location PleaseLocation string // buildEnvStored is a cached form of BuildEnv. buildEnvStored *storedBuildEnv } // An Alias represents aliases in the config. type Alias struct { Cmd string `help:"Command to run for this alias."` Desc string `help:"Description of this alias"` Subcommand []string `help:"Known subcommands of this command"` Flag []string `help:"Known flags of this command"` PositionalLabels bool `help:"Treats positional arguments after commands as build labels for the purpose of tab completion."` } // A Size represents a named size in the config. type Size struct { Timeout cli.Duration `help:"Timeout for targets of this size"` TimeoutName string `help:"Name of the timeout, to be passed to the 'timeout' argument"` } type storedBuildEnv struct { Env, Path []string Once sync.Once } // Hash returns a hash of the parts of this configuration that affect building targets in general. // Most parts are considered not to (e.g. cache settings) or affect specific targets (e.g. changing // tool paths which get accounted for on the targets that use them). func (config *Configuration) Hash() []byte { h := sha1.New() // These fields are the ones that need to be in the general hash; other things will be // picked up by relevant rules (particularly tool paths etc). // Note that container settings are handled separately. 
h.Write([]byte(config.Build.Lang)) h.Write([]byte(config.Build.Nonce)) for _, l := range config.Licences.Reject { h.Write([]byte(l)) } for _, env := range config.getBuildEnv(false, false) { h.Write([]byte(env)) } return h.Sum(nil) } // GetBuildEnv returns the build environment configured for this config object. func (config *Configuration) GetBuildEnv() []string { config.buildEnvStored.Once.Do(func() { config.buildEnvStored.Env = config.getBuildEnv(true, true) for _, e := range config.buildEnvStored.Env { if strings.HasPrefix(e, "PATH=") { config.buildEnvStored.Path = strings.Split(strings.TrimPrefix(e, "PATH="), ":") } } }) return config.buildEnvStored.Env } // Path returns the slice of strings corresponding to the PATH env var. func (config *Configuration) Path() []string { config.GetBuildEnv() // ensure it is initialised return config.buildEnvStored.Path } func (config *Configuration) getBuildEnv(includePath bool, includeUnsafe bool) []string { env := []string{ // Need to know these for certain rules. "ARCH=" + config.Build.Arch.Arch, "OS=" + config.Build.Arch.OS, // These are slightly modified forms that are more convenient for some things. "XARCH=" + config.Build.Arch.XArch(), "XOS=" + config.Build.Arch.XOS(), // It's easier to just make these available for Go-based rules. "GOARCH=" + config.Build.Arch.GoArch(), "GOOS=" + config.Build.Arch.OS, } // from the BuildEnv config keyword for k, v := range config.BuildEnv { pair := strings.Replace(strings.ToUpper(k), "-", "_", -1) + "=" + v env = append(env, pair) } // from the user's environment based on the PassUnsafeEnv config keyword if includeUnsafe { for _, k := range config.Build.PassUnsafeEnv { if v, isSet := os.LookupEnv(k); isSet { if k == "PATH" { // plz's install location always needs to be on the path. v = fs.ExpandHomePathTo(config.Please.Location, config.HomeDir) + ":" + v includePath = false // skip this in a bit } env = append(env, k+"="+v) } } } // from the user's environment based on the PassEnv config keyword for _, k := range config.Build.PassEnv { if v, isSet := os.LookupEnv(k); isSet { if k == "PATH" { // plz's install location always needs to be on the path. v = fs.ExpandHomePathTo(config.Please.Location, config.HomeDir) + ":" + v includePath = false // skip this in a bit } env = append(env, k+"="+v) } } if includePath { // Use a restricted PATH; it'd be easier for the user if we pass it through // but really external environment variables shouldn't affect this. // The only concession is that ~ is expanded as the user's home directory // in PATH entries. env = append(env, "PATH="+fs.ExpandHomePathTo(strings.Join(append([]string{config.Please.Location}, config.Build.Path...), ":"), config.HomeDir)) } sort.Strings(env) return env } // TagsToFields returns a map of string represent the properties of CONFIG object to the config Structfield func (config *Configuration) TagsToFields() map[string]reflect.StructField { tags := make(map[string]reflect.StructField) v := reflect.ValueOf(config).Elem() for i := 0; i < v.NumField(); i++ { if field := v.Field(i); field.Kind() == reflect.Struct { for j := 0; j < field.NumField(); j++ { if tag := field.Type().Field(j).Tag.Get("var"); tag != "" { tags[tag] = field.Type().Field(j) } } } } return tags } // ApplyOverrides applies a set of overrides to the config. // The keys of the given map are dot notation for the config setting. 
func (config *Configuration) ApplyOverrides(overrides map[string]string) error { match := func(s1 string) func(string) bool { return func(s2 string) bool { return strings.ToLower(s2) == s1 } } elem := reflect.ValueOf(config).Elem() for k, v := range overrides { split := strings.Split(strings.ToLower(k), ".") if len(split) != 2 { return fmt.Errorf("Bad option format: %s", k) } field := elem.FieldByNameFunc(match(split[0])) if !field.IsValid() { return fmt.Errorf("Unknown config field: %s", split[0]) } else if field.Kind() == reflect.Map { field.SetMapIndex(reflect.ValueOf(split[1]), reflect.ValueOf(v)) continue } else if field.Kind() != reflect.Struct { return fmt.Errorf("Unsettable config field: %s", split[0]) } subfield, ok := field.Type().FieldByNameFunc(match(split[1])) if !ok { return fmt.Errorf("Unknown config field: %s", split[1]) } field = field.FieldByNameFunc(match(split[1])) switch field.Kind() { case reflect.String: // verify this is a legit setting for this field if options := subfield.Tag.Get("options"); options != "" { if !cli.ContainsString(v, strings.Split(options, ",")) { return fmt.Errorf("Invalid value %s for field %s; options are %s", v, k, options) } } if field.Type().Name() == "URL" { field.Set(reflect.ValueOf(cli.URL(v))) } else { field.Set(reflect.ValueOf(v)) } case reflect.Bool: v = strings.ToLower(v) // Mimics the set of truthy things gcfg accepts in our config file. field.SetBool(v == "true" || v == "yes" || v == "on" || v == "1") case reflect.Int: i, err := strconv.Atoi(v) if err != nil { return fmt.Errorf("Invalid value for an integer field: %s", v) } field.Set(reflect.ValueOf(i)) case reflect.Int64: var d cli.Duration if err := d.UnmarshalText([]byte(v)); err != nil { return fmt.Errorf("Invalid value for a duration field: %s", v) } field.Set(reflect.ValueOf(d)) case reflect.Slice: // Comma-separated values are accepted. if field.Type().Elem().Kind() == reflect.Struct { // Assume it must be a slice of BuildLabel. l := []BuildLabel{} for _, s := range strings.Split(v, ",") { l = append(l, ParseBuildLabel(s, "")) } field.Set(reflect.ValueOf(l)) } else if field.Type().Elem().Name() == "URL" { urls := []cli.URL{} for _, s := range strings.Split(v, ",") { urls = append(urls, cli.URL(s)) } field.Set(reflect.ValueOf(urls)) } else { field.Set(reflect.ValueOf(strings.Split(v, ","))) } default: return fmt.Errorf("Can't override config field %s (is %s)", k, field.Kind()) } } return nil } // Completions returns a list of possible completions for the given option prefix. func (config *Configuration) Completions(prefix string) []flags.Completion { ret := []flags.Completion{} t := reflect.TypeOf(config).Elem() for i := 0; i < t.NumField(); i++ { if field := t.Field(i); field.Type.Kind() == reflect.Struct { for j := 0; j < field.Type.NumField(); j++ { subfield := field.Type.Field(j) if name := strings.ToLower(field.Name + "." + subfield.Name); strings.HasPrefix(name, prefix) { help := subfield.Tag.Get("help") if options := subfield.Tag.Get("options"); options != "" { for _, option := range strings.Split(options, ",") { ret = append(ret, flags.Completion{Item: name + ":" + option, Description: help}) } } else { ret = append(ret, flags.Completion{Item: name + ":", Description: help}) } } } } } return ret } // UpdateArgsWithAliases applies the aliases in this config to the given set of arguments. 
func (config *Configuration) UpdateArgsWithAliases(args []string) []string { for idx, arg := range args[1:] { // Please should not touch anything that comes after `--` if arg == "--" { break } for k, v := range config.Alias { if arg == k { // We could insert every token in v into os.Args at this point and then we could have // aliases defined in terms of other aliases but that seems rather like overkill so just // stick the replacement in wholesale instead. // Do not ask about the inner append and the empty slice. cmd, err := shlex.Split(v.Cmd) if err != nil { log.Fatalf("Invalid alias replacement for %s: %s", k, err) } return append(append(append([]string{}, args[:idx+1]...), cmd...), args[idx+2:]...) } } } return args } // PrintAliases prints the set of aliases defined in the config. func (config *Configuration) PrintAliases(w io.Writer) { aliases := config.Alias names := make([]string, 0, len(aliases)) maxlen := 0 for alias := range aliases { names = append(names, alias) if len(alias) > maxlen { maxlen = len(alias) } } sort.Strings(names) w.Write([]byte("\nAvailable commands for this repository:\n")) tmpl := fmt.Sprintf(" %%-%ds %%s\n", maxlen) for _, name := range names { fmt.Fprintf(w, tmpl, name, aliases[name].Desc) } } // IsABuildFile returns true if given filename is a build file name. func (config *Configuration) IsABuildFile(name string) bool { for _, buildFileName := range config.Parse.BuildFileName { if name == buildFileName { return true } } return false } // NumRemoteExecutors returns the number of actual remote executors we'll have func (config *Configuration) NumRemoteExecutors() int { if config.Remote.URL == "" { return 0 } return config.Remote.NumExecutors } // A ConfigProfile is a string that knows how to handle completions given all the possible config file locations. type ConfigProfile string // Complete implements command-line flags completion for a ConfigProfile. func (profile ConfigProfile) Complete(match string) (completions []flags.Completion) { for _, filename := range defaultConfigFiles() { matches, _ := filepath.Glob(filename + "." + match + "*") for _, match := range matches { if suffix := strings.TrimPrefix(match, filename+"."); suffix != "local" { // .plzconfig.local doesn't count completions = append(completions, flags.Completion{ Item: suffix, Description: "Profile defined at " + match, }) } } } return completions } // ConfigProfiles makes it easier to convert ConfigProfile slices. type ConfigProfiles []ConfigProfile // Strings converts this to a slice of strings. func (profiles ConfigProfiles) Strings() []string { ret := make([]string, len(profiles)) for i, p := range profiles { ret[i] = string(p) } return ret }
1
9266
You should add some help on the struct too so that `plz help featureflags` gives a bit of explanation. It might also be worth mentioning that these flags generally won't remain around very long relative to other config options (i.e. they typically enable "preview" features for the next major release).
thought-machine-please
go
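The review comment on the record above asks for a `help` tag on the `FeatureFlags` struct itself, mirroring how the other sections of `Configuration` carry section-level help text. Below is a minimal, self-contained sketch of that idea: the flag field and its tags are taken from the patch, while the section-level help wording, the trimmed-down `Configuration` type, and the reflection-based read-back in `main` are illustrative assumptions rather than the project's actual help machinery.

```go
package main

import (
	"fmt"
	"reflect"
)

// Configuration loosely mirrors the shape of the [featureflags] section added in
// the patch above. The flag field and its tags come from the diff; the help text
// on the section itself is only illustrative wording for what the reviewer asks for.
type Configuration struct {
	FeatureFlags struct {
		JavaBinaryExecutableByDefault bool `help:"Makes java_binary rules self executable by default. Target release version 16." var:"FF_JAVA_SELF_EXEC"`
	} `help:"Feature flags enable preview behaviour ahead of the next major release and are usually removed once that release lands, so they are shorter-lived than other config options."`
}

func main() {
	t := reflect.TypeOf(Configuration{})
	section, _ := t.FieldByName("FeatureFlags")

	// Section-level help: the kind of text a help command could print for the section.
	fmt.Println(section.Tag.Get("help"))

	// Per-flag help for the single flag inside the section.
	flag, _ := section.Type.FieldByName("JavaBinaryExecutableByDefault")
	fmt.Println(flag.Tag.Get("help"))
}
```

Keeping the help on the struct field rather than in external docs matches the convention visible elsewhere in this config file, where help output is auto-generated from struct tags, so a `plz help featureflags`-style command could surface it without extra plumbing.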
@@ -302,7 +302,7 @@ func newSessionManagerFactory( nodeOptions node.Options, ) session.ManagerFactory { return func(dialog communication.Dialog) *session.Manager { - providerBalanceTrackerFactory := func(consumer, provider, issuer identity.Identity) (session.BalanceTracker, error) { + providerBalanceTrackerFactory := func(consumer, receiver, issuer identity.Identity) (session.BalanceTracker, error) { // if the flag ain't set, just return a noop balance tracker if !nodeOptions.ExperimentPayments { return payments_noop.NewSessionBalance(), nil
1
/* * Copyright (C) 2018 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package cmd import ( "path/filepath" "time" "github.com/asaskevich/EventBus" log "github.com/cihub/seelog" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" "github.com/mysteriumnetwork/node/blockchain" "github.com/mysteriumnetwork/node/communication" nats_dialog "github.com/mysteriumnetwork/node/communication/nats/dialog" nats_discovery "github.com/mysteriumnetwork/node/communication/nats/discovery" consumer_session "github.com/mysteriumnetwork/node/consumer/session" "github.com/mysteriumnetwork/node/consumer/statistics" "github.com/mysteriumnetwork/node/core/connection" "github.com/mysteriumnetwork/node/core/ip" "github.com/mysteriumnetwork/node/core/location" "github.com/mysteriumnetwork/node/core/node" "github.com/mysteriumnetwork/node/core/service" "github.com/mysteriumnetwork/node/core/storage/boltdb" "github.com/mysteriumnetwork/node/core/storage/boltdb/migrations/history" "github.com/mysteriumnetwork/node/identity" identity_registry "github.com/mysteriumnetwork/node/identity/registry" "github.com/mysteriumnetwork/node/logconfig" "github.com/mysteriumnetwork/node/market" market_metrics "github.com/mysteriumnetwork/node/market/metrics" "github.com/mysteriumnetwork/node/market/metrics/oracle" "github.com/mysteriumnetwork/node/market/mysterium" "github.com/mysteriumnetwork/node/metadata" "github.com/mysteriumnetwork/node/metrics" "github.com/mysteriumnetwork/node/money" "github.com/mysteriumnetwork/node/nat" service_noop "github.com/mysteriumnetwork/node/services/noop" service_openvpn "github.com/mysteriumnetwork/node/services/openvpn" "github.com/mysteriumnetwork/node/services/openvpn/discovery/dto" "github.com/mysteriumnetwork/node/session" "github.com/mysteriumnetwork/node/session/balance" balance_provider "github.com/mysteriumnetwork/node/session/balance/provider" session_payment "github.com/mysteriumnetwork/node/session/payment" payment_factory "github.com/mysteriumnetwork/node/session/payment/factory" payments_noop "github.com/mysteriumnetwork/node/session/payment/noop" "github.com/mysteriumnetwork/node/session/promise" "github.com/mysteriumnetwork/node/session/promise/validators" "github.com/mysteriumnetwork/node/tequilapi" tequilapi_endpoints "github.com/mysteriumnetwork/node/tequilapi/endpoints" "github.com/mysteriumnetwork/node/utils" ) // Storage stores persistent objects for future usage type Storage interface { Store(issuer string, data interface{}) error Delete(issuer string, data interface{}) error Update(bucket string, object interface{}) error GetAllFrom(bucket string, data interface{}) error GetOneByField(bucket string, fieldName string, key interface{}, to interface{}) error GetLast(bucket string, to interface{}) error GetBuckets() []string Close() error } // Dependencies is DI container for top 
level components which is reused in several places type Dependencies struct { Node *node.Node NetworkDefinition metadata.NetworkDefinition MysteriumAPI *mysterium.MysteriumAPI MysteriumMorqaClient market_metrics.QualityOracle EtherClient *ethclient.Client NATService nat.NATService Storage Storage Keystore *keystore.KeyStore PromiseStorage *promise.Storage IdentityManager identity.Manager SignerFactory identity.SignerFactory IdentityRegistry identity_registry.IdentityRegistry IdentityRegistration identity_registry.RegistrationDataProvider IPResolver ip.Resolver LocationResolver location.Resolver LocationDetector location.Detector LocationOriginal location.Cache StatisticsTracker *statistics.SessionStatisticsTracker StatisticsReporter *statistics.SessionStatisticsReporter SessionStorage *consumer_session.Storage EventBus EventBus.Bus ConnectionManager connection.Manager ConnectionRegistry *connection.Registry ServicesManager *service.Manager ServiceRegistry *service.Registry ServiceSessionStorage *session.StorageMemory } // Bootstrap initiates all container dependencies func (di *Dependencies) Bootstrap(nodeOptions node.Options) error { logconfig.Bootstrap() nats_discovery.Bootstrap() log.Infof("Starting Mysterium Node (%s)", metadata.VersionAsString()) if err := nodeOptions.Directories.Check(); err != nil { return err } if err := nodeOptions.Openvpn.Check(); err != nil { return err } if err := di.bootstrapNetworkComponents(nodeOptions.OptionsNetwork); err != nil { return err } if err := di.bootstrapStorage(nodeOptions.Directories.Storage); err != nil { return err } di.bootstrapIdentityComponents(nodeOptions) di.bootstrapLocationComponents(nodeOptions.Location, nodeOptions.Directories.Config) di.bootstrapNodeComponents(nodeOptions) di.registerConnections(nodeOptions) err := di.subscribeEventConsumers() if err != nil { return err } if err := di.Node.Start(); err != nil { return err } return nil } func (di *Dependencies) registerOpenvpnConnection(nodeOptions node.Options) { service_openvpn.Bootstrap() connectionFactory := service_openvpn.NewProcessBasedConnectionFactory( // TODO instead of passing binary path here, Openvpn from node options could represent abstract vpn factory itself nodeOptions.Openvpn.BinaryPath(), nodeOptions.Directories.Config, nodeOptions.Directories.Runtime, di.LocationOriginal, di.SignerFactory, ) di.ConnectionRegistry.Register(service_openvpn.ServiceType, connectionFactory) } func (di *Dependencies) registerNoopConnection() { service_noop.Bootstrap() di.ConnectionRegistry.Register(service_noop.ServiceType, service_noop.NewConnectionCreator()) } // Shutdown stops container func (di *Dependencies) Shutdown() (err error) { var errs []error defer func() { for i := range errs { log.Error("Dependencies shutdown failed: ", errs[i]) if err == nil { err = errs[i] } } }() if di.ServicesManager != nil { if err := di.ServicesManager.Kill(); err != nil { errs = append(errs, err) } } if di.NATService != nil { if err := di.NATService.Disable(); err != nil { errs = append(errs, err) } } if di.Node != nil { if err := di.Node.Kill(); err != nil { errs = append(errs, err) } } if di.Storage != nil { if err := di.Storage.Close(); err != nil { errs = append(errs, err) } } log.Flush() return nil } func (di *Dependencies) bootstrapStorage(path string) error { localStorage, err := boltdb.NewStorage(path) if err != nil { return err } migrator := boltdb.NewMigrator(localStorage) err = migrator.RunMigrations(history.Sequence) if err != nil { return err } di.Storage = localStorage return nil } 
func (di *Dependencies) subscribeEventConsumers() error { // state events err := di.EventBus.Subscribe(connection.SessionEventTopic, di.StatisticsTracker.ConsumeSessionEvent) if err != nil { return err } err = di.EventBus.Subscribe(connection.SessionEventTopic, di.StatisticsReporter.ConsumeSessionEvent) if err != nil { return err } err = di.EventBus.Subscribe(connection.SessionEventTopic, di.SessionStorage.ConsumeSessionEvent) if err != nil { return err } // statistics events err = di.EventBus.Subscribe(connection.StatisticsEventTopic, di.StatisticsTracker.ConsumeStatisticsEvent) if err != nil { return err } return nil } func (di *Dependencies) bootstrapNodeComponents(nodeOptions node.Options) { dialogFactory := func(consumerID, providerID identity.Identity, contact market.Contact) (communication.Dialog, error) { dialogEstablisher := nats_dialog.NewDialogEstablisher(consumerID, di.SignerFactory(consumerID)) return dialogEstablisher.EstablishDialog(providerID, contact) } di.StatisticsTracker = statistics.NewSessionStatisticsTracker(time.Now) di.StatisticsReporter = statistics.NewSessionStatisticsReporter( di.StatisticsTracker, di.MysteriumAPI, di.SignerFactory, di.LocationOriginal.Get, time.Minute, ) di.SessionStorage = consumer_session.NewSessionStorage(di.Storage, di.StatisticsTracker) di.PromiseStorage = promise.NewStorage(di.Storage) di.EventBus = EventBus.New() di.ConnectionRegistry = connection.NewRegistry() di.ConnectionManager = connection.NewManager( dialogFactory, payment_factory.PaymentIssuerFactoryFunc(nodeOptions, di.SignerFactory), di.ConnectionRegistry.CreateConnection, di.EventBus, ) router := tequilapi.NewAPIRouter() tequilapi_endpoints.AddRouteForStop(router, utils.SoftKiller(di.Shutdown)) tequilapi_endpoints.AddRoutesForIdentities(router, di.IdentityManager, di.SignerFactory) tequilapi_endpoints.AddRoutesForConnection(router, di.ConnectionManager, di.IPResolver, di.StatisticsTracker, di.MysteriumAPI) tequilapi_endpoints.AddRoutesForLocation(router, di.ConnectionManager, di.LocationDetector, di.LocationOriginal) tequilapi_endpoints.AddRoutesForProposals(router, di.MysteriumAPI, di.MysteriumMorqaClient) tequilapi_endpoints.AddRoutesForSession(router, di.SessionStorage) tequilapi_endpoints.AddRoutesForService(router, di.ServicesManager) identity_registry.AddIdentityRegistrationEndpoint(router, di.IdentityRegistration, di.IdentityRegistry) httpAPIServer := tequilapi.NewServer(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort, router) metricsSender := metrics.CreateSender(nodeOptions.DisableMetrics, nodeOptions.MetricsAddress) di.Node = node.NewNode(di.ConnectionManager, httpAPIServer, di.LocationOriginal, metricsSender) } func newSessionManagerFactory( proposal market.ServiceProposal, sessionStorage *session.StorageMemory, promiseStorage session_payment.PromiseStorage, nodeOptions node.Options, ) session.ManagerFactory { return func(dialog communication.Dialog) *session.Manager { providerBalanceTrackerFactory := func(consumer, provider, issuer identity.Identity) (session.BalanceTracker, error) { // if the flag ain't set, just return a noop balance tracker if !nodeOptions.ExperimentPayments { return payments_noop.NewSessionBalance(), nil } timeTracker := session.NewTracker(time.Now) // TODO: set the time and proper payment info payment := dto.PaymentPerTime{ Price: money.Money{ Currency: money.CurrencyMyst, Amount: uint64(10), }, Duration: time.Minute, } amountCalc := session.AmountCalc{PaymentDef: payment} sender := balance.NewBalanceSender(dialog) promiseChan := 
make(chan promise.Message, 1) listener := promise.NewListener(promiseChan) err := dialog.Receive(listener.GetConsumer()) if err != nil { return nil, err } // TODO: the ints and times here need to be passed in as well, or defined as constants tracker := balance_provider.NewBalanceTracker(&timeTracker, amountCalc, 0) validator := validators.NewIssuedPromiseValidator(consumer, provider, issuer) return session_payment.NewSessionBalance(sender, tracker, promiseChan, time.Second*5, time.Second*1, validator, promiseStorage, issuer), nil } return session.NewManager( proposal, session.GenerateUUID, sessionStorage, providerBalanceTrackerFactory, ) } } // function decides on network definition combined from testnet/localnet flags and possible overrides func (di *Dependencies) bootstrapNetworkComponents(options node.OptionsNetwork) (err error) { network := metadata.DefaultNetwork switch { case options.Testnet: network = metadata.TestnetDefinition case options.Localnet: network = metadata.LocalnetDefinition } //override defined values one by one from options if options.DiscoveryAPIAddress != metadata.DefaultNetwork.DiscoveryAPIAddress { network.DiscoveryAPIAddress = options.DiscoveryAPIAddress } if options.BrokerAddress != metadata.DefaultNetwork.BrokerAddress { network.BrokerAddress = options.BrokerAddress } normalizedAddress := common.HexToAddress(options.EtherPaymentsAddress) if normalizedAddress != metadata.DefaultNetwork.PaymentsContractAddress { network.PaymentsContractAddress = normalizedAddress } if options.EtherClientRPC != metadata.DefaultNetwork.EtherClientRPC { network.EtherClientRPC = options.EtherClientRPC } di.NetworkDefinition = network di.MysteriumAPI = mysterium.NewClient(network.DiscoveryAPIAddress) di.MysteriumMorqaClient = oracle.NewMorqaClient(network.QualityOracle) log.Info("Using Eth endpoint: ", network.EtherClientRPC) if di.EtherClient, err = blockchain.NewClient(network.EtherClientRPC); err != nil { return err } log.Info("Using Eth contract at address: ", network.PaymentsContractAddress.String()) if options.ExperimentIdentityCheck { if di.IdentityRegistry, err = identity_registry.NewIdentityRegistryContract(di.EtherClient, network.PaymentsContractAddress); err != nil { return err } } else { di.IdentityRegistry = &identity_registry.FakeRegistry{Registered: true, RegistrationEventExists: true} } return nil } func (di *Dependencies) bootstrapIdentityComponents(options node.Options) { di.Keystore = identity.NewKeystoreFilesystem(options.Directories.Keystore, options.Keystore.UseLightweight) di.IdentityManager = identity.NewIdentityManager(di.Keystore) di.SignerFactory = func(id identity.Identity) identity.Signer { return identity.NewSigner(di.Keystore, id) } di.IdentityRegistration = identity_registry.NewRegistrationDataProvider(di.Keystore) } func (di *Dependencies) bootstrapLocationComponents(options node.OptionsLocation, configDirectory string) { di.IPResolver = ip.NewResolver(options.IpifyUrl) switch { case options.Country != "": di.LocationResolver = location.NewStaticResolver(options.Country) case options.ExternalDb != "": di.LocationResolver = location.NewExternalDbResolver(filepath.Join(configDirectory, options.ExternalDb)) default: di.LocationResolver = location.NewBuiltInResolver() } di.LocationDetector = location.NewDetector(di.IPResolver, di.LocationResolver) di.LocationOriginal = location.NewLocationCache(di.LocationDetector) }
1
13,593
Maybe `consumerID, receiverID, issuerID`; at least that's the convention in the overall repo
mysteriumnetwork-node
go
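Regarding the naming suggestion in the review comment above: a minimal, self-contained Go sketch of how the closure's parameters might read with the suggested names. The `Identity` type below is a toy stand-in for the repo's `identity.Identity`, and the factory body is illustrative only, not the repo's balance-tracker logic.

```go
package main

import "fmt"

// Identity is a stand-in for the repo's identity.Identity (assumed here to be
// a simple address wrapper purely for illustration).
type Identity struct{ Address string }

// newBalanceTrackerFactory mirrors the shape of providerBalanceTrackerFactory,
// using the suggested parameter names consumerID, receiverID, issuerID instead
// of consumer, provider, issuer. Only the names change; the body is a toy.
func newBalanceTrackerFactory() func(consumerID, receiverID, issuerID Identity) (string, error) {
	return func(consumerID, receiverID, issuerID Identity) (string, error) {
		return fmt.Sprintf("balance tracker: consumer=%s receiver=%s issuer=%s",
			consumerID.Address, receiverID.Address, issuerID.Address), nil
	}
}

func main() {
	factory := newBalanceTrackerFactory()
	desc, _ := factory(Identity{"0xconsumer"}, Identity{"0xprovider"}, Identity{"0xissuer"})
	fmt.Println(desc)
}
```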
@@ -14,6 +14,12 @@ // Package s3blob provides an implementation of blob using S3. // +// For blob.Open URLs, s3blob registers for the "s3" protocol. +// The URL's Host is used as the bucket name. +// The following query options are supported: +// - region: The AWS region for requests. +// Example URL: blob.Open("s3://mybucket?region=us-east-1") +// // It exposes the following types for As: // Bucket: *s3.S3 // ListObject: s3.Object for objects, s3.CommonPrefix for "directories".
1
// Copyright 2018 The Go Cloud Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package s3blob provides an implementation of blob using S3. // // It exposes the following types for As: // Bucket: *s3.S3 // ListObject: s3.Object for objects, s3.CommonPrefix for "directories". // ListOptions.BeforeList: *s3.ListObjectsV2Input // Reader: s3.GetObjectOutput // Attributes: s3.HeadObjectOutput // WriterOptions.BeforeWrite: *s3manager.UploadInput package s3blob import ( "context" "encoding/base64" "errors" "fmt" "io" "io/ioutil" "sort" "strconv" "strings" "github.com/google/go-cloud/blob" "github.com/google/go-cloud/blob/driver" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/s3/s3manager" ) const defaultPageSize = 1000 // OpenBucket returns an S3 Bucket. func OpenBucket(ctx context.Context, sess client.ConfigProvider, bucketName string) (*blob.Bucket, error) { if sess == nil { return nil, errors.New("sess must be provided to get bucket") } return blob.NewBucket(&bucket{ name: bucketName, sess: sess, client: s3.New(sess), }), nil } var emptyBody = ioutil.NopCloser(strings.NewReader("")) // reader reads an S3 object. It implements io.ReadCloser. type reader struct { body io.ReadCloser attrs driver.ReaderAttributes raw *s3.GetObjectOutput } func (r *reader) Read(p []byte) (int, error) { return r.body.Read(p) } // Close closes the reader itself. It must be called when done reading. func (r *reader) Close() error { return r.body.Close() } func (r *reader) As(i interface{}) bool { p, ok := i.(*s3.GetObjectOutput) if !ok { return false } *p = *r.raw return true } func (r *reader) Attributes() driver.ReaderAttributes { return r.attrs } // writer writes an S3 object, it implements io.WriteCloser. type writer struct { w *io.PipeWriter ctx context.Context uploader *s3manager.Uploader req *s3manager.UploadInput donec chan struct{} // closed when done writing // The following fields will be written before donec closes: err error } // Write appends p to w. User must call Close to close the w after done writing. func (w *writer) Write(p []byte) (int, error) { if w.w == nil { if err := w.open(); err != nil { return 0, err } } select { case <-w.donec: return 0, w.err default: } return w.w.Write(p) } func (w *writer) open() error { pr, pw := io.Pipe() w.w = pw go func() { defer close(w.donec) w.req.Body = pr _, err := w.uploader.UploadWithContext(w.ctx, w.req) if err != nil { w.err = err pr.CloseWithError(err) return } }() return nil } // Close completes the writer and close it. Any error occuring during write will // be returned. If a writer is closed before any Write is called, Close will // create an empty file at the given key. func (w *writer) Close() error { if w.w == nil { w.touch() } else if err := w.w.Close(); err != nil { return err } <-w.donec return w.err } // touch creates an empty object in the bucket. 
It is called if user creates a // new writer but never calls write before closing it. func (w *writer) touch() { if w.w != nil { return } defer close(w.donec) w.req.Body = emptyBody _, w.err = w.uploader.UploadWithContext(w.ctx, w.req) } // bucket represents an S3 bucket and handles read, write and delete operations. type bucket struct { name string sess client.ConfigProvider client *s3.S3 } // ListPaged implements driver.ListPaged. func (b *bucket) ListPaged(ctx context.Context, opts *driver.ListOptions) (*driver.ListPage, error) { pageSize := opts.PageSize if pageSize == 0 { pageSize = defaultPageSize } in := &s3.ListObjectsV2Input{ Bucket: aws.String(b.name), MaxKeys: aws.Int64(int64(pageSize)), } if len(opts.PageToken) > 0 { in.ContinuationToken = aws.String(string(opts.PageToken)) } if opts.Prefix != "" { in.Prefix = aws.String(opts.Prefix) } if opts.Delimiter != "" { in.Delimiter = aws.String(opts.Delimiter) } if opts.BeforeList != nil { asFunc := func(i interface{}) bool { p, ok := i.(**s3.ListObjectsV2Input) if !ok { return false } *p = in return true } if err := opts.BeforeList(asFunc); err != nil { return nil, err } } req, resp := b.client.ListObjectsV2Request(in) if err := req.Send(); err != nil { return nil, err } page := driver.ListPage{} if resp.NextContinuationToken != nil { page.NextPageToken = []byte(*resp.NextContinuationToken) } if n := len(resp.Contents) + len(resp.CommonPrefixes); n > 0 { page.Objects = make([]*driver.ListObject, n) for i, obj := range resp.Contents { page.Objects[i] = &driver.ListObject{ Key: *obj.Key, ModTime: *obj.LastModified, Size: *obj.Size, AsFunc: func(i interface{}) bool { p, ok := i.(*s3.Object) if !ok { return false } *p = *obj return true }, } } for i, prefix := range resp.CommonPrefixes { page.Objects[i+len(resp.Contents)] = &driver.ListObject{ Key: *prefix.Prefix, IsDir: true, AsFunc: func(i interface{}) bool { p, ok := i.(*s3.CommonPrefix) if !ok { return false } *p = *prefix return true }, } } if len(resp.Contents) > 0 && len(resp.CommonPrefixes) > 0 { // S3 gives us blobs and "directories" in separate lists; sort them. sort.Slice(page.Objects, func(i, j int) bool { return page.Objects[i].Key < page.Objects[j].Key }) } } return &page, nil } // As implements driver.As. func (b *bucket) As(i interface{}) bool { p, ok := i.(**s3.S3) if !ok { return false } *p = b.client return true } // Attributes implements driver.Attributes. func (b *bucket) Attributes(ctx context.Context, key string) (driver.Attributes, error) { in := &s3.HeadObjectInput{ Bucket: aws.String(b.name), Key: aws.String(key), } req, resp := b.client.HeadObjectRequest(in) if err := req.Send(); err != nil { if e := isErrNotExist(err); e != nil { return driver.Attributes{}, s3Error{bucket: b.name, key: key, msg: e.Message(), kind: driver.NotFound} } return driver.Attributes{}, err } var md map[string]string if len(resp.Metadata) > 0 { md = make(map[string]string, len(resp.Metadata)) for k, v := range resp.Metadata { if v != nil { md[k] = aws.StringValue(v) } } } return driver.Attributes{ ContentType: aws.StringValue(resp.ContentType), Metadata: md, ModTime: aws.TimeValue(resp.LastModified), Size: aws.Int64Value(resp.ContentLength), AsFunc: func(i interface{}) bool { p, ok := i.(*s3.HeadObjectOutput) if !ok { return false } *p = *resp return true }, }, nil } // NewRangeReader implements driver.NewRangeReader. 
func (b *bucket) NewRangeReader(ctx context.Context, key string, offset, length int64) (driver.Reader, error) { in := &s3.GetObjectInput{ Bucket: aws.String(b.name), Key: aws.String(key), } if offset > 0 && length < 0 { in.Range = aws.String(fmt.Sprintf("bytes=%d-", offset)) } else if length > 0 { in.Range = aws.String(fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) } req, resp := b.client.GetObjectRequest(in) if err := req.Send(); err != nil { if e := isErrNotExist(err); e != nil { return nil, s3Error{bucket: b.name, key: key, msg: e.Message(), kind: driver.NotFound} } return nil, err } return &reader{ body: resp.Body, attrs: driver.ReaderAttributes{ ContentType: aws.StringValue(resp.ContentType), ModTime: aws.TimeValue(resp.LastModified), Size: getSize(resp), }, raw: resp, }, nil } func getSize(resp *s3.GetObjectOutput) int64 { // Default size to ContentLength, but that's incorrect for partial-length reads, // where ContentLength refers to the size of the returned Body, not the entire // size of the blob. ContentRange has the full size. size := aws.Int64Value(resp.ContentLength) if cr := aws.StringValue(resp.ContentRange); cr != "" { // Sample: bytes 10-14/27 (where 27 is the full size). parts := strings.Split(cr, "/") if len(parts) == 2 { if i, err := strconv.ParseInt(parts[1], 10, 64); err == nil { size = i } } } return size } // NewTypedWriter implements driver.NewTypedWriter. func (b *bucket) NewTypedWriter(ctx context.Context, key string, contentType string, opts *driver.WriterOptions) (driver.Writer, error) { uploader := s3manager.NewUploader(b.sess, func(u *s3manager.Uploader) { if opts.BufferSize != 0 { u.PartSize = int64(opts.BufferSize) } }) var metadata map[string]*string if len(opts.Metadata) > 0 { metadata = make(map[string]*string, len(opts.Metadata)) for k, v := range opts.Metadata { metadata[k] = aws.String(v) } } req := &s3manager.UploadInput{ Bucket: aws.String(b.name), ContentType: aws.String(contentType), Key: aws.String(key), Metadata: metadata, } if len(opts.ContentMD5) > 0 { req.ContentMD5 = aws.String(base64.StdEncoding.EncodeToString(opts.ContentMD5)) } if opts.BeforeWrite != nil { asFunc := func(i interface{}) bool { p, ok := i.(**s3manager.UploadInput) if !ok { return false } *p = req return true } if err := opts.BeforeWrite(asFunc); err != nil { return nil, err } } return &writer{ ctx: ctx, uploader: uploader, req: req, donec: make(chan struct{}), }, nil } // Delete implements driver.Delete. func (b *bucket) Delete(ctx context.Context, key string) error { if _, err := b.Attributes(ctx, key); err != nil { return err } input := &s3.DeleteObjectInput{ Bucket: aws.String(b.name), Key: aws.String(key), } req, _ := b.client.DeleteObjectRequest(input) return req.Send() } func (b *bucket) SignedURL(ctx context.Context, key string, opts *driver.SignedURLOptions) (string, error) { in := &s3.GetObjectInput{ Bucket: aws.String(b.name), Key: aws.String(key), } req, _ := b.client.GetObjectRequest(in) return req.Presign(opts.Expiry) } type s3Error struct { bucket, key, msg string kind driver.ErrorKind } func (e s3Error) Kind() driver.ErrorKind { return e.kind } func (e s3Error) Error() string { return fmt.Sprintf("s3://%s/%s: %s", e.bucket, e.key, e.msg) } func isErrNotExist(err error) awserr.Error { if e, ok := err.(awserr.Error); ok && (e.Code() == "NoSuchKey" || e.Code() == "NotFound") { return e } return nil }
1
11,703
Where does the auth go?
google-go-cloud
go
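On the reviewer's question above about where the auth goes: with aws-sdk-go, credentials travel with the session/`ConfigProvider` handed to `OpenBucket`, not with the `s3://` URL; presumably the URL-based `blob.Open` path constructs such a session internally and therefore falls back to the default credential chain. A minimal sketch under that assumption:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/google/go-cloud/blob/s3blob"
)

func main() {
	// Credentials are resolved by the session's default provider chain:
	// environment variables, the shared credentials file, or an IAM role.
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
	if err != nil {
		log.Fatal(err)
	}

	// The bucket then inherits that authenticated session; nothing auth-related
	// is carried in the "s3://mybucket?region=..." URL itself.
	bucket, err := s3blob.OpenBucket(context.Background(), sess, "mybucket")
	if err != nil {
		log.Fatal(err)
	}
	_ = bucket
}
```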
@@ -119,7 +119,10 @@ func ClassifyActions(actions []SealedEnvelope) ([]*Transfer, []*Execution) { } func calculateIntrinsicGas(baseIntrinsicGas uint64, payloadGas uint64, payloadSize uint64) (uint64, error) { - if payloadGas == 0 || (math.MaxUint64-baseIntrinsicGas)/payloadGas < payloadSize { + if payloadGas == 0 { + return 0, ErrUnderpriced + } + if (math.MaxUint64-baseIntrinsicGas)/payloadGas < payloadSize { return 0, ErrInsufficientFunds } return payloadSize*payloadGas + baseIntrinsicGas, nil
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package action import ( "encoding/hex" "math" "math/big" "github.com/iotexproject/go-pkgs/crypto" "github.com/pkg/errors" "go.uber.org/zap" "github.com/iotexproject/iotex-core/pkg/log" ) // Action is the action can be Executed in protocols. The method is added to avoid mistakenly used empty interface as action. type Action interface { SetEnvelopeContext(SealedEnvelope) SanityCheck() error } type actionPayload interface { Cost() (*big.Int, error) IntrinsicGas() (uint64, error) SetEnvelopeContext(SealedEnvelope) SanityCheck() error } type hasDestination interface { Destination() string } // Sign signs the action using sender's private key func Sign(act Envelope, sk crypto.PrivateKey) (SealedEnvelope, error) { sealed := SealedEnvelope{ Envelope: act, srcPubkey: sk.PublicKey(), } h, err := sealed.envelopeHash() if err != nil { return sealed, errors.Wrap(err, "failed to generate envelope hash") } sig, err := sk.Sign(h[:]) if err != nil { return sealed, ErrInvalidSender } sealed.signature = sig act.Action().SetEnvelopeContext(sealed) return sealed, nil } // FakeSeal creates a SealedActionEnvelope without signature. // This method should be only used in tests. func FakeSeal(act Envelope, pubk crypto.PublicKey) SealedEnvelope { sealed := SealedEnvelope{ Envelope: act, srcPubkey: pubk, } act.Action().SetEnvelopeContext(sealed) return sealed } // AssembleSealedEnvelope assembles a SealedEnvelope use Envelope, Sender Address and Signature. // This method should be only used in tests. func AssembleSealedEnvelope(act Envelope, pk crypto.PublicKey, sig []byte) SealedEnvelope { sealed := SealedEnvelope{ Envelope: act, srcPubkey: pk, signature: sig, } act.Action().SetEnvelopeContext(sealed) return sealed } // Verify verifies the action using sender's public key func Verify(sealed SealedEnvelope) error { if sealed.SrcPubkey() == nil { return errors.New("empty public key") } // Reject action with insufficient gas limit intrinsicGas, err := sealed.IntrinsicGas() if intrinsicGas > sealed.GasLimit() || err != nil { return ErrIntrinsicGas } h, err := sealed.envelopeHash() if err != nil { return errors.Wrap(err, "failed to generate envelope hash") } if !sealed.SrcPubkey().Verify(h[:], sealed.Signature()) { log.L().Info("failed to verify action hash", zap.String("hash", hex.EncodeToString(h[:])), zap.String("signature", hex.EncodeToString(sealed.Signature()))) return ErrInvalidSender } return nil } // ClassifyActions classfies actions func ClassifyActions(actions []SealedEnvelope) ([]*Transfer, []*Execution) { tsfs := make([]*Transfer, 0) exes := make([]*Execution, 0) for _, elp := range actions { act := elp.Action() switch act := act.(type) { case *Transfer: tsfs = append(tsfs, act) case *Execution: exes = append(exes, act) } } return tsfs, exes } func calculateIntrinsicGas(baseIntrinsicGas uint64, payloadGas uint64, payloadSize uint64) (uint64, error) { if payloadGas == 0 || (math.MaxUint64-baseIntrinsicGas)/payloadGas < payloadSize { return 0, ErrInsufficientFunds } return payloadSize*payloadGas + baseIntrinsicGas, nil }
1
24,111
Zero is below the minimum gas price, so `ErrUnderpriced` is the more appropriate error
iotexproject-iotex-core
go
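A minimal, self-contained sketch mirroring the patched `calculateIntrinsicGas` logic above, with stand-in error values, to show why the zero-gas case is reported separately from the overflow guard:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// Stand-ins for the package's ErrUnderpriced / ErrInsufficientFunds.
var (
	errUnderpriced       = errors.New("underpriced")
	errInsufficientFunds = errors.New("insufficient funds")
)

// calcIntrinsicGas mirrors the patched calculateIntrinsicGas: a zero payload gas
// means the action is underpriced, while the division guard rejects inputs whose
// product would overflow uint64 before the multiplication is ever performed.
func calcIntrinsicGas(base, payloadGas, payloadSize uint64) (uint64, error) {
	if payloadGas == 0 {
		return 0, errUnderpriced
	}
	if (math.MaxUint64-base)/payloadGas < payloadSize {
		return 0, errInsufficientFunds
	}
	return payloadSize*payloadGas + base, nil
}

func main() {
	fmt.Println(calcIntrinsicGas(10000, 0, 100))            // 0 underpriced
	fmt.Println(calcIntrinsicGas(10000, 100, 50))           // 15000 <nil>
	fmt.Println(calcIntrinsicGas(10000, math.MaxUint64, 2)) // 0 insufficient funds
}
```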
@@ -12,7 +12,7 @@ from sqlalchemy import func from . import app, db from .const import (VALID_EMAIL_RE, VALID_USERNAME_RE, blacklisted_name, ACTIVATE_SALT, PASSWORD_RESET_SALT, MAX_LINK_AGE, - CODE_EXP_MINUTES) + CODE_EXP_MINUTES, TOKEN_EXP_DEFAULT) from .mail import (send_activation_email, send_reset_email, send_new_user_email, send_welcome_email) from .models import ActivationToken, Code, PasswordResetToken, Token, User
1
import base64 from datetime import datetime, timedelta import json import uuid from flask import redirect, request import itsdangerous import jwt from passlib.context import CryptContext from sqlalchemy import func from . import app, db from .const import (VALID_EMAIL_RE, VALID_USERNAME_RE, blacklisted_name, ACTIVATE_SALT, PASSWORD_RESET_SALT, MAX_LINK_AGE, CODE_EXP_MINUTES) from .mail import (send_activation_email, send_reset_email, send_new_user_email, send_welcome_email) from .models import ActivationToken, Code, PasswordResetToken, Token, User CATALOG_URL = app.config['CATALOG_URL'] pwd_context = CryptContext( schemes=['pbkdf2_sha512', 'django_pbkdf2_sha256'], pbkdf2_sha512__default_rounds=500000 ) # Each round should take about half a second, # 500000 rounds experimentally determined class AuthException(Exception): """ Base class for Auth exceptions. """ def __init__(self, msg): super().__init__() self.message = msg class ValidationException(AuthException): """ Represents a failure to deserialize a signed link, a password that is too short, etc. """ pass class ConflictException(AuthException): """ Represents an exception involving an attempt to register a username that already exists, etc. """ pass class NotFoundException(AuthException): """ Represents an exception involving an attempted operation on an entity that could not be located. """ pass class CredentialException(AuthException): """ Represents an exception involving things like an incorrect token, an incorrect password, etc. """ pass def generate_uuid(): return str(uuid.uuid4()) def hash_password(password): return pwd_context.hash(password) def get_admins(): return [user.email for user in User.query.filter_by(is_admin=True).all()] def activate_response(link): payload = verify_activation_link(link) if payload: _activate_user(User.query.filter_by(id=payload['id']).with_for_update().one_or_none()) db.session.commit() return redirect("{CATALOG_URL}/signin".format(CATALOG_URL=CATALOG_URL), code=302) return redirect("{CATALOG_URL}/activation_error".format(CATALOG_URL=CATALOG_URL), code=302) def validate_password(password): if len(password) < 8: raise ValidationException("Password must be at least 8 characters long.") def reset_password_from_email(email): user = User.query.filter_by(email=email).with_for_update().one_or_none() if user: reset_password(user) def change_password(raw_password, link): validate_password(raw_password) payload = verify_reset_link(link) if not payload: raise CredentialException("Reset token invalid") user_id = payload['id'] user = User.query.filter_by(id=user_id).with_for_update().one_or_none() if not user: raise NotFoundException("User not found") user.password = hash_password(raw_password) db.session.add(user) def _create_user(username, password='', email=None, is_admin=False, requires_activation=True, requires_reset=False): def check_conflicts(username, email): if not VALID_USERNAME_RE.match(username): raise ValidationException("Unacceptable username.") if blacklisted_name(username): raise ValidationException("Unacceptable username.") if email is None: raise ValidationException("Must provide email.") if not VALID_EMAIL_RE.match(email): raise ValidationException("Unacceptable email.") if User.query.filter_by(name=username).one_or_none(): raise ConflictException("Username already taken.") if User.query.filter_by(email=email).one_or_none(): raise ConflictException("Email already taken.") check_conflicts(username, email) validate_password(password) new_password = "" if requires_reset else 
hash_password(password) if requires_activation: is_active = False else: is_active = True user = User( id=generate_uuid(), name=username, password=new_password, email=email, is_active=is_active, is_admin=is_admin ) db.session.add(user) if requires_activation: db.session.flush() # necessary due to link token foreign key relationship with User send_activation_email(user, generate_activation_link(user.id)) if requires_reset: db.session.flush() # necessary due to link token foreign key relationship with User send_welcome_email(user, user.email, generate_reset_link(user.id)) def _update_user(username, password=None, email=None, is_admin=None, is_active=None): existing_user = User.query.filter_by(name=username).with_for_update().one_or_none() if not existing_user: raise NotFoundException("User to update not found") if password is not None: new_password = hash_password(password) existing_user.password = new_password if email is not None: existing_user.email = email if is_admin is not None: existing_user.is_admin = is_admin if is_active is not None: existing_user.is_active = is_active db.session.add(existing_user) def _activate_user(user): if user is None: raise NotFoundException("User not found") user.is_active = True db.session.add(user) admins = get_admins() if admins: send_new_user_email(user.name, user.email, admins) def update_last_login(user): user.last_login = func.now() db.session.add(user) def _delete_user(user): if user: revoke_user_code_tokens(user) db.session.delete(user) else: raise NotFoundException("User to delete not found") return user def _enable_user(user): if user: user.is_active = True db.session.add(user) else: raise NotFoundException("User to enable not found") def _disable_user(user): if user: revoke_user_code_tokens(user) user.is_active = False db.session.add(user) else: raise NotFoundException("User to disable not found") def issue_code(user): user_id = user.id expires = datetime.utcnow() + timedelta(minutes=CODE_EXP_MINUTES) code = Code(user_id=user_id, code=generate_uuid(), expires=expires) db.session.add(code) return encode_code({'id': user_id, 'code': code.code}) def encode_code(code_dict): return base64.b64encode(bytes(json.dumps(code_dict), 'utf-8')).decode('utf8') def decode_code(code_str): try: return json.loads(base64.b64decode(code_str).decode('utf8')) except Exception: raise ValidationException("Decoding code failed") def decode_token(token_str): try: return jwt.decode(token_str, app.secret_key, algorithm='HS256') except jwt.exceptions.InvalidTokenError: raise ValidationException("Token could not be deserialized") def check_token(user_id, token): return Token.query.filter_by(user_id=user_id, token=token).one_or_none() is not None def _verify(payload): user_id = payload['id'] uuid = payload['uuid'] user = User.query.filter_by(id=user_id).one_or_none() if user is None: raise CredentialException('User ID invalid') if not check_token(user_id, uuid): raise CredentialException('Token invalid') return user def verify_token_string(token_string): token = decode_token(token_string) user = _verify(token) return user def exp_from_token(token): token = decode_token(token) return token['exp'] def revoke_token_string(token_str): token = decode_token(token_str) user_id = token['id'] uuid = token['uuid'] return revoke_token(user_id, uuid) def revoke_token(user_id, token): found = Token.query.filter_by(user_id=user_id, token=token).with_for_update().one_or_none() if found is None: return False db.session.delete(found) return True def revoke_tokens(user): tokens = 
Token.query.filter_by(user_id=user.id).with_for_update().all() for token in tokens: db.session.delete(token) def revoke_user_code_tokens(user): codes = Code.query.filter_by(user_id=user.id).with_for_update().all() for code in codes: db.session.delete(code) revoke_tokens(user) def get_exp(mins=30): return datetime.utcnow() + timedelta(minutes=mins) def issue_token(user, exp=None): uuid = generate_uuid() token = Token(user_id=user.id, token=uuid) db.session.add(token) exp = exp or get_exp() payload = {'id': user.id, 'uuid': uuid, 'exp': exp} token = jwt.encode(payload, app.secret_key, algorithm='HS256') return token.decode('utf-8') def consume_code_string(code_str): code = decode_code(code_str) return consume_code(code['id'], code['code']) def consume_code(user_id, code): found = Code.query.filter_by(user_id=user_id, code=code).with_for_update().one_or_none() if found is None: raise ValidationException("Code not found") if found.expires.timetuple() < datetime.utcnow().timetuple(): db.session.delete(found) raise CredentialException("Code expired") db.session.delete(found) return User.query.filter_by(id=user_id).one_or_none() def verify_hash(password, pw_hash): try: if not pwd_context.verify(password, pw_hash): raise CredentialException('Password verification failed') except ValueError: raise CredentialException('Password verification failed') def try_login(user, password): if not user.is_active: return False try: verify_hash(password, user.password) except CredentialException: return False update_last_login(user) return True linkgenerator = itsdangerous.URLSafeTimedSerializer( app.secret_key, salt='quilt' ) def dump_link(payload, salt=None): link = linkgenerator.dumps(payload, salt=salt) return link.replace('.', '~') def load_link(link, max_age, salt=None): payload = link.replace('~', '.') return linkgenerator.loads(payload, max_age=max_age, salt=salt) def generate_activation_token(user_id): new_token = ActivationToken(user_id=user_id, token=generate_uuid()) db.session.add(new_token) return new_token.token def consume_activation_token(user_id, token): found = ( ActivationToken.query .filter_by(user_id=user_id, token=token) .with_for_update() .one_or_none() ) if not found: return False db.session.delete(found) return True def generate_reset_token(user_id): reset_token = generate_uuid() PasswordResetToken.upsert(user_id, reset_token) return reset_token def consume_reset_token(user_id, token): found = ( PasswordResetToken .query .filter_by(user_id=user_id, token=token) .with_for_update() .one_or_none() ) if not found: return False db.session.delete(found) return True def generate_activation_link(user_id): token = generate_activation_token(user_id) payload = {'id': user_id, 'token': token} return dump_link(payload, ACTIVATE_SALT) def generate_reset_link(user_id): token = generate_reset_token(user_id) payload = {'id': user_id, 'token': token} return dump_link(payload, PASSWORD_RESET_SALT) def verify_activation_link(link, max_age=None): max_age = max_age if max_age is not None else MAX_LINK_AGE try: payload = load_link(link, max_age=max_age, salt=ACTIVATE_SALT) if not consume_activation_token(payload['id'], payload['token']): return None return payload except (TypeError, KeyError, ValueError, itsdangerous.BadData): return None def verify_reset_link(link, max_age=None): max_age = max_age if max_age is not None else MAX_LINK_AGE try: payload = load_link(link, max_age=max_age, salt=PASSWORD_RESET_SALT) if not consume_reset_token(payload['id'], payload['token']): return None return payload except 
(TypeError, KeyError, ValueError, itsdangerous.BadData): return None def reset_password(user, set_unusable=False): if set_unusable: user.password = '' db.session.add(user) link = generate_reset_link(user.id) send_reset_email(user, link)
1
16,860
These should be alphabetized.
quiltdata-quilt
py
@@ -72,7 +72,7 @@ public class VectorizedDictionaryEncodedParquetValuesReader extends BaseVectoriz } void readBatchOfDictionaryEncodedLongs(FieldVector vector, int startOffset, int numValuesToRead, Dictionary dict, - NullabilityHolder nullabilityHolder) { + NullabilityHolder nullabilityHolder, int typeWidth) { int left = numValuesToRead; int idx = startOffset; while (left > 0) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.arrow.vectorized.parquet; import io.netty.buffer.ArrowBuf; import java.nio.ByteBuffer; import org.apache.arrow.vector.BaseVariableWidthVector; import org.apache.arrow.vector.BitVectorHelper; import org.apache.arrow.vector.DecimalVector; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.IntVector; import org.apache.iceberg.arrow.vectorized.NullabilityHolder; import org.apache.parquet.column.Dictionary; /** * This decoder reads Parquet dictionary encoded data in a vectorized fashion. Unlike other * vectorized readers, methods in this decoder don't need to read definition levels. In other * words, these methods are called when there are non-null values to be read. */ public class VectorizedDictionaryEncodedParquetValuesReader extends BaseVectorizedParquetValuesReader { public VectorizedDictionaryEncodedParquetValuesReader(int maxDefLevel, boolean setValidityVector) { super(maxDefLevel, setValidityVector); } void readBatchOfDictionaryIds(IntVector intVector, int startOffset, int numValuesToRead, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int numValues = Math.min(left, this.currentCount); switch (mode) { case RLE: for (int i = 0; i < numValues; i++) { intVector.set(idx, currentValue); nullabilityHolder.setNotNull(idx); idx++; } break; case PACKED: for (int i = 0; i < numValues; i++) { intVector.set(idx, packedValuesBuffer[packedValuesBufferIdx]); nullabilityHolder.setNotNull(idx); packedValuesBufferIdx++; idx++; } break; } left -= numValues; currentCount -= numValues; } } void readBatchOfDictionaryEncodedLongs(FieldVector vector, int startOffset, int numValuesToRead, Dictionary dict, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int numValues = Math.min(left, this.currentCount); switch (mode) { case RLE: for (int i = 0; i < numValues; i++) { vector.getDataBuffer().setLong(idx, dict.decodeToLong(currentValue)); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; case PACKED: for (int i = 0; i < numValues; i++) { vector.getDataBuffer() .setLong(idx, dict.decodeToLong(packedValuesBuffer[packedValuesBufferIdx++])); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; } left -= numValues; currentCount -= numValues; } } void readBatchOfDictionaryEncodedTimestampMillis(FieldVector vector, int startOffset, int numValuesToRead, 
Dictionary dict, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int numValues = Math.min(left, this.currentCount); switch (mode) { case RLE: for (int i = 0; i < numValues; i++) { vector.getDataBuffer().setLong(idx, dict.decodeToLong(currentValue) * 1000); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; case PACKED: for (int i = 0; i < numValues; i++) { vector.getDataBuffer() .setLong(idx, dict.decodeToLong(packedValuesBuffer[packedValuesBufferIdx++]) * 1000); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; } left -= numValues; currentCount -= numValues; } } void readBatchOfDictionaryEncodedIntegers(FieldVector vector, int startOffset, int numValuesToRead, Dictionary dict, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); ArrowBuf dataBuffer = vector.getDataBuffer(); switch (mode) { case RLE: for (int i = 0; i < num; i++) { dataBuffer.setInt(idx, dict.decodeToInt(currentValue)); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; case PACKED: for (int i = 0; i < num; i++) { dataBuffer.setInt(idx, dict.decodeToInt(packedValuesBuffer[packedValuesBufferIdx++])); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; } left -= num; currentCount -= num; } } void readBatchOfDictionaryEncodedFloats(FieldVector vector, int startOffset, int numValuesToRead, Dictionary dict, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: for (int i = 0; i < num; i++) { vector.getDataBuffer().setFloat(idx, dict.decodeToFloat(currentValue)); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; case PACKED: for (int i = 0; i < num; i++) { vector.getDataBuffer().setFloat(idx, dict.decodeToFloat(packedValuesBuffer[packedValuesBufferIdx++])); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; } left -= num; currentCount -= num; } } void readBatchOfDictionaryEncodedDoubles(FieldVector vector, int startOffset, int numValuesToRead, Dictionary dict, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: for (int i = 0; i < num; i++) { vector.getDataBuffer().setDouble(idx, dict.decodeToDouble(currentValue)); nullabilityHolder.setNotNull(idx); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; case PACKED: for (int i = 0; i < num; i++) { 
vector.getDataBuffer().setDouble(idx, dict.decodeToDouble(packedValuesBuffer[packedValuesBufferIdx++])); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; } left -= num; currentCount -= num; } } void readBatchOfDictionaryEncodedFixedWidthBinary(FieldVector vector, int typeWidth, int startOffset, int numValuesToRead, Dictionary dict, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: for (int i = 0; i < num; i++) { ByteBuffer buffer = dict.decodeToBinary(currentValue).toByteBuffer(); vector.getDataBuffer().setBytes(idx * typeWidth, buffer.array(), buffer.position() + buffer.arrayOffset(), buffer.limit() - buffer.position()); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; case PACKED: for (int i = 0; i < num; i++) { ByteBuffer buffer = dict.decodeToBinary(packedValuesBuffer[packedValuesBufferIdx++]).toByteBuffer(); vector.getDataBuffer() .setBytes(idx * typeWidth, buffer.array(), buffer.position() + buffer.arrayOffset(), buffer.limit() - buffer.position()); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } idx++; } break; } left -= num; currentCount -= num; } } void readBatchOfDictionaryEncodedFixedLengthDecimals(FieldVector vector, int typeWidth, int startOffset, int numValuesToRead, Dictionary dict, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: for (int i = 0; i < num; i++) { byte[] decimalBytes = dict.decodeToBinary(currentValue).getBytesUnsafe(); byte[] vectorBytes = new byte[DecimalVector.TYPE_WIDTH]; System.arraycopy(decimalBytes, 0, vectorBytes, DecimalVector.TYPE_WIDTH - typeWidth, typeWidth); ((DecimalVector) vector).setBigEndian(idx, vectorBytes); nullabilityHolder.setNotNull(idx); idx++; } break; case PACKED: for (int i = 0; i < num; i++) { byte[] decimalBytes = dict.decodeToBinary(packedValuesBuffer[packedValuesBufferIdx++]).getBytesUnsafe(); byte[] vectorBytes = new byte[DecimalVector.TYPE_WIDTH]; System.arraycopy(decimalBytes, 0, vectorBytes, DecimalVector.TYPE_WIDTH - typeWidth, typeWidth); ((DecimalVector) vector).setBigEndian(idx, vectorBytes); nullabilityHolder.setNotNull(idx); idx++; } break; } left -= num; currentCount -= num; } } void readBatchOfDictionaryEncodedVarWidthBinary(FieldVector vector, int startOffset, int numValuesToRead, Dictionary dict, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: for (int i = 0; i < num; i++) { ByteBuffer buffer = dict.decodeToBinary(currentValue).toByteBuffer(); ((BaseVariableWidthVector) vector).setSafe(idx, buffer.array(), buffer.position() + buffer.arrayOffset(), buffer.limit() - buffer.position()); nullabilityHolder.setNotNull(idx); idx++; } break; case PACKED: for (int i = 0; i < num; i++) { ByteBuffer buffer = 
dict.decodeToBinary(packedValuesBuffer[packedValuesBufferIdx++]).toByteBuffer(); ((BaseVariableWidthVector) vector).setSafe(idx, buffer.array(), buffer.position() + buffer.arrayOffset(), buffer.limit() - buffer.position()); nullabilityHolder.setNotNull(idx); idx++; } break; } left -= num; currentCount -= num; } } void readBatchOfDictionaryEncodedIntLongBackedDecimals(FieldVector vector, int typeWidth, int startOffset, int numValuesToRead, Dictionary dict, NullabilityHolder nullabilityHolder) { int left = numValuesToRead; int idx = startOffset; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: for (int i = 0; i < num; i++) { ((DecimalVector) vector).set( idx, typeWidth == Integer.BYTES ? dict.decodeToInt(currentValue) : dict.decodeToLong(currentValue)); nullabilityHolder.setNotNull(idx); idx++; } break; case PACKED: for (int i = 0; i < num; i++) { ((DecimalVector) vector).set( idx, typeWidth == Integer.BYTES ? dict.decodeToInt(currentValue) : dict.decodeToLong(packedValuesBuffer[packedValuesBufferIdx++])); nullabilityHolder.setNotNull(idx); idx++; } break; } left -= num; currentCount -= num; } } }
1
18,276
These changes look concerning. It looks like the old offset (only index) must not have been correct. If so, there are places where `getDataBuffer().setLong(...)` and similar methods are called but aren't updated like these. Are those cases bugs as well?
apache-iceberg
java
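A language-neutral illustration of the offset concern in the review comment above (written in Go for consistency with the other sketches here, not as a claim about Arrow's exact API): raw byte buffers need byte offsets of `idx * typeWidth`, whereas element-indexed setters take the plain index, and mixing the two silently overlaps values.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const typeWidth = 8 // bytes per 64-bit value, analogous to typeWidth in the reader
	buf := make([]byte, 4*typeWidth)

	for idx, v := range []uint64{10, 20, 30, 40} {
		// A raw buffer needs the byte offset idx*typeWidth; using idx alone
		// would make consecutive writes overlap and corrupt earlier values.
		binary.LittleEndian.PutUint64(buf[idx*typeWidth:], v)
	}

	// Reading element 2 back requires the same byte-offset arithmetic.
	fmt.Println(binary.LittleEndian.Uint64(buf[2*typeWidth:])) // 30
}
```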
@@ -164,7 +164,7 @@ type Counters struct { // flush writes the current state of in memory counters into the given db. func (cs *Counters) flush(db *shed.DB, batch *leveldb.Batch) error { - if cs.dirty.Load() > 1 { + if cs.dirty.Load() < 3 { return nil } cs.dirty.CAS(3, 2)
1
// Copyright 2021 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package metrics provides service for collecting various metrics about peers. // It is intended to be used with the kademlia where the metrics are collected. package metrics import ( "fmt" "sync" "time" "github.com/ethersphere/bee/pkg/shed" "github.com/ethersphere/bee/pkg/swarm" "github.com/hashicorp/go-multierror" "github.com/syndtr/goleveldb/leveldb" "go.uber.org/atomic" ) const ( peerLastSeenTimestamp string = "peer-last-seen-timestamp" peerTotalConnectionDuration string = "peer-total-connection-duration" ) // PeerConnectionDirection represents peer connection direction. type PeerConnectionDirection string const ( PeerConnectionDirectionInbound PeerConnectionDirection = "inbound" PeerConnectionDirectionOutbound PeerConnectionDirection = "outbound" ) // peerKey is used to store peers' persistent metrics counters. type peerKey struct { prefix string address string } // String implements Stringer.String method. func (pk peerKey) String() string { return fmt.Sprintf("%s-%s", pk.prefix, pk.address) } // newPeerKey is a convenient constructor for creating new peerKey. func newPeerKey(p, a string) *peerKey { return &peerKey{ prefix: p, address: a, } } // RecordOp is a definition of a peer metrics Record // operation whose execution modifies a specific metrics. type RecordOp func(*Counters) // PeerLogIn will first update the current last seen to the give time t and as // the second it'll set the direction of the session connection to the given // value. The force flag will force the peer re-login if he's already logged in. // The time is set as Unix timestamp ignoring the timezone. The operation will // panics if the given time is before the Unix epoch. func PeerLogIn(t time.Time, dir PeerConnectionDirection) RecordOp { return func(cs *Counters) { cs.Lock() defer cs.Unlock() if cs.loggedIn { return // Ignore when the peer is already logged in. } cs.loggedIn = true ls := t.UnixNano() if ls < 0 { panic(fmt.Errorf("time before unix epoch: %s", t)) } cs.sessionConnDirection = dir cs.lastSeenTimestamp = ls cs.dirty.Store(3) } } // PeerLogOut will first update the connection session and total duration with // the difference of the given time t and the current last seen value. As the // second it'll also update the last seen peer metrics to the given time t. // The time is set as Unix timestamp ignoring the timezone. The operation will // panics if the given time is before the Unix epoch. func PeerLogOut(t time.Time) RecordOp { return func(cs *Counters) { cs.Lock() defer cs.Unlock() if !cs.loggedIn { return // Ignore when the peer is not logged in. } cs.loggedIn = false curLs := cs.lastSeenTimestamp newLs := t.UnixNano() if newLs < 0 { panic(fmt.Errorf("time before unix epoch: %s", t)) } cs.sessionConnDuration = time.Duration(newLs - curLs) cs.connTotalDuration += cs.sessionConnDuration cs.lastSeenTimestamp = newLs cs.dirty.Store(3) } } // IncSessionConnectionRetry increments the session connection retry // counter by 1. func IncSessionConnectionRetry() RecordOp { return func(cs *Counters) { cs.Lock() defer cs.Unlock() cs.sessionConnRetry++ } } // Snapshot represents a snapshot of peers' metrics counters. 
type Snapshot struct { LastSeenTimestamp int64 SessionConnectionRetry uint64 ConnectionTotalDuration time.Duration SessionConnectionDuration time.Duration SessionConnectionDirection PeerConnectionDirection } // HasAtMaxOneConnectionAttempt returns true if the snapshot represents a new // peer which has at maximum one session connection attempt but it still isn't // logged in. func (ss *Snapshot) HasAtMaxOneConnectionAttempt() bool { return ss.LastSeenTimestamp == 0 && ss.SessionConnectionRetry <= 1 } // Counters represents a collection of peer metrics // mainly collected for statistics and debugging. type Counters struct { sync.Mutex // Bookkeeping. peer *swarm.Address loggedIn bool // Watches in-memory counters which has to be persisted. // 3 - dirty, need to be persisted // 2 - snapshot of counters in progress // 1 - batched for persistent write // 0 - persisted dirty atomic.Int32 // In-memory counters. lastSeenTimestamp int64 connTotalDuration time.Duration sessionConnRetry uint64 sessionConnDuration time.Duration sessionConnDirection PeerConnectionDirection // Persistent counters. persistentLastSeenTimestamp atomic.Value persistentConnTotalDuration atomic.Value } // flush writes the current state of in memory counters into the given db. func (cs *Counters) flush(db *shed.DB, batch *leveldb.Batch) error { if cs.dirty.Load() > 1 { return nil } cs.dirty.CAS(3, 2) cs.Lock() var ( key = cs.peer.String() lastSeenTimestampSnapshot = cs.lastSeenTimestamp connectionTotalDurationSnapshot = cs.connTotalDuration ) cs.Unlock() ls, ok := cs.persistentLastSeenTimestamp.Load().(*shed.Uint64Field) if !ok { mk := newPeerKey(peerLastSeenTimestamp, key) field, err := db.NewUint64Field(mk.String()) if err != nil { return fmt.Errorf("field initialization for %q failed: %w", mk, err) } ls = &field cs.persistentLastSeenTimestamp.Store(ls) } cd, ok := cs.persistentConnTotalDuration.Load().(*shed.Uint64Field) if !ok { mk := newPeerKey(peerTotalConnectionDuration, key) field, err := db.NewUint64Field(mk.String()) if err != nil { return fmt.Errorf("field initialization for %q failed: %w", mk, err) } cd = &field cs.persistentConnTotalDuration.Store(cd) } ls.PutInBatch(batch, uint64(lastSeenTimestampSnapshot)) cd.PutInBatch(batch, uint64(connectionTotalDurationSnapshot)) cs.dirty.CAS(2, 1) return nil } // snapshot returns current snapshot of counters referenced to the given t. func (cs *Counters) snapshot(t time.Time) *Snapshot { cs.Lock() defer cs.Unlock() connTotalDuration := cs.connTotalDuration sessionConnDuration := cs.sessionConnDuration if cs.loggedIn { sessionConnDuration = t.Sub(time.Unix(0, cs.lastSeenTimestamp)) connTotalDuration += sessionConnDuration } return &Snapshot{ LastSeenTimestamp: cs.lastSeenTimestamp, SessionConnectionRetry: cs.sessionConnRetry, ConnectionTotalDuration: connTotalDuration, SessionConnectionDuration: sessionConnDuration, SessionConnectionDirection: cs.sessionConnDirection, } } // NewCollector is a convenient constructor for creating new Collector. func NewCollector(db *shed.DB) *Collector { return &Collector{db: db} } // Collector collects various metrics about // peers specified be the swarm.Address. type Collector struct { db *shed.DB counters sync.Map } // Record records a set of metrics for peer specified by the given address. 
func (c *Collector) Record(addr swarm.Address, rop ...RecordOp) { val, _ := c.counters.LoadOrStore(addr.ByteString(), &Counters{peer: &addr}) for _, op := range rop { op(val.(*Counters)) } } // Snapshot returns the current state of the metrics collector for peer(s). // The given time t is used to calculate the duration of the current session, // if any. If an address or a set of addresses is specified then only metrics // related to them will be returned, otherwise metrics for all peers will be // returned. If the peer is still logged in, the session-related counters will // be evaluated against the last seen time, which equals to the login time. If // the peer is logged out, then the session counters will reflect its last // session. func (c *Collector) Snapshot(t time.Time, addresses ...swarm.Address) map[string]*Snapshot { snapshot := make(map[string]*Snapshot) for _, addr := range addresses { val, ok := c.counters.Load(addr.ByteString()) if !ok { continue } cs := val.(*Counters) snapshot[addr.ByteString()] = cs.snapshot(t) } if len(addresses) == 0 { c.counters.Range(func(key, val interface{}) bool { cs := val.(*Counters) snapshot[cs.peer.ByteString()] = cs.snapshot(t) return true }) } return snapshot } // Inspect allows to inspect current snapshot for the given // peer address by executing the inspection function. func (c *Collector) Inspect(addr swarm.Address, fn func(ss *Snapshot)) { snapshots := c.Snapshot(time.Now(), addr) fn(snapshots[addr.ByteString()]) } // Flush sync the dirty in memory counters by flushing their values to the // underlying storage. If an address or a set of addresses is specified then // only counters related to them will be flushed, otherwise counters for all // peers will be flushed. func (c *Collector) Flush(addresses ...swarm.Address) error { var ( mErr error dirty []string batch = new(leveldb.Batch) ) for _, addr := range addresses { val, ok := c.counters.Load(addr.ByteString()) if !ok { continue } cs := val.(*Counters) if err := cs.flush(c.db, batch); err != nil { mErr = multierror.Append(mErr, fmt.Errorf("unable to batch the counters of peer %q for flash: %w", addr, err)) continue } dirty = append(dirty, addr.ByteString()) } if len(addresses) == 0 { c.counters.Range(func(_, val interface{}) bool { cs := val.(*Counters) if err := cs.flush(c.db, batch); err != nil { mErr = multierror.Append(mErr, fmt.Errorf("unable to batch the counters of peer %q for flash: %w", cs.peer, err)) return true } dirty = append(dirty, cs.peer.ByteString()) return true }) } if batch.Len() == 0 { return mErr } if err := c.db.WriteBatch(batch); err != nil { mErr = multierror.Append(mErr, fmt.Errorf("unable to persist counters in batch: %w", err)) } for _, addr := range dirty { val, ok := c.counters.Load(addr) if !ok { continue } cs := val.(*Counters) cs.dirty.CAS(1, 0) } return mErr } // Finalize logs out all ongoing peer sessions // and flushes all in-memory metrics counters. 
func (c *Collector) Finalize(t time.Time) error { var ( mErr error batch = new(leveldb.Batch) ) c.counters.Range(func(_, val interface{}) bool { cs := val.(*Counters) PeerLogOut(t)(cs) if err := cs.flush(c.db, batch); err != nil { mErr = multierror.Append(mErr, fmt.Errorf("unable to flush counters for peer %q: %w", cs.peer, err)) } return true }) if batch.Len() > 0 { if err := c.db.WriteBatch(batch); err != nil { mErr = multierror.Append(mErr, fmt.Errorf("unable to persist counters in batch: %w", err)) } } c.counters.Range(func(_, val interface{}) bool { cs := val.(*Counters) c.counters.Delete(cs.peer.ByteString()) return true }) return mErr }
1
15,321
The CAS call here is now wrong, since dirty will never be 3 anymore. It might be useful to sweep through the entire usage of this field to verify that everything is correct.
ethersphere-bee
go
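To make the state transitions in the review comment above easier to reason about, here is a toy sketch of the documented dirty-counter states (3 dirty, 2 snapshot in progress, 1 batched, 0 persisted) using standard-library atomics; it is a stand-in for reasoning, not the repo's `flush` implementation.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// States of the dirty counter, as documented on the Counters struct:
// 3 = dirty, 2 = snapshot in progress, 1 = batched for write, 0 = persisted.
func flush(dirty *int32) string {
	if atomic.LoadInt32(dirty) < 3 {
		return "skip: nothing dirty"
	}
	// In this toy, the guard above means the CAS only runs while the value is 3.
	if !atomic.CompareAndSwapInt32(dirty, 3, 2) {
		return "skip: another flusher won the race"
	}
	// ... snapshot the in-memory counters and stage them in a batch ...
	atomic.CompareAndSwapInt32(dirty, 2, 1)
	return "staged for persistence"
}

func main() {
	var dirty int32 = 3          // e.g. set by PeerLogIn/PeerLogOut
	fmt.Println(flush(&dirty))   // staged for persistence
	fmt.Println(flush(&dirty))   // skip: nothing dirty
}
```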
@@ -18,7 +18,11 @@ use Symfony\Component\Validator\Constraints\NotBlank; use Symfony\Component\Validator\Constraints\NotEqualTo; use Symfony\Component\Validator\ExecutionContextInterface; use Thelia\Core\Translation\Translator; +use Thelia\Model\Base\CountryQuery; use Thelia\Model\Base\LangQuery; +use Thelia\Model\Base\ModuleQuery; +use Thelia\Model\Module; +use Thelia\Module\BaseModule; /** * Allow to build a form Coupon
1
<?php /*************************************************************************************/ /* This file is part of the Thelia package. */ /* */ /* Copyright (c) OpenStudio */ /* email : [email protected] */ /* web : http://www.thelia.net */ /* */ /* For the full copyright and license information, please view the LICENSE.txt */ /* file that was distributed with this source code. */ /*************************************************************************************/ namespace Thelia\Form; use Symfony\Component\Validator\Constraints\Callback; use Symfony\Component\Validator\Constraints\GreaterThanOrEqual; use Symfony\Component\Validator\Constraints\NotBlank; use Symfony\Component\Validator\Constraints\NotEqualTo; use Symfony\Component\Validator\ExecutionContextInterface; use Thelia\Core\Translation\Translator; use Thelia\Model\Base\LangQuery; /** * Allow to build a form Coupon * * @package Coupon * @author Guillaume MOREL <[email protected]> * */ class CouponCreationForm extends BaseForm { /** * Build Coupon form * * @return void */ protected function buildForm() { $this->formBuilder ->add( 'code', 'text', array( 'constraints' => array( new NotBlank() ) ) ) ->add( 'title', 'text', array( 'constraints' => array( new NotBlank() ) ) ) ->add( 'shortDescription', 'text' ) ->add( 'description', 'textarea' ) ->add( 'type', 'text', array( 'constraints' => array( new NotBlank(), new NotEqualTo( array( 'value' => -1 ) ) ) ) ) ->add( 'amount', 'money', array( 'constraints' => array( new NotBlank() )) ) ->add( 'isEnabled', 'text', array() ) ->add( 'expirationDate', 'text', array( 'constraints' => array( new NotBlank(), new Callback(array( "methods" => array( array($this, "checkLocalizedDate"), ), )) ) ) ) ->add( 'isCumulative', 'text', array() ) ->add( 'isRemovingPostage', 'text', array() ) ->add( 'isAvailableOnSpecialOffers', 'text', array() ) ->add( 'maxUsage', 'text', array( 'constraints' => array( new NotBlank(), new GreaterThanOrEqual(['value' => -1]) ) ) ) ->add( 'locale', 'hidden', array( 'constraints' => array( new NotBlank() ) ) ); } /** * Validate a date entered with the default Language date format. * * @param string $value * @param ExecutionContextInterface $context */ public function checkLocalizedDate($value, ExecutionContextInterface $context) { $format = LangQuery::create()->findOneByByDefault(true)->getDateFormat(); if (false === \DateTime::createFromFormat($format, $value)) { $context->addViolation(Translator::getInstance()->trans("Date '%date' is invalid, please enter a valid date using %fmt format", [ '%fmt' => $format, '%date' => $value ])); } } /** * Get form name * * @return string */ public function getName() { return 'thelia_coupon_creation'; } }
1
10,094
Base model is imported here
thelia-thelia
php
@@ -16,6 +16,7 @@ */ package org.apache.servicecomb.serviceregistry.task; +import org.apache.servicecomb.foundation.common.event.EventManager; import org.apache.servicecomb.serviceregistry.RegistryUtils; import org.apache.servicecomb.serviceregistry.api.registry.Microservice; import org.apache.servicecomb.serviceregistry.api.registry.MicroserviceInstance;
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.serviceregistry.task; import org.apache.servicecomb.serviceregistry.RegistryUtils; import org.apache.servicecomb.serviceregistry.api.registry.Microservice; import org.apache.servicecomb.serviceregistry.api.registry.MicroserviceInstance; import org.apache.servicecomb.serviceregistry.client.ServiceRegistryClient; import org.apache.servicecomb.serviceregistry.config.ServiceRegistryConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.util.StringUtils; import com.google.common.eventbus.EventBus; import com.google.common.eventbus.Subscribe; public class MicroserviceInstanceRegisterTask extends AbstractRegisterTask { private static final Logger LOGGER = LoggerFactory.getLogger(MicroserviceInstanceRegisterTask.class); private ServiceRegistryConfig serviceRegistryConfig; private MicroserviceInstance microserviceInstance; public MicroserviceInstanceRegisterTask(EventBus eventBus, ServiceRegistryConfig serviceRegistryConfig, ServiceRegistryClient srClient, Microservice microservice) { super(eventBus, srClient, microservice); this.serviceRegistryConfig = serviceRegistryConfig; this.microserviceInstance = microservice.getInstance(); } @Subscribe public void onMicroserviceRegisterTask(MicroserviceRegisterTask task) { if (task.taskStatus == TaskStatus.FINISHED && isSameMicroservice(task.getMicroservice())) { this.taskStatus = TaskStatus.READY; this.registered = false; } else { this.taskStatus = TaskStatus.INIT; } } @Override protected boolean doRegister() { LOGGER.info("running microservice instance register task."); String hostName = ""; if (serviceRegistryConfig.isPreferIpAddress()) { hostName = RegistryUtils.getPublishAddress(); } else { hostName = RegistryUtils.getPublishHostName(); } microserviceInstance.setHostName(hostName); microserviceInstance.getHealthCheck().setInterval(serviceRegistryConfig.getHeartbeatInterval()); microserviceInstance.getHealthCheck().setTimes(serviceRegistryConfig.getResendHeartBeatTimes()); String instanceId = srClient.registerMicroserviceInstance(microserviceInstance); if (StringUtils.isEmpty(instanceId)) { LOGGER.error("Register microservice instance failed. microserviceId={}", microserviceInstance.getServiceId()); return false; } microserviceInstance.setInstanceId(instanceId); LOGGER.info( "Register microservice instance success. microserviceId={} instanceId={} endpoints={} lease {}s", microserviceInstance.getServiceId(), instanceId, microserviceInstance.getEndpoints(), microserviceInstance.getHealthCheck().getTTL()); return true; } }
1
9,508
Imported but not used.
apache-servicecomb-java-chassis
java
@@ -193,7 +193,7 @@ public final class VectorizedParquetDefinitionLevelReader extends BaseVectorized case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedLongs(vector, - idx, numValues, dict, nullabilityHolder); + idx, numValues, dict, nullabilityHolder, typeWidth); } else { setNulls(nullabilityHolder, idx, numValues, validityBuffer); }
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.arrow.vectorized.parquet; import io.netty.buffer.ArrowBuf; import java.nio.ByteBuffer; import org.apache.arrow.vector.BaseVariableWidthVector; import org.apache.arrow.vector.BitVector; import org.apache.arrow.vector.BitVectorHelper; import org.apache.arrow.vector.DecimalVector; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.IntVector; import org.apache.arrow.vector.VarBinaryVector; import org.apache.iceberg.arrow.vectorized.NullabilityHolder; import org.apache.iceberg.parquet.ValuesAsBytesReader; import org.apache.parquet.column.Dictionary; public final class VectorizedParquetDefinitionLevelReader extends BaseVectorizedParquetValuesReader { public VectorizedParquetDefinitionLevelReader(int bitWidth, int maxDefLevel, boolean setArrowValidityVector) { super(bitWidth, maxDefLevel, setArrowValidityVector); } public void readBatchOfDictionaryIds( final IntVector vector, final int startOffset, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int numValues = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryIds(vector, idx, numValues, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, numValues, vector.getValidityBuffer()); } idx += numValues; break; case PACKED: for (int i = 0; i < numValues; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.set(idx, dictionaryEncodedValuesReader.readInteger()); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } } else { setNull(nullabilityHolder, idx, vector.getValidityBuffer()); } idx++; } break; } left -= numValues; currentCount -= numValues; } } public void readBatchOfLongs( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int numValues = Math.min(left, this.currentCount); switch (mode) { case RLE: setNextNValuesInVector( typeWidth, nullabilityHolder, valuesReader, bufferIdx, vector, numValues); bufferIdx += numValues; break; case PACKED: for (int i = 0; i < numValues; ++i) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setLong(bufferIdx * typeWidth, valuesReader.readLong()); if (setArrowValidityVector) { 
BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), bufferIdx); } else { nullabilityHolder.setNotNull(bufferIdx); } } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= numValues; currentCount -= numValues; } } public void readBatchOfTimestampMillis(final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int numValues = Math.min(left, this.currentCount); switch (mode) { case RLE: ArrowBuf validityBuffer = vector.getValidityBuffer(); if (currentValue == maxDefLevel) { for (int i = 0; i < numValues; i++) { vector.getDataBuffer().setLong(bufferIdx * typeWidth, valuesReader.readLong() * 1000); } if (setArrowValidityVector) { for (int i = 0; i < numValues; i++) { BitVectorHelper.setValidityBitToOne(validityBuffer, bufferIdx + i); } } else { nullabilityHolder.setNotNulls(bufferIdx, numValues); } } else { setNulls(nullabilityHolder, bufferIdx, numValues, validityBuffer); } bufferIdx += numValues; break; case PACKED: for (int i = 0; i < numValues; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setLong(bufferIdx * typeWidth, valuesReader.readLong() * 1000); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), bufferIdx); } else { nullabilityHolder.setNotNull(bufferIdx); } } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= numValues; currentCount -= numValues; } } public void readBatchOfDictionaryEncodedLongs( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader, Dictionary dict) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int numValues = Math.min(left, this.currentCount); ArrowBuf validityBuffer = vector.getValidityBuffer(); switch (mode) { case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedLongs(vector, idx, numValues, dict, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, numValues, validityBuffer); } idx += numValues; break; case PACKED: for (int i = 0; i < numValues; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setLong(idx, dict.decodeToLong(dictionaryEncodedValuesReader.readInteger())); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } } else { setNull(nullabilityHolder, idx, validityBuffer); } idx++; } break; } left -= numValues; currentCount -= numValues; } } public void readBatchOfDictionaryEncodedTimestampMillis( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader, Dictionary dict) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int numValues = Math.min(left, this.currentCount); ArrowBuf validityBuffer = vector.getValidityBuffer(); switch (mode) { case RLE: if 
(currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedTimestampMillis(vector, idx, numValues, dict, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, numValues, validityBuffer); } idx += numValues; break; case PACKED: for (int i = 0; i < numValues; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setLong(idx, dict.decodeToLong(dictionaryEncodedValuesReader.readInteger()) * 1000); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } } else { setNull(nullabilityHolder, idx, validityBuffer); } idx++; } break; } left -= numValues; currentCount -= numValues; } } public void readBatchOfIntegers(final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: setNextNValuesInVector( typeWidth, nullabilityHolder, valuesReader, bufferIdx, vector, num); bufferIdx += num; break; case PACKED: for (int i = 0; i < num; ++i) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setInt(bufferIdx * typeWidth, valuesReader.readInteger()); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), bufferIdx); } else { nullabilityHolder.setNotNull(bufferIdx); } } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfDictionaryEncodedIntegers( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader, Dictionary dict) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedIntegers(vector, idx, num, dict, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, num, vector.getValidityBuffer()); } idx += num; break; case PACKED: for (int i = 0; i < num; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setInt(idx, dict.decodeToInt(dictionaryEncodedValuesReader.readInteger())); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } } else { setNull(nullabilityHolder, idx, vector.getValidityBuffer()); } idx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfFloats(final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: setNextNValuesInVector( typeWidth, nullabilityHolder, valuesReader, bufferIdx, vector, num); bufferIdx += num; break; case PACKED: for (int i = 0; i < num; ++i) { if 
(packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setFloat(bufferIdx * typeWidth, valuesReader.readFloat()); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), bufferIdx); } else { nullabilityHolder.setNotNull(bufferIdx); } } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfDictionaryEncodedFloats( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader, Dictionary dict) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); ArrowBuf validityBuffer = vector.getValidityBuffer(); switch (mode) { case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedFloats(vector, idx, num, dict, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, num, validityBuffer); } idx += num; break; case PACKED: for (int i = 0; i < num; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setFloat(idx, dict.decodeToFloat(dictionaryEncodedValuesReader.readInteger())); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } } else { setNull(nullabilityHolder, idx, validityBuffer); } idx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfDoubles( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: setNextNValuesInVector( typeWidth, nullabilityHolder, valuesReader, bufferIdx, vector, num); bufferIdx += num; break; case PACKED: for (int i = 0; i < num; ++i) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setDouble(bufferIdx * typeWidth, valuesReader.readDouble()); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), bufferIdx); } else { nullabilityHolder.setNotNull(bufferIdx); } } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfDictionaryEncodedDoubles( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader, Dictionary dict) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedDoubles(vector, idx, num, dict, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, num, vector.getValidityBuffer()); } idx += num; break; case PACKED: for (int i = 0; i < num; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { vector.getDataBuffer().setDouble(idx, 
dict.decodeToDouble(dictionaryEncodedValuesReader.readInteger())); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } } else { setNull(nullabilityHolder, idx, vector.getValidityBuffer()); } idx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfFixedWidthBinary( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { for (int i = 0; i < num; i++) { setBinaryInVector((VarBinaryVector) vector, typeWidth, valuesReader, bufferIdx, nullabilityHolder); bufferIdx++; } } else { setNulls(nullabilityHolder, bufferIdx, num, vector.getValidityBuffer()); bufferIdx += num; } break; case PACKED: for (int i = 0; i < num; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { setBinaryInVector((VarBinaryVector) vector, typeWidth, valuesReader, bufferIdx, nullabilityHolder); } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfDictionaryEncodedFixedWidthBinary( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader, Dictionary dict) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedFixedWidthBinary(vector, typeWidth, idx, num, dict, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, num, vector.getValidityBuffer()); } idx += num; break; case PACKED: for (int i = 0; i < num; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { ByteBuffer buffer = dict.decodeToBinary(dictionaryEncodedValuesReader.readInteger()).toByteBuffer(); vector.getDataBuffer().setBytes(idx * typeWidth, buffer.array(), buffer.position() + buffer.arrayOffset(), buffer.limit() - buffer.position()); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), idx); } else { nullabilityHolder.setNotNull(idx); } } else { setNull(nullabilityHolder, idx, vector.getValidityBuffer()); } idx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfFixedLengthDecimals( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); byte[] byteArray = new byte[DecimalVector.TYPE_WIDTH]; switch (mode) { case RLE: if (currentValue == maxDefLevel) { for (int i = 0; i < num; i++) { valuesReader.getBuffer(typeWidth).get(byteArray, DecimalVector.TYPE_WIDTH - typeWidth, typeWidth); ((DecimalVector) vector).setBigEndian(bufferIdx, byteArray); nullabilityHolder.setNotNull(bufferIdx); bufferIdx++; } } else { setNulls(nullabilityHolder, 
bufferIdx, num, vector.getValidityBuffer()); bufferIdx += num; } break; case PACKED: for (int i = 0; i < num; ++i) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { valuesReader.getBuffer(typeWidth).get(byteArray, DecimalVector.TYPE_WIDTH - typeWidth, typeWidth); ((DecimalVector) vector).setBigEndian(bufferIdx, byteArray); nullabilityHolder.setNotNull(bufferIdx); } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfDictionaryEncodedFixedLengthDecimals( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader, Dictionary dict) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedFixedLengthDecimals(vector, typeWidth, idx, num, dict, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, num, vector.getValidityBuffer()); } idx += num; break; case PACKED: for (int i = 0; i < num; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { ByteBuffer decimalBytes = dict.decodeToBinary(dictionaryEncodedValuesReader.readInteger()).toByteBuffer(); byte[] vectorBytes = new byte[DecimalVector.TYPE_WIDTH]; System.arraycopy(decimalBytes, 0, vectorBytes, DecimalVector.TYPE_WIDTH - typeWidth, typeWidth); ((DecimalVector) vector).setBigEndian(idx, vectorBytes); nullabilityHolder.setNotNull(idx); } else { setNull(nullabilityHolder, idx, vector.getValidityBuffer()); } idx++; } break; } left -= num; currentCount -= num; } } public void readBatchVarWidth( final FieldVector vector, final int startOffset, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { for (int i = 0; i < num; i++) { setVarWidthBinaryValue(vector, valuesReader, bufferIdx, nullabilityHolder); bufferIdx++; } } else { setNulls(nullabilityHolder, bufferIdx, num, vector.getValidityBuffer()); bufferIdx += num; } break; case PACKED: for (int i = 0; i < num; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { setVarWidthBinaryValue(vector, valuesReader, bufferIdx, nullabilityHolder); } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= num; currentCount -= num; } } private void setVarWidthBinaryValue(FieldVector vector, ValuesAsBytesReader valuesReader, int bufferIdx, NullabilityHolder nullabilityHolder) { int len = valuesReader.readInteger(); ByteBuffer buffer = valuesReader.getBuffer(len); // Calling setValueLengthSafe takes care of allocating a larger buffer if // running out of space. ((BaseVariableWidthVector) vector).setValueLengthSafe(bufferIdx, len); // It is possible that the data buffer was reallocated. So it is important to // not cache the data buffer reference but instead use vector.getDataBuffer(). 
vector.getDataBuffer().writeBytes(buffer.array(), buffer.position() + buffer.arrayOffset(), buffer.limit() - buffer.position()); // Similarly, we need to get the latest reference to the validity buffer as well // since reallocation changes reference of the validity buffers as well. if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), bufferIdx); } else { nullabilityHolder.setNotNull(bufferIdx); } } public void readBatchOfDictionaryEncodedVarWidth( final FieldVector vector, final int startOffset, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader, Dictionary dict) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedVarWidthBinary(vector, idx, num, dict, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, num, vector.getValidityBuffer()); } idx += num; break; case PACKED: for (int i = 0; i < num; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { ((BaseVariableWidthVector) vector).setSafe( idx, dict.decodeToBinary(dictionaryEncodedValuesReader.readInteger()).getBytesUnsafe()); nullabilityHolder.setNotNull(idx); } else { setNull(nullabilityHolder, idx, vector.getValidityBuffer()); } idx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfIntLongBackedDecimals( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); byte[] byteArray = new byte[DecimalVector.TYPE_WIDTH]; switch (mode) { case RLE: if (currentValue == maxDefLevel) { for (int i = 0; i < num; i++) { setIntLongBackedDecimal(vector, typeWidth, nullabilityHolder, valuesReader, bufferIdx, byteArray); bufferIdx++; } } else { setNulls(nullabilityHolder, bufferIdx, num, vector.getValidityBuffer()); bufferIdx += num; } break; case PACKED: for (int i = 0; i < num; ++i) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { setIntLongBackedDecimal(vector, typeWidth, nullabilityHolder, valuesReader, bufferIdx, byteArray); } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= num; currentCount -= num; } } private void setIntLongBackedDecimal(FieldVector vector, int typeWidth, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader, int bufferIdx, byte[] byteArray) { valuesReader.getBuffer(typeWidth).get(byteArray, 0, typeWidth); vector.getDataBuffer().setBytes(bufferIdx * DecimalVector.TYPE_WIDTH, byteArray); if (setArrowValidityVector) { BitVectorHelper.setValidityBitToOne(vector.getValidityBuffer(), bufferIdx); } else { nullabilityHolder.setNotNull(bufferIdx); } } public void readBatchOfDictionaryEncodedIntLongBackedDecimals( final FieldVector vector, final int startOffset, final int typeWidth, final int numValsToRead, NullabilityHolder nullabilityHolder, VectorizedDictionaryEncodedParquetValuesReader dictionaryEncodedValuesReader, Dictionary dict) { int idx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); 
} int num = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { dictionaryEncodedValuesReader.readBatchOfDictionaryEncodedIntLongBackedDecimals(vector, typeWidth, idx, num, dict, nullabilityHolder); } else { setNulls(nullabilityHolder, idx, num, vector.getValidityBuffer()); } idx += num; break; case PACKED: for (int i = 0; i < num; i++) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { ((DecimalVector) vector).set( idx, typeWidth == Integer.BYTES ? dict.decodeToInt(dictionaryEncodedValuesReader.readInteger()) : dict.decodeToLong(dictionaryEncodedValuesReader.readInteger())); nullabilityHolder.setNotNull(idx); } else { setNull(nullabilityHolder, idx, vector.getValidityBuffer()); } idx++; } break; } left -= num; currentCount -= num; } } public void readBatchOfBooleans( final FieldVector vector, final int startOffset, final int numValsToRead, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader) { int bufferIdx = startOffset; int left = numValsToRead; while (left > 0) { if (this.currentCount == 0) { this.readNextGroup(); } int num = Math.min(left, this.currentCount); switch (mode) { case RLE: if (currentValue == maxDefLevel) { for (int i = 0; i < num; i++) { ((BitVector) vector).setSafe(bufferIdx, valuesReader.readBooleanAsInt()); nullabilityHolder.setNotNull(bufferIdx); bufferIdx++; } } else { setNulls(nullabilityHolder, bufferIdx, num, vector.getValidityBuffer()); bufferIdx += num; } break; case PACKED: for (int i = 0; i < num; ++i) { if (packedValuesBuffer[packedValuesBufferIdx++] == maxDefLevel) { ((BitVector) vector).setSafe(bufferIdx, valuesReader.readBooleanAsInt()); nullabilityHolder.setNotNull(bufferIdx); } else { setNull(nullabilityHolder, bufferIdx, vector.getValidityBuffer()); } bufferIdx++; } break; } left -= num; currentCount -= num; } } private static void setBinaryInVector( VarBinaryVector vector, int typeWidth, ValuesAsBytesReader valuesReader, int bufferIdx, NullabilityHolder nullabilityHolder) { ByteBuffer buffer = valuesReader.getBuffer(typeWidth); vector.setSafe(bufferIdx, buffer.array(), buffer.position() + buffer.arrayOffset(), buffer.limit() - buffer.position()); nullabilityHolder.setNotNull(bufferIdx); } private void setNextNValuesInVector( int typeWidth, NullabilityHolder nullabilityHolder, ValuesAsBytesReader valuesReader, int bufferIdx, FieldVector vector, int numValues) { ArrowBuf validityBuffer = vector.getValidityBuffer(); if (currentValue == maxDefLevel) { ByteBuffer buffer = valuesReader.getBuffer(numValues * typeWidth); vector.getDataBuffer().setBytes(bufferIdx * typeWidth, buffer); if (setArrowValidityVector) { for (int i = 0; i < numValues; i++) { BitVectorHelper.setValidityBitToOne(validityBuffer, bufferIdx + i); } } else { nullabilityHolder.setNotNulls(bufferIdx, numValues); } } else { setNulls(nullabilityHolder, bufferIdx, numValues, validityBuffer); } } private void setNull(NullabilityHolder nullabilityHolder, int bufferIdx, ArrowBuf validityBuffer) { if (setArrowValidityVector) { BitVectorHelper.setValidityBit(validityBuffer, bufferIdx, 0); } else { nullabilityHolder.setNull(bufferIdx); } } private void setNulls(NullabilityHolder nullabilityHolder, int idx, int numValues, ArrowBuf validityBuffer) { if (setArrowValidityVector) { for (int i = 0; i < numValues; i++) { BitVectorHelper.setValidityBit(validityBuffer, idx + i, 0); } } else { nullabilityHolder.setNulls(idx, numValues); } } }
1
18,275
What about the call in `case PACKED` just below? Does that also need to use the `typeWidth`?
apache-iceberg
java
@@ -8,13 +8,14 @@ import ( "errors" "github.com/ethersphere/bee/pkg/collection" + "github.com/ethersphere/bee/pkg/encryption" "github.com/ethersphere/bee/pkg/swarm" ) var ( _ = collection.Entry(&Entry{}) serializedDataSize = swarm.SectionSize * 2 - encryptedSerializedDataSize = swarm.EncryptedReferenceSize * 2 + encryptedSerializedDataSize = encryption.ReferenceSize * 2 ) // Entry provides addition of metadata to a data reference.
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package entry import ( "errors" "github.com/ethersphere/bee/pkg/collection" "github.com/ethersphere/bee/pkg/swarm" ) var ( _ = collection.Entry(&Entry{}) serializedDataSize = swarm.SectionSize * 2 encryptedSerializedDataSize = swarm.EncryptedReferenceSize * 2 ) // Entry provides addition of metadata to a data reference. // Implements collection.Entry. type Entry struct { reference swarm.Address metadata swarm.Address } // New creates a new Entry. func New(reference, metadata swarm.Address) *Entry { return &Entry{ reference: reference, metadata: metadata, } } // Reference implements collection.Entry func (e *Entry) Reference() swarm.Address { return e.reference } // Metadata implements collection.Entry func (e *Entry) Metadata() swarm.Address { return e.metadata } // MarshalBinary implements encoding.BinaryMarshaler func (e *Entry) MarshalBinary() ([]byte, error) { br := e.reference.Bytes() bm := e.metadata.Bytes() b := append(br, bm...) return b, nil } // UnmarshalBinary implements encoding.BinaryUnmarshaler func (e *Entry) UnmarshalBinary(b []byte) error { var size int if len(b) == serializedDataSize { size = serializedDataSize } else if len(b) == encryptedSerializedDataSize { size = encryptedSerializedDataSize } else { return errors.New("invalid data length") } e.reference = swarm.NewAddress(b[:size/2]) e.metadata = swarm.NewAddress(b[size/2:]) return nil }
1
11,952
Not sure I like this change. The encryption package does not need to know about references (see the sketch after this record).
ethersphere-bee
go
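The review above is about dependency direction: the encryption package should not be the owner of reference-size knowledge. The following is a minimal, self-contained Go sketch of the alternative the comment points at — all names and sizes here are illustrative assumptions, not the bee API — where the (encrypted) reference size is derived next to the code that owns references, e.g. from the hash section size plus a same-length decryption key, instead of being imported from encryption.

```go
package main

import "fmt"

// Illustrative sizes only: a plain reference is assumed to be one 32-byte
// hash section; an encrypted reference is assumed to be a hash section plus
// a same-length decryption key.
const (
	sectionSize            = 32
	referenceSize          = sectionSize
	encryptedReferenceSize = sectionSize * 2
)

// A serialized entry holds two references: the data reference and the
// metadata reference.
const (
	serializedDataSize          = referenceSize * 2
	encryptedSerializedDataSize = encryptedReferenceSize * 2
)

func main() {
	fmt.Println(serializedDataSize, encryptedSerializedDataSize) // 64 128
}
```

The behaviour is identical either way; the sketch only illustrates keeping knowledge of reference layout out of the encryption package, which is the reviewer's objection to the diff.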
@@ -280,6 +280,7 @@ ifneq ($(MAKECMDGOALS),clean) ifeq ($(llvm_version),3.9.1) else ifeq ($(llvm_version),5.0.2) else ifeq ($(llvm_version),6.0.1) + else ifeq ($(llvm_version),7.0.0) # this is what is shipped with ubuntu bionic llvm-7 package right now else ifeq ($(llvm_version),7.0.1) else $(warning WARNING: Unsupported LLVM version: $(llvm_version))
1
# Determine the operating system OSTYPE ?= ifeq ($(OS),Windows_NT) OSTYPE = windows else UNAME_S := $(shell uname -s) ifeq ($(UNAME_S),Linux) OSTYPE = linux ifndef AR ifneq (,$(shell which gcc-ar 2> /dev/null)) AR = gcc-ar endif endif ALPINE=$(wildcard /etc/alpine-release) endif ifeq ($(UNAME_S),Darwin) OSTYPE = osx endif ifeq ($(UNAME_S),FreeBSD) OSTYPE = bsd CXX = c++ endif ifeq ($(UNAME_S),DragonFly) OSTYPE = bsd CXX = c++ endif ifeq ($(UNAME_S),OpenBSD) OSTYPE = bsd CXX = c++ endif endif ifdef LTO_PLUGIN lto := yes endif # Default settings (silent release build). config ?= release arch ?= native tune ?= generic cpu ?= $(arch) fpu ?= bits ?= $(shell getconf LONG_BIT) ifndef verbose SILENT = @ else SILENT = endif ifneq ($(wildcard .git),) tag := $(shell cat VERSION)-$(shell git rev-parse --short HEAD) else tag := $(shell cat VERSION) endif version_str = "$(tag) [$(config)]\ncompiled with: llvm $(llvm_version) \ -- "$(compiler_version) # package_name, _version, and _iteration can be overridden by Travis or AppVeyor package_base_version ?= $(tag) package_iteration ?= "1" package_name ?= "ponyc" package_version = $(package_base_version)-$(package_iteration) archive = $(package_name)-$(package_version).tar package = build/$(package_name)-$(package_version) prefix ?= /usr/local bindir ?= $(prefix)/bin includedir ?= $(prefix)/include libdir ?= $(prefix)/lib # destdir is for backward compatibility only, use ponydir instead. ifdef destdir $(warning Please use ponydir instead of destdir.) ponydir ?= $(destdir) endif ponydir ?= $(libdir)/pony/$(tag) symlink := yes ifdef ponydir ifndef prefix symlink := no endif endif ifneq (,$(filter $(OSTYPE), osx bsd)) symlink.flags = -sf else symlink.flags = -srf endif ifneq (,$(filter $(OSTYPE), osx bsd)) SED_INPLACE = sed -i -e else SED_INPLACE = sed -i endif LIB_EXT ?= a BUILD_FLAGS = -march=$(arch) -mtune=$(tune) -Werror -Wconversion \ -Wno-sign-conversion -Wextra -Wall LINKER_FLAGS = -march=$(arch) -mtune=$(tune) $(LDFLAGS) AR_FLAGS ?= rcs ALL_CFLAGS = -std=gnu11 -fexceptions \ -DPONY_VERSION=\"$(tag)\" -DLLVM_VERSION=\"$(llvm_version)\" \ -DPONY_COMPILER=\"$(CC)\" -DPONY_ARCH=\"$(arch)\" \ -DBUILD_COMPILER=\"$(compiler_version)\" \ -DPONY_BUILD_CONFIG=\"$(config)\" \ -DPONY_VERSION_STR=\"$(version_str)\" \ -D_FILE_OFFSET_BITS=64 ALL_CXXFLAGS = -std=gnu++11 -fno-rtti LL_FLAGS = -mcpu=$(cpu) # Determine pointer size in bits. 
BITS := $(bits) UNAME_M := $(shell uname -m) ifeq ($(BITS),64) ifeq ($(UNAME_M),x86_64) ifeq (,$(filter $(arch), armv8-a)) BUILD_FLAGS += -mcx16 LINKER_FLAGS += -mcx16 endif endif endif ifneq ($(fpu),) BUILD_FLAGS += -mfpu=$(fpu) LINKER_FLAGS += -mfpu=$(fpu) endif PONY_BUILD_DIR ?= build/$(config) PONY_SOURCE_DIR ?= src PONY_TEST_DIR ?= test PONY_BENCHMARK_DIR ?= benchmark ifdef use ifneq (,$(filter $(use), valgrind)) ALL_CFLAGS += -DUSE_VALGRIND PONY_BUILD_DIR := $(PONY_BUILD_DIR)-valgrind endif ifneq (,$(filter $(use), coverage)) ifneq (,$(shell $(CC) -v 2>&1 | grep clang)) # clang COVERAGE_FLAGS = -O0 -fprofile-instr-generate -fcoverage-mapping LINKER_FLAGS += -fprofile-instr-generate -fcoverage-mapping else ifneq (,$(shell $(CC) -v 2>&1 | grep "gcc version")) # gcc COVERAGE_FLAGS = -O0 -fprofile-arcs -ftest-coverage LINKER_FLAGS += -fprofile-arcs else $(error coverage not supported for this compiler/platform) endif ALL_CFLAGS += $(COVERAGE_FLAGS) ALL_CXXFLAGS += $(COVERAGE_FLAGS) endif PONY_BUILD_DIR := $(PONY_BUILD_DIR)-coverage endif ifneq (,$(filter $(use), pooltrack)) ALL_CFLAGS += -DUSE_POOLTRACK PONY_BUILD_DIR := $(PONY_BUILD_DIR)-pooltrack endif ifneq (,$(filter $(use), dtrace)) DTRACE ?= $(shell which dtrace) ifeq (, $(DTRACE)) $(error No dtrace compatible user application static probe generation tool found) endif ALL_CFLAGS += -DUSE_DYNAMIC_TRACE PONY_BUILD_DIR := $(PONY_BUILD_DIR)-dtrace endif ifneq (,$(filter $(use), actor_continuations)) ALL_CFLAGS += -DUSE_ACTOR_CONTINUATIONS PONY_BUILD_DIR := $(PONY_BUILD_DIR)-actor_continuations endif ifneq (,$(filter $(use), scheduler_scaling_pthreads)) ALL_CFLAGS += -DUSE_SCHEDULER_SCALING_PTHREADS PONY_BUILD_DIR := $(PONY_BUILD_DIR)-scheduler_scaling_pthreads endif endif ifdef config ifeq (,$(filter $(config),debug release)) $(error Unknown configuration "$(config)") endif endif ifeq ($(config),release) BUILD_FLAGS += -O3 -DNDEBUG LL_FLAGS += -O3 ifeq ($(lto),yes) BUILD_FLAGS += -flto -DPONY_USE_LTO LINKER_FLAGS += -flto ifdef LTO_PLUGIN AR_FLAGS += --plugin $(LTO_PLUGIN) endif ifneq (,$(filter $(OSTYPE),linux bsd)) LINKER_FLAGS += -fuse-linker-plugin -fuse-ld=gold endif endif else BUILD_FLAGS += -g -DDEBUG endif ifeq ($(OSTYPE),osx) ALL_CFLAGS += -mmacosx-version-min=10.12 -DUSE_SCHEDULER_SCALING_PTHREADS ALL_CXXFLAGS += -stdlib=libc++ -mmacosx-version-min=10.12 endif # If we are not cleaning we need LLVM_CONFIG ifneq ($(MAKECMDGOALS),clean) ifndef LLVM_CONFIG ifneq (,$(shell which llvm-config 2> /dev/null)) LLVM_CONFIG = llvm-config else $(error No LLVM installation found! Set LLVM_CONFIG environment variable \ to the `llvm-config` binary for your installation) endif else ifeq (,$(shell which $(LLVM_CONFIG) 2> /dev/null)) $(error LLVM config $(LLVM_CONFIG) not found! Set LLVM_CONFIG environment \ variable to a valid LLVM installation.) 
endif LLVM_BINDIR := $(shell $(LLVM_CONFIG) --bindir 2> /dev/null) LLVM_LINK := $(LLVM_BINDIR)/llvm-link LLVM_OPT := $(LLVM_BINDIR)/opt LLVM_LLC := $(LLVM_BINDIR)/llc LLVM_AS := $(LLVM_BINDIR)/llvm-as llvm_build_mode := $(shell $(LLVM_CONFIG) --build-mode) ifeq (Release,$(llvm_build_mode)) LLVM_BUILD_MODE=LLVM_BUILD_MODE_Release else ifeq (RelWithDebInfo,$(llvm_build_mode)) LLVM_BUILD_MODE=LLVM_BUILD_MODE_RelWithDebInfo else ifeq (MinSizeRel,$(llvm_build_mode)) LLVM_BUILD_MODE=LLVM_BUILD_MODE_MinSizeRel else ifeq (Debug,$(llvm_build_mode)) LLVM_BUILD_MODE=LLVM_BUILD_MODE_Debug else $(error "Unknown llvm build-mode of $(llvm_build_mode)", aborting) endif llvm_version := $(shell $(LLVM_CONFIG) --version) ifeq (,$(LLVM_LINK_STATIC)) ifneq (,$(filter $(use), llvm_link_static)) LLVM_LINK_STATIC=--link-static $(warning "linking llvm statically") endif endif ifeq ($(OSTYPE),osx) ifneq (,$(shell which $(LLVM_BINDIR)/llvm-ar 2> /dev/null)) AR = $(LLVM_BINDIR)/llvm-ar AR_FLAGS := rcs else AR = /usr/bin/ar AR_FLAGS := -rcs endif endif ifeq ($(llvm_version),3.9.1) else ifeq ($(llvm_version),5.0.2) else ifeq ($(llvm_version),6.0.1) else ifeq ($(llvm_version),7.0.1) else $(warning WARNING: Unsupported LLVM version: $(llvm_version)) $(warning Please use LLVM 3.9.1, 5.0.2, 6.0.1, 7.0.1) endif # Third party, but prebuilt. Prebuilt libraries are defined as # (1) a name (stored in prebuilt) # (2) the linker flags necessary to link against the prebuilt libraries # (3) a list of include directories for a set of libraries # (4) a list of the libraries to link against llvm.ldflags := -L$(CROSS_SYSROOT)$(subst -L,,$(shell $(LLVM_CONFIG) --ldflags $(LLVM_LINK_STATIC))) # Get cflags using llvm-config llvm.get_cflags := $(LLVM_CONFIG) --cflags $(LLVM_LINK_STATIC) #$(warning llvm.get_cflags="$(llvm.get_cflags)") llvm.cflags := $(shell sh -c "$(llvm.get_cflags)") #$(warning llvm.cflags="$(llvm.cflags)") # Get include dirs using grep & sed to extract "-I<dir>" and "-isystem<dir>" entries # that can occur anywhere in the string and <dir> may have a leading spaces, but the # regex assumes a directory does NOT contain spaces. # Note: [:space:] is used for greater portability. llvm.get_include_dirs := echo '$(llvm.cflags)' | grep -oE -- '(^-I[[:space:]]*| -I[[:space:]]*|^-isystem[[:space:]]*| -isystem[[:space:]]*)[^[:space:]]+' | sed -E 's/^[[:space:]]*(-I[[:space:]]*|-isystem[[:space:]]*)//' #$(warning llvm.get_include_dirs="$(llvm.get_include_dirs)") llvm.include_dirs := $(shell sh -c "$(llvm.get_include_dirs)") #$(warning llvm.include_dirs="$(llvm.include_dirs)") # Get the compiler output of verbose "-v" and preprocess, "-E" parameters which # contains the search paths. verbose_preprocess_string := $(shell echo | $(CC) -v -E - 2>&1) #$(warning verbose_preprocess_string="$(verbose_preprocess_string)") # We must escape any double quotes, ", and any hash, #, characters. 
quoteDblQuote := $(subst ",\",$(verbose_preprocess_string)) #$(warning quoteDblQuote="$(quoteDblQuote)") quoted_verbose_preprocess_string := $(subst \#,\\\#,$(quoteDblQuote)) #$(warning quoted_verbose_preprocess_string="$(quoted_verbose_preprocess_string)") # Now use a sed command line to extract the search paths from the # quoted verbose preprocess string get_search_paths := sed 's/\(.*\)search starts here:\(.*\)End of search list.\(.*\)/\2/' #$(warning get_search_paths="$(get_search_paths)") search_paths := $(shell echo "$(quoted_verbose_preprocess_string)" | $(get_search_paths)) #$(warning search_paths="$(search_paths)") # Note: $(search_paths) is padded with a space on front and back so # that when we iterate the ${inc_dir} variable is guaranteed to have # a space at the beginning and end making finding a match easy. If # there is no match we output the ${inc_dir}. loopit := \ for inc_dir in $(llvm.include_dirs); do \ if ! echo " $(search_paths) " | grep -q " $${inc_dir} "; then \ echo "-isystem $(CROSS_SYSROOT)$${inc_dir}"; \ fi \ done #$(warning loopit="$(loopit)") llvm.include = $(shell $(loopit)) #$(warning llvm.include="$(llvm.include)") llvm.libs := $(shell $(LLVM_CONFIG) --libs $(LLVM_LINK_STATIC)) -lz -lncurses endif compiler_version := "$(shell $(CC) --version | sed -n 1p)" ifeq ($(runtime-bitcode),yes) ifeq (,$(shell $(CC) -v 2>&1 | grep clang)) $(error Compiling the runtime as a bitcode file requires clang) endif endif # Set default ssl version ifdef default_ssl ifeq ("openssl_0.9.0","$(default_ssl)") default_ssl_valid:=ok endif ifeq ("openssl_1.1.0","$(default_ssl)") default_ssl_valid:=ok endif ifeq (ok,$(default_ssl_valid)) $(warning default_ssl is $(default_ssl)) else $(error default_ssl=$(default_ssl) is invalid, expecting one of openssl_0.9.0 or openssl_1.1.0) endif BUILD_FLAGS += -DPONY_DEFAULT_SSL=\"$(default_ssl)\" endif makefile_abs_path := $(realpath $(lastword $(MAKEFILE_LIST))) packages_abs_src := $(shell dirname $(makefile_abs_path))/packages $(shell mkdir -p $(PONY_BUILD_DIR)) lib := $(PONY_BUILD_DIR)/lib/$(arch) bin := $(PONY_BUILD_DIR) tests := $(PONY_BUILD_DIR) benchmarks := $(PONY_BUILD_DIR) obj := $(PONY_BUILD_DIR)/obj-$(arch) # Libraries. Defined as # (1) a name and output directory libponyc := $(lib) libponycc := $(lib) libponyrt := $(lib) ifeq ($(OSTYPE),linux) libponyrt-pic := $(lib) endif # Define special case rules for a targets source files. By default # this makefile assumes that a targets source files can be found # relative to a parent directory of the same name in $(PONY_SOURCE_DIR). # Note that it is possible to collect files and exceptions with # arbitrarily complex shell commands, as long as ':=' is used # for definition, instead of '='. ifneq ($(OSTYPE),windows) libponyc.except += src/libponyc/platform/signed.cc libponyc.except += src/libponyc/platform/unsigned.cc libponyc.except += src/libponyc/platform/vcvars.c endif # Handle platform specific code to avoid "no symbols" warnings. 
libponyrt.except = ifneq ($(OSTYPE),windows) libponyrt.except += src/libponyrt/asio/iocp.c libponyrt.except += src/libponyrt/lang/win_except.c endif ifneq ($(OSTYPE),linux) libponyrt.except += src/libponyrt/asio/epoll.c endif ifneq ($(OSTYPE),osx) ifneq ($(OSTYPE),bsd) libponyrt.except += src/libponyrt/asio/kqueue.c endif endif libponyrt.except += src/libponyrt/asio/sock.c libponyrt.except += src/libponyrt/dist/dist.c libponyrt.except += src/libponyrt/dist/proto.c ifeq ($(OSTYPE),linux) libponyrt-pic.dir := src/libponyrt libponyrt-pic.except := $(libponyrt.except) endif # Third party, but requires compilation. Defined as # (1) a name and output directory. # (2) a list of the source files to be compiled. libgtest := $(lib) libgtest.dir := lib/gtest libgtest.files := $(libgtest.dir)/gtest-all.cc libgbenchmark := $(lib) libgbenchmark.dir := lib/gbenchmark libgbenchmark.srcdir := $(libgbenchmark.dir)/src libblake2 := $(lib) libblake2.dir := lib/blake2 libblake2.files := $(libblake2.dir)/blake2b-ref.c # We don't add libponyrt here. It's a special case because it can be compiled # to LLVM bitcode. ifeq ($(OSTYPE), linux) libraries := libponyc libponyrt-pic libgtest libgbenchmark libblake2 else libraries := libponyc libgtest libgbenchmark libblake2 endif ifeq ($(OSTYPE), bsd) extra.bsd.libs = -lpthread -lexecinfo llvm.libs += $(extra.bsd.libs) endif prebuilt := llvm # Binaries. Defined as # (1) a name and output directory. ponyc := $(bin) binaries := ponyc # Tests suites are directly attached to the libraries they test. libponyc.tests := $(tests) libponyrt.tests := $(tests) tests := libponyc.tests libponyrt.tests # Benchmark suites are directly attached to the libraries they test. libponyc.benchmarks := $(benchmarks) libponyc.benchmarks.dir := benchmark/libponyc libponyc.benchmarks.srcdir := $(libponyc.benchmarks.dir) libponyrt.benchmarks := $(benchmarks) libponyrt.benchmarks.dir := benchmark/libponyrt libponyrt.benchmarks.srcdir := $(libponyrt.benchmarks.dir) benchmarks := libponyc.benchmarks libponyrt.benchmarks # Define include paths for targets if necessary. Note that these include paths # will automatically apply to the test suite of a target as well. 
libponyc.include := -I src/common/ -I src/libponyrt/ $(llvm.include) \ -isystem lib/blake2 libponycc.include := -I src/common/ $(llvm.include) libponyrt.include := -I src/common/ -I src/libponyrt/ libponyrt-pic.include := $(libponyrt.include) libponyc.tests.include := -I src/common/ -I src/libponyc/ -I src/libponyrt \ $(llvm.include) -isystem lib/gtest/ libponyrt.tests.include := -I src/common/ -I src/libponyrt/ -isystem lib/gtest/ libponyc.benchmarks.include := -I src/common/ -I src/libponyc/ \ $(llvm.include) -isystem lib/gbenchmark/include/ libponyrt.benchmarks.include := -I src/common/ -I src/libponyrt/ -isystem \ lib/gbenchmark/include/ ponyc.include := -I src/common/ -I src/libponyrt/ $(llvm.include) libgtest.include := -isystem lib/gtest/ libgbenchmark.include := -isystem lib/gbenchmark/include/ libblake2.include := -isystem lib/blake2/ ifneq (,$(filter $(OSTYPE), osx bsd)) libponyrt.include += -I $(CROSS_SYSROOT)/usr/local/include endif # target specific build options libponyrt.tests.linkoptions += -rdynamic ifneq ($(ALPINE),) libponyrt.tests.linkoptions += -lexecinfo endif libponyc.buildoptions = -D__STDC_CONSTANT_MACROS libponyc.buildoptions += -D__STDC_FORMAT_MACROS libponyc.buildoptions += -D__STDC_LIMIT_MACROS libponyc.buildoptions += -DPONY_ALWAYS_ASSERT libponyc.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE) libponyc.tests.buildoptions = -D__STDC_CONSTANT_MACROS libponyc.tests.buildoptions += -D__STDC_FORMAT_MACROS libponyc.tests.buildoptions += -D__STDC_LIMIT_MACROS libponyc.tests.buildoptions += -DPONY_ALWAYS_ASSERT libponyc.tests.buildoptions += -DPONY_PACKAGES_DIR=\"$(packages_abs_src)\" libponyc.tests.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE) libponyc.tests.linkoptions += -rdynamic ifneq ($(ALPINE),) libponyc.tests.linkoptions += -lexecinfo endif libponyc.benchmarks.buildoptions = -D__STDC_CONSTANT_MACROS libponyc.benchmarks.buildoptions += -D__STDC_FORMAT_MACROS libponyc.benchmarks.buildoptions += -D__STDC_LIMIT_MACROS libponyc.benchmarks.buildoptions += -DLLVM_BUILD_MODE=$(LLVM_BUILD_MODE) libgbenchmark.buildoptions := \ -Wshadow -pedantic -pedantic-errors \ -Wfloat-equal -fstrict-aliasing -Wstrict-aliasing -Wno-invalid-offsetof \ -DHAVE_POSIX_REGEX -DHAVE_STD_REGEX -DHAVE_STEADY_CLOCK ifneq ($(ALPINE),) libponyc.benchmarks.linkoptions += -lexecinfo libponyrt.benchmarks.linkoptions += -lexecinfo endif ponyc.buildoptions = $(libponyc.buildoptions) ponyc.linkoptions += -rdynamic ifneq ($(ALPINE),) ponyc.linkoptions += -lexecinfo BUILD_FLAGS += -DALPINE_LINUX endif ifeq ($(OSTYPE), linux) libponyrt-pic.buildoptions += -fpic libponyrt-pic.buildoptions-ll += -relocation-model=pic endif # Set default PIC for compiling if requested ifdef default_pic ifeq (true,$(default_pic)) libponyrt.buildoptions += -fpic libponyrt.buildoptions-ll += -relocation-model=pic BUILD_FLAGS += -DPONY_DEFAULT_PIC=true else ifneq (false,$(default_pic)) $(error default_pic must be true or false) endif endif endif # target specific disabling of build options libgtest.disable = -Wconversion -Wno-sign-conversion -Wextra libgbenchmark.disable = -Wconversion -Wno-sign-conversion libblake2.disable = -Wconversion -Wno-sign-conversion -Wextra # Link relationships. 
ponyc.links = libponyc libponyrt llvm libblake2 libponyc.tests.links = libgtest libponyc llvm libblake2 libponyc.tests.links.whole = libponyrt libponyrt.tests.links = libgtest libponyrt libponyc.benchmarks.links = libblake2 libgbenchmark libponyc libponyrt llvm libponyrt.benchmarks.links = libgbenchmark libponyrt ifeq ($(OSTYPE),linux) ponyc.links += libpthread libdl libatomic libponyc.tests.links += libpthread libdl libatomic libponyrt.tests.links += libpthread libdl libatomic libponyc.benchmarks.links += libpthread libdl libatomic libponyrt.benchmarks.links += libpthread libdl libatomic endif ifeq ($(OSTYPE),bsd) libponyc.tests.links += libpthread libponyrt.tests.links += $(extra.bsd.libs) libponyc.benchmarks.links += libpthread libponyrt.benchmarks.links += $(extra.bsd.libs) endif ifneq (, $(DTRACE)) $(shell $(DTRACE) -h -s $(PONY_SOURCE_DIR)/common/dtrace_probes.d -o $(PONY_SOURCE_DIR)/common/dtrace_probes.h) endif # Overwrite the default linker for a target. ponyc.linker = $(CXX) #compile as C but link as CPP (llvm) libponyc.benchmarks.linker = $(CXX) libponyrt.benchmarks.linker = $(CXX) # make targets targets := $(libraries) libponyrt $(binaries) $(tests) $(benchmarks) .PHONY: all $(targets) install uninstall clean stats deploy prerelease check-version test-core test-stdlib-debug test-stdlib test-examples validate-grammar test-ci test-cross-ci benchmark stdlib stdlib-debug all: $(targets) @: # Dependencies libponyc.depends := libponyrt libblake2 libponyc.tests.depends := libponyc libgtest libponyrt.tests.depends := libponyrt libgtest libponyc.benchmarks.depends := libponyc libgbenchmark libponyrt.benchmarks.depends := libponyrt libgbenchmark ponyc.depends := libponyc libponyrt # Generic make section, edit with care. ########################################################################## # # # DIRECTORY: Determines the source dir of a specific target # # # # ENUMERATE: Enumerates input and output files for a specific target # # # # CONFIGURE_COMPILER: Chooses a C or C++ compiler depending on the # # target file. # # # # CONFIGURE_LIBS: Builds a string of libraries to link for a targets # # link dependency. # # # # CONFIGURE_LINKER: Assembles the linker flags required for a target. # # # # EXPAND_COMMAND: Macro that expands to a proper make command for each # # target. 
# # # ########################################################################## define DIRECTORY $(eval sourcedir := ) $(eval outdir := $(obj)/$(1)) ifdef $(1).srcdir sourcedir := $($(1).srcdir) else ifdef $(1).dir sourcedir := $($(1).dir) else ifneq ($$(filter $(1),$(tests)),) sourcedir := $(PONY_TEST_DIR)/$(subst .tests,,$(1)) outdir := $(obj)/tests/$(subst .tests,,$(1)) else ifneq ($$(filter $(1),$(benchmarks)),) sourcedir := $(PONY_BENCHMARK_DIR)/$(subst .benchmarks,,$(1)) outdir := $(obj)/benchmarks/$(subst .benchmarks,,$(1)) else sourcedir := $(PONY_SOURCE_DIR)/$(1) endif endef define ENUMERATE $(eval sourcefiles := ) ifdef $(1).files sourcefiles := $$($(1).files) else sourcefiles := $$(shell find $$(sourcedir) -type f -name "*.c" -or -name\ "*.cc" -or -name "*.ll" | grep -v '.*/\.') endif ifdef $(1).except sourcefiles := $$(filter-out $($(1).except),$$(sourcefiles)) endif endef define CONFIGURE_COMPILER ifeq ($(suffix $(1)),.cc) compiler := $(CXX) flags := $(ALL_CXXFLAGS) $(CXXFLAGS) endif ifeq ($(suffix $(1)),.c) compiler := $(CC) flags := $(ALL_CFLAGS) $(CFLAGS) endif ifeq ($(suffix $(1)),.bc) compiler := $(CC) flags := $(ALL_CFLAGS) $(CFLAGS) endif ifeq ($(suffix $(1)),.ll) compiler := $(CC) flags := $(ALL_CFLAGS) $(CFLAGS) -Wno-override-module endif endef define CONFIGURE_LIBS ifneq (,$$(filter $(1),$(prebuilt))) linkcmd += $($(1).ldflags) libs += $($(1).libs) else libs += $(subst lib,-l,$(1)) endif endef define CONFIGURE_LIBS_WHOLE ifeq ($(OSTYPE),osx) wholelibs += -Wl,-force_load,$(lib)/$(1).a else wholelibs += $(subst lib,-l,$(1)) endif endef define CONFIGURE_LINKER_WHOLE $(eval wholelibs :=) ifneq ($($(1).links.whole),) $(foreach lk,$($(1).links.whole),$(eval $(call CONFIGURE_LIBS_WHOLE,$(lk)))) ifeq ($(OSTYPE),osx) libs += $(wholelibs) else libs += -Wl,--whole-archive $(wholelibs) -Wl,--no-whole-archive endif endif endef define CONFIGURE_LINKER $(eval linkcmd := $(LINKER_FLAGS) -L $(lib)) $(eval linker := $(CC)) $(eval libs :=) ifdef $(1).linker linker := $($(1).linker) else ifneq (,$$(filter .cc,$(suffix $(sourcefiles)))) linker := $(CXX) endif $(eval $(call CONFIGURE_LINKER_WHOLE,$(1))) $(foreach lk,$($(1).links),$(eval $(call CONFIGURE_LIBS,$(lk)))) linkcmd += $(libs) -L $(CROSS_SYSROOT)/usr/local/lib $($(1).linkoptions) endef define PREPARE $(eval $(call DIRECTORY,$(1))) $(eval $(call ENUMERATE,$(1))) $(eval $(call CONFIGURE_LINKER,$(1))) $(eval objectfiles := $(subst $(sourcedir)/,$(outdir)/,$(addsuffix .o,\ $(sourcefiles)))) $(eval bitcodefiles := $(subst .o,.bc,$(objectfiles))) $(eval dependencies := $(subst .c,,$(subst .cc,,$(subst .ll,,$(subst .o,.d,\ $(objectfiles)))))) endef define EXPAND_OBJCMD $(eval file := $(subst .o,,$(1))) $(eval $(call CONFIGURE_COMPILER,$(file))) ifeq ($(3),libponyrtyes) ifneq ($(suffix $(file)),.bc) $(subst .c,,$(subst .cc,,$(subst .ll,,$(1)))): $(subst .c,.bc,$(subst .cc,.bc,$(subst .ll,.bc,$(file)))) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(compiler) $(flags) -c -o $$@ $$< else ifeq ($(suffix $(subst .bc,,$(file))),.ll) $(subst .ll,,$(1)): $(subst $(outdir)/,$(sourcedir)/,$(subst .bc,,$(file))) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(LLVM_AS) -o $$@ $$< else $(subst .c,,$(subst .cc,,$(1))): $(subst $(outdir)/,$(sourcedir)/,$(subst .bc,,$(file))) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(compiler) -MMD -MP $(filter-out $($(2).disable),$(BUILD_FLAGS)) \ $(flags) $($(2).buildoptions) -emit-llvm -c -o $$@ $$< $($(2).include) endif else ifeq ($(suffix $(file)),.ll) $(subst .ll,,$(1)): $(subst 
$(outdir)/,$(sourcedir)/,$(file)) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(LLVM_LLC) $(LL_FLAGS) $($(2).buildoptions-ll) -filetype=obj -o $$@ $$< else $(subst .c,,$(subst .cc,,$(1))): $(subst $(outdir)/,$(sourcedir)/,$(file)) @echo '$$(notdir $$<)' @mkdir -p $$(dir $$@) $(SILENT)$(compiler) -MMD -MP $(filter-out $($(2).disable),$(BUILD_FLAGS)) \ $(flags) $($(2).buildoptions) -c -o $$@ $$< $($(2).include) endif endef define EXPAND_COMMAND $(eval $(call PREPARE,$(1))) $(eval ofiles := $(subst .c,,$(subst .cc,,$(subst .ll,,$(objectfiles))))) $(eval bcfiles := $(subst .c,,$(subst .cc,,$(subst .ll,,$(bitcodefiles))))) $(eval depends := ) $(foreach d,$($(1).depends),$(eval depends += $($(d))/$(d).$(LIB_EXT))) ifeq ($(1),libponyrt) $($(1))/libponyrt.$(LIB_EXT): $(depends) $(ofiles) @mkdir -p $$(dir $$@) @echo 'Linking libponyrt' ifneq (,$(DTRACE)) ifeq ($(OSTYPE), linux) @echo 'Generating dtrace object file (linux)' $(SILENT)$(DTRACE) -G -s $(PONY_SOURCE_DIR)/common/dtrace_probes.d -o $(PONY_BUILD_DIR)/dtrace_probes.o $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) $(PONY_BUILD_DIR)/dtrace_probes.o else ifeq ($(OSTYPE), bsd) @echo 'Generating dtrace object file (bsd)' $(SILENT)rm -f $(PONY_BUILD_DIR)/dtrace_probes.o $(SILENT)$(DTRACE) -G -s $(PONY_SOURCE_DIR)/common/dtrace_probes.d -o $(PONY_BUILD_DIR)/dtrace_probes.o $(ofiles) $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) $(PONY_BUILD_DIR)/dtrace_probes.o $(SILENT)$(AR) $(AR_FLAGS) $(lib)/libdtrace_probes.a $(PONY_BUILD_DIR)/dtrace_probes.o else $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) endif else $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) endif ifeq ($(runtime-bitcode),yes) $($(1))/libponyrt.bc: $(depends) $(bcfiles) @mkdir -p $$(dir $$@) @echo 'Generating bitcode for libponyrt' $(SILENT)$(LLVM_LINK) -o $$@ $(bcfiles) ifeq ($(config),release) $(SILENT)$(LLVM_OPT) -O3 -o $$@ $$@ endif libponyrt: $($(1))/libponyrt.bc $($(1))/libponyrt.$(LIB_EXT) else libponyrt: $($(1))/libponyrt.$(LIB_EXT) endif else ifneq ($(filter $(1),$(libraries)),) $($(1))/$(1).$(LIB_EXT): $(depends) $(ofiles) @mkdir -p $$(dir $$@) @echo 'Linking $(1)' $(SILENT)$(AR) $(AR_FLAGS) $$@ $(ofiles) $(1): $($(1))/$(1).$(LIB_EXT) else $($(1))/$(1): $(depends) $(ofiles) @mkdir -p $$(dir $$@) @echo 'Linking $(1)' $(SILENT)$(linker) -o $$@ $(ofiles) $(linkcmd) $(1): $($(1))/$(1) endif $(foreach bcfile,$(bitcodefiles),$(eval $(call EXPAND_OBJCMD,$(bcfile),$(1),$(addsuffix $(runtime-bitcode),$(1))))) $(foreach ofile,$(objectfiles),$(eval $(call EXPAND_OBJCMD,$(ofile),$(1),$(addsuffix $(runtime-bitcode),$(1))))) -include $(dependencies) endef $(foreach target,$(targets),$(eval $(call EXPAND_COMMAND,$(target)))) define EXPAND_INSTALL ifeq ($(OSTYPE),linux) install-libponyrt-pic: libponyrt-pic @mkdir -p $(destdir)/lib/$(arch) $(SILENT)cp $(lib)/libponyrt-pic.a $(DESTDIR)$(ponydir)/lib/$(arch) endif install-libponyrt: libponyrt @mkdir -p $(destdir)/lib/$(arch) $(SILENT)cp $(lib)/libponyrt.a $(DESTDIR)$(ponydir)/lib/$(arch) ifeq ($(OSTYPE),linux) install: libponyc libponyrt libponyrt-pic ponyc else install: libponyc libponyrt ponyc endif @mkdir -p $(DESTDIR)$(ponydir)/bin @mkdir -p $(DESTDIR)$(ponydir)/lib/$(arch) @mkdir -p $(DESTDIR)$(ponydir)/include/pony/detail $(SILENT)cp $(lib)/libponyrt.a $(DESTDIR)$(ponydir)/lib/$(arch) ifeq ($(OSTYPE),linux) $(SILENT)cp $(lib)/libponyrt-pic.a $(DESTDIR)$(ponydir)/lib/$(arch) endif ifneq ($(wildcard $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.bc),) $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.bc $(DESTDIR)$(ponydir)/lib/$(arch) endif ifneq 
($(wildcard $(lib)/libdtrace_probes.a),) $(SILENT)cp $(lib)/libdtrace_probes.a $(DESTDIR)$(ponydir)/lib/$(arch) endif $(SILENT)cp $(lib)/libponyc.a $(DESTDIR)$(ponydir)/lib/$(arch) $(SILENT)cp $(bin)/ponyc $(DESTDIR)$(ponydir)/bin $(SILENT)cp src/libponyrt/pony.h $(DESTDIR)$(ponydir)/include $(SILENT)cp src/common/pony/detail/atomics.h $(DESTDIR)$(ponydir)/include/pony/detail $(SILENT)cp -r packages $(DESTDIR)$(ponydir)/ ifeq ($$(symlink),yes) @mkdir -p $(DESTDIR)$(bindir) @mkdir -p $(DESTDIR)$(libdir) @mkdir -p $(DESTDIR)$(includedir)/pony/detail $(SILENT)ln $(symlink.flags) $(ponydir)/bin/ponyc $(DESTDIR)$(bindir)/ponyc $(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyrt.a $(DESTDIR)$(libdir)/libponyrt.a ifeq ($(OSTYPE),linux) $(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyrt-pic.a $(DESTDIR)$(libdir)/libponyrt-pic.a endif ifneq ($(wildcard $(DESTDIR)$(ponydir)/lib/libponyrt.bc),) $(SILENT)ln $(symlink.flags) $(ponydir)/lib/libponyrt.bc $(DESTDIR)$(libdir)/libponyrt.bc endif ifneq ($(wildcard $(PONY_BUILD_DIR)/libdtrace_probes.a),) $(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libdtrace_probes.a $(DESTDIR)$(libdir)/libdtrace_probes.a endif $(SILENT)ln $(symlink.flags) $(ponydir)/lib/$(arch)/libponyc.a $(DESTDIR)$(libdir)/libponyc.a $(SILENT)ln $(symlink.flags) $(ponydir)/include/pony.h $(DESTDIR)$(includedir)/pony.h $(SILENT)ln $(symlink.flags) $(ponydir)/include/pony/detail/atomics.h $(DESTDIR)$(includedir)/pony/detail/atomics.h endif endef $(eval $(call EXPAND_INSTALL)) define EXPAND_UNINSTALL uninstall: -$(SILENT)rm -rf $(ponydir) 2>/dev/null ||: -$(SILENT)rm $(bindir)/ponyc 2>/dev/null ||: -$(SILENT)rm $(libdir)/libponyrt.a 2>/dev/null ||: ifeq ($(OSTYPE),linux) -$(SILENT)rm $(libdir)/libponyrt-pic.a 2>/dev/null ||: endif ifneq ($(wildcard $(libdir)/libponyrt.bc),) -$(SILENT)rm $(libdir)/libponyrt.bc 2>/dev/null ||: endif ifneq ($(wildcard $(libdir)/libdtrace_probes.a),) -$(SILENT)rm $(libdir)/libdtrace_probes.a 2>/dev/null ||: endif -$(SILENT)rm $(libdir)/libponyc.a 2>/dev/null ||: -$(SILENT)rm $(includedir)/pony.h 2>/dev/null ||: -$(SILENT)rm -r $(includedir)/pony/ 2>/dev/null ||: endef $(eval $(call EXPAND_UNINSTALL)) ifdef verbose bench_verbose = -DCMAKE_VERBOSE_MAKEFILE=true endif ifeq ($(lto),yes) bench_lto = -DBENCHMARK_ENABLE_LTO=true endif benchmark: all $(SILENT)echo "Running libponyc benchmarks..." $(SILENT)$(PONY_BUILD_DIR)/libponyc.benchmarks $(SILENT)echo "Running libponyrt benchmarks..." 
$(SILENT)(PONY_BUILD_DIR)/libponyrt.benchmarks stdlib-debug: all $(SILENT)PONYPATH=.:$(PONYPATH) $(PONY_BUILD_DIR)/ponyc $(cross_args) -d -s --checktree --verify packages/stdlib stdlib: all $(SILENT)PONYPATH=.:$(PONYPATH) $(PONY_BUILD_DIR)/ponyc $(cross_args) --checktree --verify packages/stdlib test-stdlib-debug: stdlib-debug $(SILENT)$(cross_runner) ./stdlib --sequential $(SILENT)rm stdlib test-stdlib: stdlib $(SILENT)$(cross_runner) ./stdlib --sequential $(SILENT)rm stdlib test-core: all $(SILENT)$(PONY_BUILD_DIR)/libponyc.tests $(SILENT)$(PONY_BUILD_DIR)/libponyrt.tests test: test-core test-stdlib test-examples test-examples: all $(SILENT)PONYPATH=.:$(PONYPATH) find examples/*/* -name '*.pony' -print | xargs -n 1 dirname | sort -u | grep -v ffi- | xargs -n 1 -I {} $(PONY_BUILD_DIR)/ponyc $(cross_args) -d -s --checktree -o {} {} check-version: all $(SILENT)$(PONY_BUILD_DIR)/ponyc --version validate-grammar: all $(SILENT)$(PONY_BUILD_DIR)/ponyc --antlr > pony.g.new $(SILENT)diff pony.g pony.g.new $(SILENT)rm pony.g.new test-ci: all check-version test-core test-stdlib-debug test-stdlib test-examples validate-grammar test-cross-ci: cross_args=--triple=$(cross_triple) --cpu=$(cross_cpu) --link-arch=$(cross_arch) --linker='$(cross_linker)' test-cross-ci: cross_runner=$(QEMU_RUNNER) test-cross-ci: test-ci docs: all $(SILENT)$(PONY_BUILD_DIR)/ponyc packages/stdlib --docs --pass expr docs-online: docs $(SILENT)$(SED_INPLACE) 's/site_name:\ stdlib/site_name:\ Pony Standard Library/' stdlib-docs/mkdocs.yml # Note: linux only define EXPAND_DEPLOY deploy: test docs $(SILENT)bash .bintray.bash debian "$(package_base_version)" "$(package_name)" $(SILENT)bash .bintray.bash rpm "$(package_base_version)" "$(package_name)" $(SILENT)bash .bintray.bash source "$(package_base_version)" "$(package_name)" $(SILENT)rm -rf build/bin @mkdir -p build/bin @mkdir -p $(package)/usr/bin @mkdir -p $(package)/usr/include/pony/detail @mkdir -p $(package)/usr/lib @mkdir -p $(package)/usr/lib/pony/$(package_version)/bin @mkdir -p $(package)/usr/lib/pony/$(package_version)/include/pony/detail @mkdir -p $(package)/usr/lib/pony/$(package_version)/lib $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyc.a $(package)/usr/lib/pony/$(package_version)/lib $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt.a $(package)/usr/lib/pony/$(package_version)/lib ifeq ($(OSTYPE),linux) $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libponyrt-pic.a $(package)/usr/lib/pony/$(package_version)/lib endif ifneq ($(wildcard $(PONY_BUILD_DIR)/libponyrt.bc),) $(SILENT)cp $(PONY_BUILD_DIR)/libponyrt.bc $(package)/usr/lib/pony/$(package_version)/lib endif ifneq ($(wildcard $(PONY_BUILD_DIR)/libdtrace_probes.a),) $(SILENT)cp $(PONY_BUILD_DIR)/lib/$(arch)/libdtrace_probes.a $(package)/usr/lib/pony/$(package_version)/lib endif $(SILENT)cp $(PONY_BUILD_DIR)/ponyc $(package)/usr/lib/pony/$(package_version)/bin $(SILENT)cp src/libponyrt/pony.h $(package)/usr/lib/pony/$(package_version)/include $(SILENT)cp src/common/pony/detail/atomics.h $(package)/usr/lib/pony/$(package_version)/include/pony/detail $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyrt.a $(package)/usr/lib/libponyrt.a ifeq ($(OSTYPE),linux) $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyrt-pic.a $(package)/usr/lib/libponyrt-pic.a endif ifneq ($(wildcard /usr/lib/pony/$(package_version)/lib/libponyrt.bc),) $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyrt.bc $(package)/usr/lib/libponyrt.bc endif ifneq ($(wildcard 
/usr/lib/pony/$(package_version)/lib/libdtrace_probes.a),) $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libdtrace_probes.a $(package)/usr/lib/libdtrace_probes.a endif $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/lib/libponyc.a $(package)/usr/lib/libponyc.a $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/bin/ponyc $(package)/usr/bin/ponyc $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/include/pony.h $(package)/usr/include/pony.h $(SILENT)ln -f -s /usr/lib/pony/$(package_version)/include/pony/detail/atomics.h $(package)/usr/include/pony/detail/atomics.h $(SILENT)cp -r packages $(package)/usr/lib/pony/$(package_version)/ $(SILENT)fpm -s dir -t deb -C $(package) -p build/bin --name $(package_name) --conflicts "ponyc-master" --conflicts "ponyc-release" --version $(package_base_version) --description "The Pony Compiler" --provides "ponyc" --provides "ponyc-release" $(SILENT)fpm -s dir -t rpm -C $(package) -p build/bin --name $(package_name) --conflicts "ponyc-master" --conflicts "ponyc-release" --version $(package_base_version) --description "The Pony Compiler" --provides "ponyc" --provides "ponyc-release" --depends "ponydep-ncurses" $(SILENT)git archive HEAD > build/bin/$(archive) $(SILENT)tar rvf build/bin/$(archive) stdlib-docs $(SILENT)bzip2 build/bin/$(archive) $(SILENT)rm -rf $(package) build/bin/$(archive) endef $(eval $(call EXPAND_DEPLOY)) stats: @echo @echo '------------------------------' @echo 'Compiler and standard library ' @echo '------------------------------' @echo @cloc --read-lang-def=pony.cloc src packages @echo @echo '------------------------------' @echo 'Test suite:' @echo '------------------------------' @echo @cloc --read-lang-def=pony.cloc test clean: @rm -rf $(PONY_BUILD_DIR) @rm -rf $(package) @rm -rf build/bin @rm -rf stdlib-docs @rm -f src/common/dtrace_probes.h -@rmdir build 2>/dev/null ||: @echo 'Repository cleaned ($(PONY_BUILD_DIR)).' help: @echo 'Usage: make [config=name] [arch=name] [use=opt,...] [target]' @echo @echo 'CONFIGURATIONS:' @echo ' debug' @echo ' release (default)' @echo @echo 'ARCHITECTURE:' @echo ' native (default)' @echo ' [any compiler supported architecture]' @echo @echo 'Compile time default options:' @echo ' default_pic=true Make --pic the default' @echo ' default_ssl=Name Make Name the default ssl version' @echo ' where Name is one of:' @echo ' openssl_0.9.0' @echo ' openssl_1.1.0' @echo @echo 'USE OPTIONS:' @echo ' valgrind' @echo ' pooltrack' @echo ' dtrace' @echo ' actor_continuations' @echo ' coverage' @echo ' llvm_link_static' @echo ' scheduler_scaling_pthreads' @echo @echo 'TARGETS:' @echo ' libponyc Pony compiler library' @echo ' libponyrt Pony runtime' @echo ' libponyrt-pic Pony runtime -fpic' @echo ' libponyc.tests Test suite for libponyc' @echo ' libponyrt.tests Test suite for libponyrt' @echo ' libponyc.benchmarks Benchmark suite for libponyc' @echo ' libponyrt.benchmarks Benchmark suite for libponyrt' @echo ' ponyc Pony compiler executable' @echo @echo ' all Build all of the above (default)' @echo ' test Run test suite' @echo ' benchmark Build and run benchmark suite' @echo ' install Install ponyc' @echo ' install-libponyrt Install libponyrt only (for cross' @echo ' linking)' @echo ' install-libponyrt-pic Install libponyrt-pic only (for cross' @echo ' linking)' @echo ' uninstall Remove all versions of ponyc' @echo ' stats Print Pony cloc statistics' @echo ' clean Delete all build files' @echo
1
13,194
This shouldn't be added. It's not supported. It's not even the default. If we are going to support this, we need to have CI for it.
ponylang-ponyc
c
@@ -6,6 +6,5 @@ type API interface { Client() Client Daemon() Daemon Ping() Ping - RetrievalClient() RetrievalClient Swarm() Swarm }
1
// Package api holds the interface definitions for the Filecoin api. package api // API is the user interface to a Filecoin node. type API interface { Client() Client Daemon() Daemon Ping() Ping RetrievalClient() RetrievalClient Swarm() Swarm }
1
17,903
omg only four left!!!!
filecoin-project-venus
go
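For context on what removing a method from a Go interface costs: implementations are unaffected, only callers that went through the interface are. A small hypothetical sketch of that property (the `node` type below is invented for illustration, not the real Filecoin node):

package main

import "fmt"

// API is a trimmed, hypothetical version of the interface from the diff;
// only the shape matters for the illustration.
type API interface {
    Ping() string
}

// node keeps its extra method. Because Go interfaces are satisfied
// structurally, dropping RetrievalClient() from API does not break
// implementers -- only code that called the method through the
// interface has to change.
type node struct{}

func (node) Ping() string            { return "pong" }
func (node) RetrievalClient() string { return "legacy retrieval client" }

func main() {
    var a API = node{}
    fmt.Println(a.Ping())
    // a.RetrievalClient() no longer compiles via the interface.
}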
@@ -214,6 +214,12 @@ func (c *Client) InstallParticipationKeys(inputfile string) (part account.Partic return } + proto, ok := config.Consensus[protocol.ConsensusCurrentVersion] + if !ok { + err = fmt.Errorf("Unknown consensus protocol %s", protocol.ConsensusCurrentVersion) + return + } + // After successful install, remove the input copy of the // partkey so that old keys cannot be recovered after they // are used by algod. We try to delete the data inside
1
// Copyright (C) 2019 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package libgoal import ( "fmt" "io/ioutil" "math" "os" "path/filepath" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/account" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" ) // chooseParticipation chooses which participation keys to use for going online // based on the address, round number, and available participation databases func (c *Client) chooseParticipation(address basics.Address, round basics.Round) (part account.Participation, err error) { genID, err := c.GenesisID() if err != nil { return } // Get a list of files in the participation keys directory keyDir := filepath.Join(c.DataDir(), genID) files, err := ioutil.ReadDir(keyDir) if err != nil { return } // This lambda will be used for finding the desired file. checkIfFileIsDesiredKey := func(file os.FileInfo, expiresAfter basics.Round) (part account.Participation, err error) { var handle db.Accessor var partCandidate account.Participation // If it can't be a participation key database, skip it if !config.IsPartKeyFilename(file.Name()) { return } filename := file.Name() // Fetch a handle to this database handle, err = db.MakeErasableAccessor(filepath.Join(keyDir, filename)) if err != nil { // Couldn't open it, skip it return } defer handle.Close() // Fetch an account.Participation from the database partCandidate, err = account.RestoreParticipation(handle) if err != nil { // Couldn't read it, skip it return } // Return the Participation valid for this round that relates to the passed address // that expires farthest in the future. // Note that algod will sign votes with all possible Participations. so any should work // in the short-term. // In the future we should allow the user to specify exactly which partkeys to register. if partCandidate.FirstValid <= round && round <= partCandidate.LastValid && partCandidate.Parent == address && partCandidate.LastValid > expiresAfter { part = partCandidate } return } nilParticipation := account.Participation{} // Loop through each of the files; pick the one that expires farthest in the future. 
var expiry basics.Round for _, info := range files { // Use above lambda so the deferred handle closure happens each loop partCandidate, err := checkIfFileIsDesiredKey(info, expiry) if err == nil && partCandidate != nilParticipation { part = partCandidate expiry = part.LastValid } } if part == nilParticipation { // Couldn't find one err = fmt.Errorf("Couldn't find a participation key database for address %v valid at round %v in directory %v", address.GetUserAddress(), round, keyDir) return } return } func participationKeysPath(dataDir string, address basics.Address, firstValid, lastValid basics.Round) (string, error) { // Build /<dataDir>/<genesisID>/<address>.<first_round>.<last_round>.partkey first := uint64(firstValid) last := uint64(lastValid) fileName := config.PartKeyFilename(address.String(), first, last) return filepath.Join(dataDir, fileName), nil } // GenParticipationKeys creates a .partkey database for a given address, fills // it with keys, and installs it in the right place func (c *Client) GenParticipationKeys(address string, firstValid, lastValid, keyDilution uint64) (part account.Participation, filePath string, err error) { return c.GenParticipationKeysTo(address, firstValid, lastValid, keyDilution, "") } // GenParticipationKeysTo creates a .partkey database for a given address, fills // it with keys, and saves it in the specified output directory. func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution uint64, outDir string) (part account.Participation, filePath string, err error) { // Parse the address parsedAddr, err := basics.UnmarshalChecksumAddress(address) if err != nil { return } firstRound, lastRound := basics.Round(firstValid), basics.Round(lastValid) // Get the current protocol for ephemeral key parameters stat, err := c.Status() if err != nil { return } proto, ok := config.Consensus[protocol.ConsensusVersion(stat.LastVersion)] if !ok { err = fmt.Errorf("consensus protocol %s not supported", stat.LastVersion) return } // If output directory wasn't specified, store it in the current ledger directory. if outDir == "" { // Get the GenesisID for use in the participation key path var genID string genID, err = c.GenesisID() if err != nil { return } outDir = filepath.Join(c.DataDir(), genID) } // Connect to the database partKeyPath, err := participationKeysPath(outDir, parsedAddr, firstRound, lastRound) if err != nil { return } partdb, err := db.MakeErasableAccessor(partKeyPath) if err != nil { return } if keyDilution == 0 { keyDilution = proto.DefaultKeyDilution } // Fill the database with new participation keys newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution) return newPart, partKeyPath, err } // InstallParticipationKeys creates a .partkey database for a given address, // based on an existing database from inputfile. On successful install, it // deletes the input file. 
func (c *Client) InstallParticipationKeys(inputfile string) (part account.Participation, filePath string, err error) { // Get the GenesisID for use in the participation key path var genID string genID, err = c.GenesisID() if err != nil { return } outDir := filepath.Join(c.DataDir(), genID) inputdb, err := db.MakeErasableAccessor(inputfile) if err != nil { return } defer inputdb.Close() partkey, err := account.RestoreParticipation(inputdb) if err != nil { return } if partkey.Parent == (basics.Address{}) { err = fmt.Errorf("Cannot install partkey with missing (zero) parent address") return } newdbpath, err := participationKeysPath(outDir, partkey.Parent, partkey.FirstValid, partkey.LastValid) if err != nil { return } newdb, err := db.MakeErasableAccessor(newdbpath) if err != nil { return } newpartkey := partkey newpartkey.Store = newdb err = newpartkey.Persist() if err != nil { return } // After successful install, remove the input copy of the // partkey so that old keys cannot be recovered after they // are used by algod. We try to delete the data inside // sqlite first, so the key material is zeroed out from // disk blocks, but regardless of whether that works, we // delete the input file. The consensus protocol version // is irrelevant for the maxuint64 round number we pass in. errCh := partkey.DeleteOldKeys(basics.Round(math.MaxUint64), config.Consensus[protocol.ConsensusCurrentVersion]) err = <-errCh if err != nil { return } os.Remove(inputfile) return newpartkey, newdbpath, nil } // ListParticipationKeys returns the available participation keys, // as a map from database filename to Participation key object. func (c *Client) ListParticipationKeys() (partKeyFiles map[string]account.Participation, err error) { genID, err := c.GenesisID() if err != nil { return } // Get a list of files in the participation keys directory keyDir := filepath.Join(c.DataDir(), genID) files, err := ioutil.ReadDir(keyDir) if err != nil { return } partKeyFiles = make(map[string]account.Participation) for _, file := range files { // If it can't be a participation key database, skip it if !config.IsPartKeyFilename(file.Name()) { continue } filename := file.Name() // Fetch a handle to this database handle, err := db.MakeErasableAccessor(filepath.Join(keyDir, filename)) if err != nil { // Couldn't open it, skip it continue } // Fetch an account.Participation from the database part, err := account.RestoreParticipation(handle) handle.Close() if err != nil { // Couldn't read it, skip it continue } partKeyFiles[filename] = part } return }
1
36,851
I'm not really sure how this would happen. Once the binary is already compiled, the config.Consensus should already have the entry for protocol.ConsensusCurrentVersion.
algorand-go-algorand
go
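The guard this patch adds is the standard Go "comma ok" map lookup: check the boolean before trusting the entry, even when the key is expected to exist. A minimal, self-contained sketch of the same pattern (the `consensus` map, `consensusParams` struct and version strings here are made up, not the real go-algorand types):

package main

import "fmt"

// consensusParams stands in for a table of per-version protocol settings.
type consensusParams struct {
    DefaultKeyDilution uint64
}

var consensus = map[string]consensusParams{
    "v25": {DefaultKeyDilution: 10000},
}

// lookup uses the "comma ok" form so a missing version becomes an
// explicit error instead of a silently zero-valued struct.
func lookup(version string) (consensusParams, error) {
    p, ok := consensus[version]
    if !ok {
        return consensusParams{}, fmt.Errorf("unknown consensus protocol %s", version)
    }
    return p, nil
}

func main() {
    if _, err := lookup("v99"); err != nil {
        fmt.Println(err) // unknown consensus protocol v99
    }
}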
@@ -160,7 +160,7 @@ class LuigiTestCase(unittest.TestCase): temp = CmdlineParser._instance try: CmdlineParser._instance = None - run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args) + run_exit_status = luigi.run(args + ['--local-scheduler', '--no-lock']) finally: CmdlineParser._instance = temp return run_exit_status
1
# -*- coding: utf-8 -*- # # Copyright 2012-2015 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import functools import itertools import tempfile import re from contextlib import contextmanager import luigi import luigi.task_register import luigi.cmdline_parser from luigi.cmdline_parser import CmdlineParser from luigi import six import os import unittest def skipOnTravis(reason): return unittest.skipIf(os.getenv('TRAVIS') == 'true', reason) class with_config(object): """ Decorator to override config settings for the length of a function. Usage: .. code-block: python >>> import luigi.configuration >>> @with_config({'foo': {'bar': 'baz'}}) ... def my_test(): ... print(luigi.configuration.get_config().get("foo", "bar")) ... >>> my_test() baz >>> @with_config({'hoo': {'bar': 'buz'}}) ... @with_config({'foo': {'bar': 'baz'}}) ... def my_test(): ... print(luigi.configuration.get_config().get("foo", "bar")) ... print(luigi.configuration.get_config().get("hoo", "bar")) ... >>> my_test() baz buz >>> @with_config({'foo': {'bar': 'buz'}}) ... @with_config({'foo': {'bar': 'baz'}}) ... def my_test(): ... print(luigi.configuration.get_config().get("foo", "bar")) ... >>> my_test() baz >>> @with_config({'foo': {'bur': 'buz'}}) ... @with_config({'foo': {'bar': 'baz'}}) ... def my_test(): ... print(luigi.configuration.get_config().get("foo", "bar")) ... print(luigi.configuration.get_config().get("foo", "bur")) ... >>> my_test() baz buz >>> @with_config({'foo': {'bur': 'buz'}}) ... @with_config({'foo': {'bar': 'baz'}}, replace_sections=True) ... def my_test(): ... print(luigi.configuration.get_config().get("foo", "bar")) ... print(luigi.configuration.get_config().get("foo", "bur", "no_bur")) ... 
>>> my_test() baz no_bur """ def __init__(self, config, replace_sections=False): self.config = config self.replace_sections = replace_sections def _make_dict(self, old_dict): if self.replace_sections: old_dict.update(self.config) return old_dict def get_section(sec): old_sec = old_dict.get(sec, {}) new_sec = self.config.get(sec, {}) old_sec.update(new_sec) return old_sec all_sections = itertools.chain(old_dict.keys(), self.config.keys()) return {sec: get_section(sec) for sec in all_sections} def __call__(self, fun): @functools.wraps(fun) def wrapper(*args, **kwargs): import luigi.configuration orig_conf = luigi.configuration.LuigiConfigParser.instance() new_conf = luigi.configuration.LuigiConfigParser() luigi.configuration.LuigiConfigParser._instance = new_conf orig_dict = {k: dict(orig_conf.items(k)) for k in orig_conf.sections()} new_dict = self._make_dict(orig_dict) for (section, settings) in six.iteritems(new_dict): new_conf.add_section(section) for (name, value) in six.iteritems(settings): new_conf.set(section, name, value) try: return fun(*args, **kwargs) finally: luigi.configuration.LuigiConfigParser._instance = orig_conf return wrapper class RunOnceTask(luigi.Task): def __init__(self, *args, **kwargs): super(RunOnceTask, self).__init__(*args, **kwargs) self.comp = False def complete(self): return self.comp def run(self): self.comp = True class LuigiTestCase(unittest.TestCase): """ Tasks registred within a test case will get unregistered in a finalizer Instance caches are cleared before and after all runs """ def setUp(self): super(LuigiTestCase, self).setUp() self._stashed_reg = luigi.task_register.Register._get_reg() luigi.task_register.Register.clear_instance_cache() def tearDown(self): luigi.task_register.Register._set_reg(self._stashed_reg) super(LuigiTestCase, self).tearDown() luigi.task_register.Register.clear_instance_cache() def run_locally(self, args): """ Helper for running tests testing more of the stack, the command line parsing and task from name intstantiation parts in particular. """ temp = CmdlineParser._instance try: CmdlineParser._instance = None run_exit_status = luigi.run(['--local-scheduler', '--no-lock'] + args) finally: CmdlineParser._instance = temp return run_exit_status def run_locally_split(self, space_seperated_args): """ Helper for running tests testing more of the stack, the command line parsing and task from name intstantiation parts in particular. """ return self.run_locally(space_seperated_args.split(' ')) class parsing(object): """ Convenient decorator for test cases to set the parsing environment. """ def __init__(self, cmds): self.cmds = cmds def __call__(self, fun): @functools.wraps(fun) def wrapper(*args, **kwargs): with CmdlineParser.global_instance(self.cmds, allow_override=True): return fun(*args, **kwargs) return wrapper def in_parse(cmds, deferred_computation): with CmdlineParser.global_instance(cmds) as cp: deferred_computation(cp.get_task_obj()) @contextmanager def temporary_unloaded_module(python_file_contents): """ Create an importable module Return the name of importable module name given its file contents (source code) """ with tempfile.NamedTemporaryFile( dir='test/', prefix="_test_time_generated_module", suffix='.py') as temp_module_file: temp_module_file.file.write(python_file_contents) temp_module_file.file.flush() temp_module_path = temp_module_file.name temp_module_name = re.search(r'/(_test_time_generated_module.*).py', temp_module_path).group(1) yield temp_module_name
1
17,960
Is there a reason for the order swap here?
spotify-luigi
py
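The patch only reorders how the option list is concatenated, and whether that matters depends on luigi's own parser, which is what the reviewer is asking. As a generic illustration of why ordering can matter at all, the sketch below uses Go's standard flag package rather than luigi: there, parsing stops at the first non-flag argument, so the same flag behaves differently before and after a positional argument.

package main

import (
    "flag"
    "fmt"
)

func main() {
    // Flags placed before the positional argument are parsed normally.
    fs := flag.NewFlagSet("demo", flag.ContinueOnError)
    local := fs.Bool("local-scheduler", false, "run with a local scheduler")
    _ = fs.Parse([]string{"--local-scheduler", "MyTask"})
    fmt.Println(*local, fs.Args()) // true [MyTask]

    // Go's flag package stops at the first non-flag argument, so the
    // same flag given after "MyTask" is left unparsed.
    fs2 := flag.NewFlagSet("demo", flag.ContinueOnError)
    local2 := fs2.Bool("local-scheduler", false, "run with a local scheduler")
    _ = fs2.Parse([]string{"MyTask", "--local-scheduler"})
    fmt.Println(*local2, fs2.Args()) // false [MyTask --local-scheduler]
}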
@@ -241,8 +241,9 @@ namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Hosting } } - EqtTrace.Error("Unable to find path for dotnet host"); - return dotnetExeName; + string errorMessage = String.Format(Resources.NoDotnetDotExeFileExist, dotnetExeName); + EqtTrace.Error(errorMessage); + throw new FileNotFoundException(errorMessage); } private string GetTestHostPath(string runtimeConfigDevPath, string depsFilePath, string sourceDirectory)
1
// Copyright (c) Microsoft. All rights reserved. namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Hosting { using System; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; using System.Runtime.InteropServices; using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Resources; using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Helpers; using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Helpers.Interfaces; using Microsoft.VisualStudio.TestPlatform.ObjectModel; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client.Interfaces; using Microsoft.VisualStudio.TestPlatform.ObjectModel.Engine; using Microsoft.VisualStudio.TestPlatform.Utilities.Helpers; using Microsoft.VisualStudio.TestPlatform.Utilities.Helpers.Interfaces; using Newtonsoft.Json; using Newtonsoft.Json.Linq; using Microsoft.Extensions.DependencyModel; using Common.Logging; using ObjectModel.Logging; /// <summary> /// A host manager for <c>dotnet</c> core runtime. /// </summary> /// <remarks> /// Note that some functionality of this entity overlaps with that of <see cref="DefaultTestHostManager"/>. That is /// intentional since we want to move this to a separate assembly (with some runtime extensibility discovery). /// </remarks> public class DotnetTestHostManager : ITestHostManager { private readonly IProcessHelper processHelper; private readonly IFileHelper fileHelper; private ITestHostLauncher testHostLauncher; private Process testHostProcess; private EventHandler registeredExitHandler; private TestSessionMessageLogger logger = TestSessionMessageLogger.Instance; /// <summary> /// Initializes a new instance of the <see cref="DotnetTestHostManager"/> class. /// </summary> public DotnetTestHostManager() : this(new DefaultTestHostLauncher(), new ProcessHelper(), new FileHelper()) { } /// <summary> /// Initializes a new instance of the <see cref="DotnetTestHostManager"/> class. /// </summary> /// <param name="testHostLauncher">A test host launcher instance.</param> /// <param name="processHelper">Process helper instance.</param> /// <param name="fileHelper">File helper instance.</param> internal DotnetTestHostManager( ITestHostLauncher testHostLauncher, IProcessHelper processHelper, IFileHelper fileHelper) { this.testHostLauncher = testHostLauncher; this.processHelper = processHelper; this.fileHelper = fileHelper; } /// <summary> /// Gets a value indicating if the test host can be shared for multiple sources. /// </summary> /// <remarks> /// Dependency resolution for .net core projects are pivoted by the test project. Hence each test /// project must be launched in a separate test host process. /// </remarks> public bool Shared => false; /// <inheritdoc/> public void SetCustomLauncher(ITestHostLauncher customLauncher) { this.testHostLauncher = customLauncher; } /// <inheritdoc/> public int LaunchTestHost(TestProcessStartInfo testHostStartInfo) { var processId = this.testHostLauncher.LaunchTestHost(testHostStartInfo); this.testHostProcess = Process.GetProcessById(processId); return processId; } /// <inheritdoc/> public virtual TestProcessStartInfo GetTestHostProcessStartInfo( IEnumerable<string> sources, IDictionary<string, string> environmentVariables, TestRunnerConnectionInfo connectionInfo) { var startInfo = new TestProcessStartInfo(); var currentProcessPath = this.processHelper.GetCurrentProcessFileName(); // This host manager can create process start info for dotnet core targets only. 
// If already running with the dotnet executable, use it; otherwise pick up the dotnet available on path. // Wrap the paths with quotes in case dotnet executable is installed on a path with whitespace. if (currentProcessPath.EndsWith("dotnet", StringComparison.OrdinalIgnoreCase) || currentProcessPath.EndsWith("dotnet.exe", StringComparison.OrdinalIgnoreCase)) { startInfo.FileName = "\"" + currentProcessPath + "\""; } else { startInfo.FileName = "\"" + this.GetDotnetHostFullPath() + "\""; } EqtTrace.Verbose("DotnetTestHostmanager: Full path of dotnet.exe is {0}", startInfo.FileName); // .NET core host manager is not a shared host. It will expect a single test source to be provided. var args = "exec"; var sourcePath = sources.Single(); var sourceFile = Path.GetFileNameWithoutExtension(sourcePath); var sourceDirectory = Path.GetDirectoryName(sourcePath); // Probe for runtimeconfig and deps file for the test source var runtimeConfigPath = Path.Combine(sourceDirectory, string.Concat(sourceFile, ".runtimeconfig.json")); if (this.fileHelper.Exists(runtimeConfigPath)) { string argsToAdd = " --runtimeconfig \"" + runtimeConfigPath + "\""; args += argsToAdd; EqtTrace.Verbose("DotnetTestHostmanager: Adding {0} in args", argsToAdd); } else { EqtTrace.Verbose("DotnetTestHostmanager: File {0}, doesnot exist", runtimeConfigPath); } // Use the deps.json for test source var depsFilePath = Path.Combine(sourceDirectory, string.Concat(sourceFile, ".deps.json")); if (this.fileHelper.Exists(depsFilePath)) { string argsToAdd = " --depsfile \"" + depsFilePath + "\""; args += argsToAdd; EqtTrace.Verbose("DotnetTestHostmanager: Adding {0} in args", argsToAdd); } else { EqtTrace.Verbose("DotnetTestHostmanager: File {0}, doesnot exist", depsFilePath); } var runtimeConfigDevPath = Path.Combine(sourceDirectory, string.Concat(sourceFile, ".runtimeconfig.dev.json")); var testHostPath = this.GetTestHostPath(runtimeConfigDevPath, depsFilePath, sourceDirectory); if (this.fileHelper.Exists(testHostPath)) { EqtTrace.Verbose("DotnetTestHostmanager: Full path of testhost.dll is {0}", testHostPath); args += " \"" + testHostPath + "\" " + connectionInfo.ToCommandLineOptions(); } else { string message = string.Format(Resources.NoTestHostFileExist, sourcePath); EqtTrace.Verbose("DotnetTestHostmanager: " + message); throw new FileNotFoundException(message); } // Create a additional probing path args with Nuget.Client // args += "--additionalprobingpath xxx" // TODO this may be required in ASP.net, requires validation // Sample command line for the spawned test host // "D:\dd\gh\Microsoft\vstest\tools\dotnet\dotnet.exe" exec // --runtimeconfig G:\tmp\netcore-test\bin\Debug\netcoreapp1.0\netcore-test.runtimeconfig.json // --depsfile G:\tmp\netcore-test\bin\Debug\netcoreapp1.0\netcore-test.deps.json // --additionalprobingpath C:\Users\username\.nuget\packages\ // G:\nuget-package-path\microsoft.testplatform.testhost\version\**\testhost.dll // G:\tmp\netcore-test\bin\Debug\netcoreapp1.0\netcore-test.dll startInfo.Arguments = args; startInfo.EnvironmentVariables = environmentVariables ?? 
new Dictionary<string, string>(); startInfo.WorkingDirectory = Directory.GetCurrentDirectory(); return startInfo; } /// <inheritdoc/> public IEnumerable<string> GetTestPlatformExtensions(IEnumerable<string> sources) { var sourceDirectory = Path.GetDirectoryName(sources.Single()); if (!string.IsNullOrEmpty(sourceDirectory) && this.fileHelper.DirectoryExists(sourceDirectory)) { return this.fileHelper.EnumerateFiles(sourceDirectory, ".*.TestAdapter.dll", SearchOption.TopDirectoryOnly); } return Enumerable.Empty<string>(); } /// <inheritdoc/> public void RegisterForExitNotification(Action abortCallback) { if (this.testHostProcess != null && abortCallback != null) { this.registeredExitHandler = (sender, args) => abortCallback(); this.testHostProcess.Exited += this.registeredExitHandler; } } /// <inheritdoc/> public void DeregisterForExitNotification() { if (this.testHostProcess != null && this.registeredExitHandler != null) { this.testHostProcess.Exited -= this.registeredExitHandler; } } /// <summary> /// Get full path for the .net host /// </summary> /// <returns>Full path to <c>dotnet</c> executable</returns> /// <remarks>Debuggers require the full path of executable to launch it.</remarks> private string GetDotnetHostFullPath() { char separator = ';'; var dotnetExeName = "dotnet.exe"; // Use semicolon(;) as path separator for windows // colon(:) for Linux and OSX if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) { separator = ':'; dotnetExeName = "dotnet"; } var pathString = Environment.GetEnvironmentVariable("PATH"); foreach (string path in pathString.Split(separator)) { string exeFullPath = Path.Combine(path.Trim(), dotnetExeName); if (this.fileHelper.Exists(exeFullPath)) { return exeFullPath; } } EqtTrace.Error("Unable to find path for dotnet host"); return dotnetExeName; } private string GetTestHostPath(string runtimeConfigDevPath, string depsFilePath, string sourceDirectory) { string testHostPackageName = "microsoft.testplatform.testhost"; string testHostPath = string.Empty; if (this.fileHelper.Exists(runtimeConfigDevPath) && this.fileHelper.Exists(depsFilePath)) { EqtTrace.Verbose("DotnetTestHostmanager: Reading file {0} to get path of testhost.dll", depsFilePath); // Get testhost relative path using (var stream = this.fileHelper.GetStream(depsFilePath, FileMode.Open)) { var context = new DependencyContextJsonReader().Read(stream); var testhostPackage = context.RuntimeLibraries.Where(lib => lib.Name.Equals(testHostPackageName, StringComparison.CurrentCultureIgnoreCase)).FirstOrDefault(); if (testhostPackage != null) { foreach (var runtimeAssemblyGroup in testhostPackage.RuntimeAssemblyGroups) { foreach (var path in runtimeAssemblyGroup.AssetPaths) { if (path.EndsWith("testhost.dll", StringComparison.CurrentCultureIgnoreCase)) { testHostPath = path; break; } } } testHostPath = Path.Combine(testhostPackage.Path, testHostPath); EqtTrace.Verbose("DotnetTestHostmanager: Relative path of testhost.dll with respect to package folder is {0}", testHostPath); } } // Get probing path using (StreamReader file = new StreamReader(this.fileHelper.GetStream(runtimeConfigDevPath, FileMode.Open))) using (JsonTextReader reader = new JsonTextReader(file)) { JObject context = (JObject)JToken.ReadFrom(reader); JObject runtimeOptions = (JObject)context.GetValue("runtimeOptions"); JToken additionalProbingPaths = runtimeOptions.GetValue("additionalProbingPaths"); foreach (var x in additionalProbingPaths) { EqtTrace.Verbose("DotnetTestHostmanager: Looking for path {0} in folder {1}", testHostPath, 
x.ToString()); string testHostFullPath = Path.Combine(x.ToString(), testHostPath); if (this.fileHelper.Exists(testHostFullPath)) { return testHostFullPath; } } } } else { // *.runconfig.dev.json or *.deps.json file does not exist then it may be that user is running test from publish directory. // Get testhost.dll from source directory testHostPath = Path.Combine(sourceDirectory, "testhost.dll"); } return testHostPath; } } public class DefaultTestHostLauncher : ITestHostLauncher { private readonly IProcessHelper processHelper; /// <summary> /// Initializes a new instance of the <see cref="DefaultTestHostLauncher"/> class. /// </summary> public DefaultTestHostLauncher() : this(new ProcessHelper()) { } /// <summary> /// Initializes a new instance of the <see cref="DefaultTestHostLauncher"/> class. /// </summary> /// <param name="processHelper">Process helper instance.</param> internal DefaultTestHostLauncher(IProcessHelper processHelper) { this.processHelper = processHelper; } /// <inheritdoc/> public bool IsDebug => false; /// <inheritdoc/> public int LaunchTestHost(TestProcessStartInfo defaultTestHostStartInfo) { return this.processHelper.LaunchProcess( defaultTestHostStartInfo.FileName, defaultTestHostStartInfo.Arguments, defaultTestHostStartInfo.WorkingDirectory).Id; } } }
1
11,382
Where will this exception get caught?
microsoft-vstest
.cs
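Where the new FileNotFoundException ends up is for the vstest call sites to answer. As a loose analog in Go rather than C#, the change amounts to failing fast with an error the caller must handle instead of returning a fallback executable name. A hedged sketch of that design choice, not the vstest code:

package main

import (
    "fmt"
    "os"
    "os/exec"
)

// findDotnet is a hypothetical helper: instead of returning the bare
// executable name when nothing is found, it fails fast and leaves the
// caller to decide how to report the problem.
func findDotnet() (string, error) {
    path, err := exec.LookPath("dotnet")
    if err != nil {
        return "", fmt.Errorf("unable to find dotnet host on PATH: %w", err)
    }
    return path, nil
}

func main() {
    path, err := findDotnet()
    if err != nil {
        fmt.Fprintln(os.Stderr, err) // the caller decides: log, propagate, or fall back
        os.Exit(1)
    }
    fmt.Println(path)
}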
@@ -49,9 +49,10 @@ var mainEtherAccPass = "localaccount" type CliWallet struct { txOpts *bind.TransactOpts Owner common.Address - backend *ethclient.Client + Backend *ethclient.Client identityRegistry registry.IdentityRegistryTransactorSession tokens mysttoken.MystTokenTransactorSession + KS *keystore.KeyStore } // RegisterIdentity registers identity with given data on behalf of user
1
/* * Copyright (C) 2017 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package e2e import ( "context" "errors" "math/big" "os" "time" "github.com/MysteriumNetwork/payments/cli/helpers" mysttoken "github.com/MysteriumNetwork/payments/mysttoken/generated" registry "github.com/MysteriumNetwork/payments/registry/generated" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/params" "github.com/mysterium/node/tequilapi/client" ) //addresses should match those deployed in e2e test environment var tokenAddress = common.HexToAddress("0x0222eb28e1651E2A8bAF691179eCfB072457f00c") var paymentsAddress = common.HexToAddress("0x1955141ba8e77a5B56efBa8522034352c94f77Ea") //owner of contracts and main acc with ethereum var mainEtherAcc = common.HexToAddress("0xa754f0d31411d88e46aed455fa79b9fced122497") var mainEtherAccPass = "localaccount" // CliWallet represents operations which can be done with user controlled account type CliWallet struct { txOpts *bind.TransactOpts Owner common.Address backend *ethclient.Client identityRegistry registry.IdentityRegistryTransactorSession tokens mysttoken.MystTokenTransactorSession } // RegisterIdentity registers identity with given data on behalf of user func (wallet *CliWallet) RegisterIdentity(dto client.RegistrationStatusDTO) error { var Pub1 [32]byte var Pub2 [32]byte var S [32]byte var R [32]byte copy(Pub1[:], common.FromHex(dto.PublicKey.Part1)) copy(Pub2[:], common.FromHex(dto.PublicKey.Part2)) copy(R[:], common.FromHex(dto.Signature.R)) copy(S[:], common.FromHex(dto.Signature.S)) tx, err := wallet.identityRegistry.RegisterIdentity(Pub1, Pub2, dto.Signature.V, R, S) if err != nil { return err } return wallet.checkTxResult(tx) } // GiveEther transfers ether to given address func (wallet *CliWallet) GiveEther(address common.Address, amount, units int64) error { amountInWei := new(big.Int).Mul(big.NewInt(amount), big.NewInt(units)) nonce, err := wallet.backend.PendingNonceAt(context.Background(), wallet.Owner) if err != nil { return err } gasPrice, err := wallet.backend.SuggestGasPrice(context.Background()) if err != nil { return err } tx := types.NewTransaction(nonce, address, amountInWei, params.TxGas, gasPrice, nil) signedTx, err := wallet.txOpts.Signer(types.HomesteadSigner{}, wallet.Owner, tx) if err != nil { return err } err = wallet.backend.SendTransaction(context.Background(), signedTx) if err != nil { return err } return wallet.checkTxResult(signedTx) } // GiveTokens gives myst tokens to specified address func (wallet *CliWallet) GiveTokens(address common.Address, amount int64) error { tx, err := wallet.tokens.Mint(address, big.NewInt(amount)) if err != nil { return err } return 
wallet.checkTxResult(tx) } // ApproveForPayments allows specified amount of ERC20 tokens to be spend by payments contract func (wallet *CliWallet) ApproveForPayments(amount int64) error { tx, err := wallet.tokens.Approve(paymentsAddress, big.NewInt(amount)) if err != nil { return err } return wallet.checkTxResult(tx) } func (wallet *CliWallet) checkTxResult(tx *types.Transaction) error { for i := 0; i < 10; i++ { _, pending, err := wallet.backend.TransactionByHash(context.Background(), tx.Hash()) switch { case err != nil: return err case pending: time.Sleep(1 * time.Second) case !pending: break } } receipt, err := wallet.backend.TransactionReceipt(context.Background(), tx.Hash()) if err != nil { return err } if receipt.Status != 1 { return errors.New("tx marked as failed") } return nil } // NewMainAccWallet initializes wallet with main localnet account private key (owner of ERC20, payments and lots of ether) func NewMainAccWallet(keystoreDir string) (*CliWallet, error) { ks := initKeyStore(keystoreDir) return newCliWallet(mainEtherAcc, mainEtherAccPass, ks) } // NewUserWallet initializes wallet with generated account with specified keystore func NewUserWallet(keystoreDir string) (*CliWallet, error) { ks := initKeyStore(keystoreDir) acc, err := ks.NewAccount("") if err != nil { return nil, err } return newCliWallet(acc.Address, "", ks) } func newCliWallet(owner common.Address, passphrase string, ks *keystore.KeyStore) (*CliWallet, error) { client, err := newEthClient() if err != nil { return nil, err } ownerAcc := accounts.Account{Address: owner} err = ks.Unlock(ownerAcc, passphrase) if err != nil { return nil, err } transactor := helpers.CreateNewKeystoreTransactor(ks, &ownerAcc) tokensContract, err := mysttoken.NewMystTokenTransactor(tokenAddress, client) paymentsContract, err := registry.NewIdentityRegistryTransactor(paymentsAddress, client) if err != nil { return nil, err } return &CliWallet{ txOpts: transactor, Owner: owner, backend: client, tokens: mysttoken.MystTokenTransactorSession{ Contract: tokensContract, TransactOpts: *transactor, }, identityRegistry: registry.IdentityRegistryTransactorSession{ Contract: paymentsContract, TransactOpts: *transactor, }, }, nil } func initKeyStore(path string) *keystore.KeyStore { return keystore.NewKeyStore(path, keystore.StandardScryptN, keystore.StandardScryptP) } func registerIdentity(registrationData client.RegistrationStatusDTO) error { defer os.RemoveAll("testdataoutput") //master account - owner of conctracts, and can issue tokens masterAccWallet, err := NewMainAccWallet("../bin/localnet/account") if err != nil { return err } //random user userWallet, err := NewUserWallet("testdataoutput") if err != nil { return err } //user gets some ethers from master acc err = masterAccWallet.GiveEther(userWallet.Owner, 1, params.Ether) if err != nil { return err } //user buys some tokens in exchange err = masterAccWallet.GiveTokens(userWallet.Owner, 1000) if err != nil { return err } //user allows payments to take some tokens err = userWallet.ApproveForPayments(1000) if err != nil { return err } //user registers identity err = userWallet.RegisterIdentity(registrationData) return err }
1
11,642
Why is this field made public?
mysteriumnetwork-node
go
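The diff capitalises `backend` and adds an exported `KS` field, which is what prompts the question: in Go, an upper-case first letter is the only visibility control, so exporting a field hands it to every importer of the package (a common reason is letting e2e helpers in another package reach it). A tiny hypothetical sketch of the difference, not the actual CliWallet:

package main

import "fmt"

type ethClient struct{} // placeholder for *ethclient.Client

// CliWallet here mirrors only the visibility question from the diff;
// it is not the real type.
type CliWallet struct {
    // Backend is exported: any package that imports the defining one
    // could read and replace it (e.g. a test helper in another package).
    Backend *ethClient

    // backend stays private to the defining package; outside code would
    // need a constructor or accessor to reach it.
    backend *ethClient
}

func main() {
    w := CliWallet{Backend: &ethClient{}, backend: &ethClient{}}
    fmt.Printf("%+v\n", w) // both fields visible here only because main shares the package
}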
@@ -35,16 +35,16 @@ #include "proj_internal.h" #include "proj_internal.h" -static int is_nodata(float value) +static int is_nodata(float value, double vmultiplier) { /* nodata? */ /* GTX official nodata value if -88.88880f, but some grids also */ /* use other big values for nodata (e.g naptrans2008.gtx has */ /* nodata values like -2147479936), so test them too */ - return value > 1000 || value < -1000 || value == -88.88880f; + return value * vmultiplier > 1000 || value * vmultiplier < -1000 || value == -88.88880f; } -static double read_vgrid_value( PJ *defn, PJ_LP input, int *gridlist_count_p, PJ_GRIDINFO **tables, struct CTABLE *ct) { +static double read_vgrid_value( PJ *defn, PJ_LP input, double vmultiplier, int *gridlist_count_p, PJ_GRIDINFO **tables, struct CTABLE *ct) { int itable = 0; double value = HUGE_VAL; double grid_x, grid_y;
1
/****************************************************************************** * Project: PROJ.4 * Purpose: Apply vertical datum shifts based on grid shift files, normally * geoid grids mapping WGS84 to NAVD88 or something similar. * Author: Frank Warmerdam, [email protected] * ****************************************************************************** * Copyright (c) 2010, Frank Warmerdam <[email protected]> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. *****************************************************************************/ #define PJ_LIB__ #include <stdio.h> #include <string.h> #include "proj_math.h" #include "proj_internal.h" #include "proj_internal.h" static int is_nodata(float value) { /* nodata? */ /* GTX official nodata value if -88.88880f, but some grids also */ /* use other big values for nodata (e.g naptrans2008.gtx has */ /* nodata values like -2147479936), so test them too */ return value > 1000 || value < -1000 || value == -88.88880f; } static double read_vgrid_value( PJ *defn, PJ_LP input, int *gridlist_count_p, PJ_GRIDINFO **tables, struct CTABLE *ct) { int itable = 0; double value = HUGE_VAL; double grid_x, grid_y; long grid_ix, grid_iy; long grid_ix2, grid_iy2; float *cvs; /* do not deal with NaN coordinates */ /* cppcheck-suppress duplicateExpression */ if( isnan(input.phi) || isnan(input.lam) ) itable = *gridlist_count_p; /* keep trying till we find a table that works */ for ( ; itable < *gridlist_count_p; itable++ ) { PJ_GRIDINFO *gi = tables[itable]; ct = gi->ct; /* skip tables that don't match our point at all. */ if( ct->ll.phi > input.phi || ct->ll.lam > input.lam || ct->ll.phi + (ct->lim.phi-1) * ct->del.phi < input.phi || ct->ll.lam + (ct->lim.lam-1) * ct->del.lam < input.lam ) continue; /* If we have child nodes, check to see if any of them apply. */ while( gi->child != nullptr ) { PJ_GRIDINFO *child; for( child = gi->child; child != nullptr; child = child->next ) { struct CTABLE *ct1 = child->ct; if( ct1->ll.phi > input.phi || ct1->ll.lam > input.lam || ct1->ll.phi+(ct1->lim.phi-1)*ct1->del.phi < input.phi || ct1->ll.lam+(ct1->lim.lam-1)*ct1->del.lam < input.lam) continue; break; } /* we didn't find a more refined child node to use, so go with current grid */ if( child == nullptr ) { break; } /* Otherwise let's try for childrens children .. */ gi = child; ct = child->ct; } /* load the grid shift info if we don't have it. 
*/ if( ct->cvs == nullptr && !pj_gridinfo_load( pj_get_ctx(defn), gi ) ) { pj_ctx_set_errno( defn->ctx, PJD_ERR_FAILED_TO_LOAD_GRID ); return PJD_ERR_FAILED_TO_LOAD_GRID; } /* Interpolation a location within the grid */ grid_x = (input.lam - ct->ll.lam) / ct->del.lam; grid_y = (input.phi - ct->ll.phi) / ct->del.phi; grid_ix = lround(floor(grid_x)); grid_iy = lround(floor(grid_y)); grid_x -= grid_ix; grid_y -= grid_iy; grid_ix2 = grid_ix + 1; if( grid_ix2 >= ct->lim.lam ) grid_ix2 = ct->lim.lam - 1; grid_iy2 = grid_iy + 1; if( grid_iy2 >= ct->lim.phi ) grid_iy2 = ct->lim.phi - 1; cvs = (float *) ct->cvs; { float value_a = cvs[grid_ix + grid_iy * ct->lim.lam]; float value_b = cvs[grid_ix2 + grid_iy * ct->lim.lam]; float value_c = cvs[grid_ix + grid_iy2 * ct->lim.lam]; float value_d = cvs[grid_ix2 + grid_iy2 * ct->lim.lam]; double total_weight = 0.0; int n_weights = 0; value = 0.0f; if( !is_nodata(value_a) ) { double weight = (1.0-grid_x) * (1.0-grid_y); value += value_a * weight; total_weight += weight; n_weights ++; } if( !is_nodata(value_b) ) { double weight = (grid_x) * (1.0-grid_y); value += value_b * weight; total_weight += weight; n_weights ++; } if( !is_nodata(value_c) ) { double weight = (1.0-grid_x) * (grid_y); value += value_c * weight; total_weight += weight; n_weights ++; } if( !is_nodata(value_d) ) { double weight = (grid_x) * (grid_y); value += value_d * weight; total_weight += weight; n_weights ++; } if( n_weights == 0 ) value = HUGE_VAL; else if( n_weights != 4 ) value /= total_weight; } } return value; } /************************************************************************/ /* pj_apply_vgridshift() */ /* */ /* This implementation takes uses the gridlist from a coordinate */ /* system definition. If the gridlist has not yet been */ /* populated in the coordinate system definition we set it up */ /* now. */ /************************************************************************/ int pj_apply_vgridshift( PJ *defn, const char *listname, PJ_GRIDINFO ***gridlist_p, int *gridlist_count_p, int inverse, long point_count, int point_offset, double *x, double *y, double *z ) { int i; static int debug_count = 0; PJ_GRIDINFO **tables; struct CTABLE ct; if( *gridlist_p == nullptr ) { *gridlist_p = pj_gridlist_from_nadgrids( pj_get_ctx(defn), pj_param(defn->ctx,defn->params,listname).s, gridlist_count_p ); if( *gridlist_p == nullptr || *gridlist_count_p == 0 ) return defn->ctx->last_errno; } if( *gridlist_count_p == 0 ) { pj_ctx_set_errno( defn->ctx, PJD_ERR_FAILED_TO_LOAD_GRID); return PJD_ERR_FAILED_TO_LOAD_GRID; } tables = *gridlist_p; defn->ctx->last_errno = 0; for( i = 0; i < point_count; i++ ) { double value; long io = i * point_offset; PJ_LP input; input.phi = y[io]; input.lam = x[io]; value = read_vgrid_value(defn, input, gridlist_count_p, tables, &ct); if( inverse ) z[io] -= value; else z[io] += value; if( value != HUGE_VAL ) { if( debug_count++ < 20 ) { proj_log_trace(defn, "pj_apply_gridshift(): used %s", ct.id); break; } } if( value == HUGE_VAL ) { int itable; char gridlist[3000]; proj_log_debug(defn, "pj_apply_vgridshift(): failed to find a grid shift table for\n" " location (%.7fdW,%.7fdN)", x[io] * RAD_TO_DEG, y[io] * RAD_TO_DEG ); gridlist[0] = '\0'; for( itable = 0; itable < *gridlist_count_p; itable++ ) { PJ_GRIDINFO *gi = tables[itable]; if( strlen(gridlist) + strlen(gi->gridname) > sizeof(gridlist)-100 ) { strcat( gridlist, "..." 
); break; } if( itable == 0 ) sprintf( gridlist, " tried: %s", gi->gridname ); else sprintf( gridlist+strlen(gridlist), ",%s", gi->gridname ); } proj_log_debug(defn, "%s", gridlist); pj_ctx_set_errno( defn->ctx, PJD_ERR_GRID_AREA ); return PJD_ERR_GRID_AREA; } } return 0; } /**********************************************/ int proj_vgrid_init(PJ* P, const char *grids) { /********************************************** Initizalize and populate gridlist. Takes a PJ-object and the plus-parameter name that is used in the proj-string to specify the grids to load, e.g. "+grids". The + should be left out here. Returns the number of loaded grids. ***********************************************/ /* prepend "s" to the "grids" string to allow usage with pj_param */ char *sgrids = (char *) pj_malloc( (strlen(grids)+1+1) *sizeof(char) ); sprintf(sgrids, "%s%s", "s", grids); if (P->vgridlist_geoid == nullptr) { P->vgridlist_geoid = pj_gridlist_from_nadgrids( P->ctx, pj_param(P->ctx, P->params, sgrids).s, &(P->vgridlist_geoid_count) ); if( P->vgridlist_geoid == nullptr || P->vgridlist_geoid_count == 0 ) { pj_dealloc(sgrids); return 0; } } if (P->vgridlist_geoid_count == 0) { proj_errno_set(P, PJD_ERR_FAILED_TO_LOAD_GRID); } pj_dealloc(sgrids); return P->vgridlist_geoid_count; } /***********************************************/ double proj_vgrid_value(PJ *P, PJ_LP lp){ /*********************************************** Read grid value at position lp in grids loaded with proj_grid_init. Returns the grid value of the given coordinate. ************************************************/ struct CTABLE used_grid; double value; memset(&used_grid, 0, sizeof(struct CTABLE)); value = read_vgrid_value(P, lp, &(P->vgridlist_geoid_count), P->vgridlist_geoid, &used_grid); proj_log_trace(P, "proj_vgrid_value: (%f, %f) = %f", lp.lam*RAD_TO_DEG, lp.phi*RAD_TO_DEG, value); return value; }
1
10,244
Is the `vmultiplier` only used here for checking if a grid value is nodata?
OSGeo-PROJ
cpp
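The C++ routine in the record above does nodata-aware bilinear interpolation: each of the four cell corners contributes weight * value, corners flagged as nodata are skipped, and the sum is renormalized by the total weight (or becomes HUGE_VAL when no corner is usable). A minimal Python sketch of that weighting scheme, with a hypothetical nodata sentinel and a plain nested list standing in for the PROJ grid structure:

import math

NODATA = -88.8888  # assumed sentinel for this sketch; real grids define their own marker

def is_nodata(v):
    # Hypothetical nodata test standing in for the grid-specific check.
    return abs(v - NODATA) < 1e-6

def bilinear_nodata(grid, x, y):
    # Interpolate grid[row][col] at fractional (x, y), skipping nodata corners,
    # mirroring the weight accumulation and renormalization shown above.
    ix, iy = int(math.floor(x)), int(math.floor(y))
    fx, fy = x - ix, y - iy
    ix2 = min(ix + 1, len(grid[0]) - 1)
    iy2 = min(iy + 1, len(grid) - 1)
    corners = [
        (grid[iy][ix],   (1 - fx) * (1 - fy)),
        (grid[iy][ix2],  fx * (1 - fy)),
        (grid[iy2][ix],  (1 - fx) * fy),
        (grid[iy2][ix2], fx * fy),
    ]
    value = total = 0.0
    n = 0
    for v, w in corners:
        if not is_nodata(v):
            value += v * w
            total += w
            n += 1
    if n == 0:
        return math.inf      # no usable corner, analogous to HUGE_VAL
    if n != 4:
        value /= total       # renormalize when some corners were skipped
    return value

print(bilinear_nodata([[1.0, 2.0], [3.0, 4.0]], 0.5, 0.5))  # -> 2.5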
@@ -239,7 +239,7 @@ func signTestCert(key crypto.Signer) *x509.Certificate { SerialNumber: serialNumber, SignatureAlgorithm: x509.SHA256WithRSA, Subject: pkix.Name{ - Organization: []string{defaultOrganization}, + Organization: []string{"cert-manager"}, CommonName: commonName, }, NotBefore: time.Now(),
1
/* Copyright 2019 The Jetstack cert-manager contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package pki import ( "crypto" "crypto/ecdsa" "crypto/elliptic" "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "fmt" "strings" "testing" "time" v1 "github.com/jetstack/cert-manager/pkg/apis/certmanager/v1" ) func buildCertificateWithKeyParams(keyAlgo v1.PrivateKeyAlgorithm, keySize int) *v1.Certificate { return &v1.Certificate{ Spec: v1.CertificateSpec{ CommonName: "test", DNSNames: []string{"test.test"}, PrivateKey: &v1.CertificatePrivateKey{ Algorithm: keyAlgo, Size: keySize, }, }, } } func ecCurveForKeySize(keySize int) (elliptic.Curve, error) { switch keySize { case 0, ECCurve256: return elliptic.P256(), nil case ECCurve384: return elliptic.P384(), nil case ECCurve521: return elliptic.P521(), nil default: return nil, fmt.Errorf("unknown ecdsa key size specified: %d", keySize) } } func TestGeneratePrivateKeyForCertificate(t *testing.T) { type testT struct { name string keyAlgo v1.PrivateKeyAlgorithm keySize int expectErr bool expectErrStr string } tests := []testT{ { name: "rsa key with weak keysize (< 2048)", keyAlgo: v1.RSAKeyAlgorithm, keySize: 1024, expectErr: true, expectErrStr: "weak rsa key size specified", }, { name: "rsa key with too big keysize (> 8192)", keyAlgo: v1.RSAKeyAlgorithm, keySize: 8196, expectErr: true, expectErrStr: "rsa key size specified too big", }, { name: "ecdsa key with unsupported keysize", keyAlgo: v1.ECDSAKeyAlgorithm, keySize: 100, expectErr: true, expectErrStr: "unsupported ecdsa key size specified", }, { name: "unsupported key algo specified", keyAlgo: v1.PrivateKeyAlgorithm("blahblah"), keySize: 256, expectErr: true, expectErrStr: "unsupported private key algorithm specified", }, { name: "rsa key with keysize 2048", keyAlgo: v1.RSAKeyAlgorithm, keySize: 2048, expectErr: false, }, { name: "rsa key with keysize 4096", keyAlgo: v1.RSAKeyAlgorithm, keySize: 4096, expectErr: false, }, { name: "ecdsa key with keysize 256", keyAlgo: v1.ECDSAKeyAlgorithm, keySize: 256, expectErr: false, }, { name: "ecdsa key with keysize 384", keyAlgo: v1.ECDSAKeyAlgorithm, keySize: 384, expectErr: false, }, { name: "ecdsa key with keysize 521", keyAlgo: v1.ECDSAKeyAlgorithm, keySize: 521, expectErr: false, }, { name: "valid key size with key algorithm not specified", keyAlgo: v1.PrivateKeyAlgorithm(""), keySize: 2048, expectErr: false, }, { name: "rsa with keysize not specified", keyAlgo: v1.RSAKeyAlgorithm, expectErr: false, }, { name: "ecdsa with keysize not specified", keyAlgo: v1.ECDSAKeyAlgorithm, expectErr: false, }, } testFn := func(test testT) func(*testing.T) { return func(t *testing.T) { privateKey, err := GeneratePrivateKeyForCertificate(buildCertificateWithKeyParams(test.keyAlgo, test.keySize)) if test.expectErr { if err == nil { t.Error("expected err, but got no error") return } if !strings.Contains(err.Error(), test.expectErrStr) { t.Errorf("expected err string to match: '%s', got: '%s'", test.expectErrStr, err.Error()) return } } if !test.expectErr { if err != nil { 
t.Errorf("expected no err, but got '%q'", err) return } if test.keyAlgo == "rsa" { // For rsa algorithm, if keysize is not provided, the default of 2048 will be used expectedRsaKeySize := 2048 if test.keySize != 0 { expectedRsaKeySize = test.keySize } key, ok := privateKey.(*rsa.PrivateKey) if !ok { t.Errorf("expected rsa private key, but got %T", privateKey) return } actualKeySize := key.N.BitLen() if expectedRsaKeySize != actualKeySize { t.Errorf("expected %d, but got %d", expectedRsaKeySize, actualKeySize) return } } if test.keyAlgo == "ecdsa" { // For ecdsa algorithm, if keysize is not provided, the default of 256 will be used expectedEcdsaKeySize := ECCurve256 if test.keySize != 0 { expectedEcdsaKeySize = test.keySize } key, ok := privateKey.(*ecdsa.PrivateKey) if !ok { t.Errorf("expected ecdsa private key, but got %T", privateKey) return } actualKeySize := key.Curve.Params().BitSize if expectedEcdsaKeySize != actualKeySize { t.Errorf("expected %d but got %d", expectedEcdsaKeySize, actualKeySize) return } curve, err := ecCurveForKeySize(test.keySize) if err != nil { t.Errorf(err.Error()) return } if !curve.IsOnCurve(key.PublicKey.X, key.PublicKey.Y) { t.Error("expected key to be on specified curve") return } } } } } for _, test := range tests { t.Run(test.name, testFn(test)) } } func signTestCert(key crypto.Signer) *x509.Certificate { commonName := "testingcert" serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { panic(fmt.Errorf("failed to generate serial number: %s", err.Error())) } template := &x509.Certificate{ Version: 3, BasicConstraintsValid: true, SerialNumber: serialNumber, SignatureAlgorithm: x509.SHA256WithRSA, Subject: pkix.Name{ Organization: []string{defaultOrganization}, CommonName: commonName, }, NotBefore: time.Now(), NotAfter: time.Now().Add(v1.DefaultCertificateDuration), // see http://golang.org/pkg/crypto/x509/#KeyUsage KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment, } _, crt, err := SignCertificate(template, template, key.Public(), key) if err != nil { panic(fmt.Errorf("error signing test cert: %v", err)) } return crt } func TestPublicKeyMatchesCertificate(t *testing.T) { privKey1, err := GenerateRSAPrivateKey(2048) if err != nil { t.Errorf("error generating private key: %v", err) } privKey2, err := GenerateRSAPrivateKey(2048) if err != nil { t.Errorf("error generating private key: %v", err) } testCert1 := signTestCert(privKey1) testCert2 := signTestCert(privKey2) matches, err := PublicKeyMatchesCertificate(privKey1.Public(), testCert1) if err != nil { t.Errorf("expected no error, but got: %v", err) } if !matches { t.Errorf("expected private key to match certificate, but it did not") } matches, err = PublicKeyMatchesCertificate(privKey1.Public(), testCert2) if err != nil { t.Errorf("expected no error, but got: %v", err) } if matches { t.Errorf("expected private key to not match certificate, but it did") } } func TestPublicKeyMatchesCertificateRequest(t *testing.T) { privKey1, err := GenerateRSAPrivateKey(2048) if err != nil { t.Errorf("error generating private key: %v", err) } privKey2, err := GenerateRSAPrivateKey(2048) if err != nil { t.Errorf("error generating private key: %v", err) } template := &x509.CertificateRequest{ Version: 3, // SignatureAlgorithm: sigAlgo, Subject: pkix.Name{ CommonName: "cn", }, } csr1, err := x509.CreateCertificateRequest(rand.Reader, template, privKey1) if err != nil { t.Errorf("error generating csr1: %v", err) } csr2, err := x509.CreateCertificateRequest(rand.Reader, template, 
privKey2) if err != nil { t.Errorf("error generating csr2: %v", err) } parsedCSR1, err := x509.ParseCertificateRequest(csr1) if err != nil { t.Errorf("error parsing csr1: %v", err) } parsedCSR2, err := x509.ParseCertificateRequest(csr2) if err != nil { t.Errorf("error parsing csr2: %v", err) } matches, err := PublicKeyMatchesCSR(privKey1.Public(), parsedCSR1) if err != nil { t.Errorf("expected no error, but got: %v", err) } if !matches { t.Errorf("expected private key to match certificate, but it did not") } matches, err = PublicKeyMatchesCSR(privKey1.Public(), parsedCSR2) if err != nil { t.Errorf("expected no error, but got: %v", err) } if matches { t.Errorf("expected private key to not match certificate, but it did") } } func TestPrivateKeyEncodings(t *testing.T) { type testT struct { name string key []byte keyEncoding v1.PrivateKeyEncoding expectErr bool expectErrStr string } const privKey = ` -----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC0sETnD5CNh/VZ K3F3snYlD4t39YL30s56SiNmAOftZEvPkDqMzZh11/DlUggR9kQh/SpIQr/Gxg3o TVG22AnjJynxlw1ikMNUa/Emq2of+MrEpn9oBKl+qproA07UrcVYuMrcPd7qRAco 2gwByIAJ2uIBR6OUO8bjwUhXlZ4Ui8ZqVwZD5ja+IlmlHpH5UDgzE7U2L7PfCHWZ wTc2rtlgj6U2qQrxTOmHsiwJ6O8wjoR84XUtzPCAYHuh8pEbRZwDA/pVjFpA9+7w JqKShcykSn9EQdCEuEG2oYP22AZ++X8kL7hQiynDWopYXB134wHlz5f5arN7zpmo lzWQj/SRAgMBAAECggEAPn3ANtGxQuHEvyRBSA6WwdaQe9qTgzaLZswBViP2Eqpf ddgCB/SLSCZ5EDbAx8WOZtryJq+/N/YDlVY4bq5lLQS8guulG3PJuobijmc2evxG 1KBo7AbAwCgtDxUlzHSVDkxLDMTxNcB1YXGYOX2omr/y7lJihr/t15Nfe7spQx1H p/lTZO01LMa2sau1K3UyUO7b/fHmfiTDru3Ulf7MvXfdRM0CcUkdVVCMsNUmuQrV aXJOuWHxlnxEHvy//Kjio1Efq52QCg5v5mNrliV4S+Fn/4rKKg1yEuMHQXL1Db0V VU6tsEAWhY2ynab2My0Hzyj++lxP4vCau7s2HZrEgQKBgQDaC4DbwmAFIB4t/wq4 tfFP6yWQ1PHdHiJWm0VBooiD9Tg69Ar2WlmyEKD1DoEo6hA3xgKYraV9CGoDsIuz Hx5w/sCP0DdcR2COK5JcPwo6QjzCPWvqSKFU2YRH9CniBeNZ/y8bRY2Pxxf+UfZM oxBnIgBR7fsZ5ahcIT/qXEzhuQKBgQDUJBgnxRSu9pawJqBdX1qvjwymNjX+3vNW aHoROr/z1Qz58NHobM3exrePMtznKaE3i52wv9jmAxbmOvyTSkc0/6hsKVYI+uVm ipQd62GRJeFUeXznBA0HpoEe9Kw3fCg0QInDEnhXQwdLXD2E0XFqfYEAY0UEX2ky kbuFvs/1mQKBgHiDLx9HGUb773JAqi5Y8RzYvJSF/X/W7eesPeT+7jDw0blTtNNg MH8ITc3jzGNOqtQIrtqv7u1iPyX259+CbZLKWqehz1dDZAxv7J7rgL2eanJ4/DIr HnAFIOWb/6Ia2wOc1O5fzNQQmCRKLLVC0wgNdNvTiptPoXP6NJdVmHCRAoGAN9js twOOJ2VpVCRHEW257P/Gv7cDzf4Zp4THpeOGhwVubho02HBUXjKIPl2QhBaUh4/s yowm6J6ll6Stu4TRbCVlzFuxd5m9bJpM1feSbui/AkMdW5/YYkw2L9UPxWedGexn mAAzyB0wPWmiFGYi6nrxzA1WLQmFIzf0WwhZrPkCgYAfxHLDdjSfDp2yV6suFFVw wr/9z3Hed+XCQvl+SSWNB3x9ge4q3mXh+XmYkVL144MBO2+KCENZHJHZsHPKR9Dl OfVRvbdkFnyPY03IcQPpOifT2W0ydTaP02xqqeZ1s9ZfyQWNECt+E6SQwGtgkC3n O7WnDn8nuLFdW+NzzbIrTw== -----END PRIVATE KEY-----` privateKeyBytes := []byte(privKey) tests := []testT{ { name: "rsa 2048 private key with empty key encoding", key: privateKeyBytes, keyEncoding: v1.PKCS1, expectErr: false, }, { name: "rsa 2048 private key with pkcs1 key encoding", key: privateKeyBytes, keyEncoding: v1.PKCS1, expectErr: false, }, { name: "rsa 2048 private key with pkcs8 key encoding", key: privateKeyBytes, keyEncoding: v1.PKCS8, expectErr: false, }, } testFn := func(test testT) func(*testing.T) { return func(t *testing.T) { block, _ := pem.Decode(privateKeyBytes) decodedKey, err := x509.ParsePKCS8PrivateKey(block.Bytes) if err != nil { t.Fatal(err) } encodedKey, err := EncodePrivateKey(decodedKey, test.keyEncoding) if test.expectErr { if err == nil { t.Error("expected err, but got no error") return } if !strings.Contains(err.Error(), test.expectErrStr) { t.Errorf("expected err string to match: '%s', got: '%s'", test.expectErrStr, err.Error()) return } } if 
!test.expectErr { if err != nil { t.Errorf("expected no err, but got '%q'", err) return } expectedEncoding := test.keyEncoding actualEncoding := v1.PrivateKeyEncoding("") block, _ := pem.Decode(encodedKey) switch block.Type { case "PRIVATE KEY": actualEncoding = v1.PKCS8 case "RSA PRIVATE KEY": actualEncoding = v1.PKCS1 case "EC PRIVATE KEY": actualEncoding = v1.PKCS1 default: err := "unknown key encoding for private key" t.Errorf("%s", err) } if expectedEncoding != actualEncoding { t.Errorf("expected %s, but got %s", expectedEncoding, actualEncoding) return } } } } for _, test := range tests { t.Run(test.name, testFn(test)) } }
1
23,220
Is this deliberate? If so, why?
jetstack-cert-manager
go
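The Go test in the record above classifies a private key's encoding purely from the PEM block type: a generic "PRIVATE KEY" block is treated as PKCS#8, while "RSA PRIVATE KEY" and "EC PRIVATE KEY" are treated as the traditional PKCS#1-style encoding. A small self-contained Python sketch of the same classification, assuming the input is a PEM string and using no external libraries:

def pem_key_encoding(pem_text: str) -> str:
    # Classify a PEM private key by its block header, mirroring the switch
    # over block.Type in the test above.
    for line in pem_text.splitlines():
        line = line.strip()
        if line.startswith("-----BEGIN ") and line.endswith("-----"):
            block_type = line[len("-----BEGIN "):-len("-----")]
            if block_type == "PRIVATE KEY":
                return "PKCS8"
            if block_type in ("RSA PRIVATE KEY", "EC PRIVATE KEY"):
                return "PKCS1"
            raise ValueError(f"unknown key encoding for block type {block_type!r}")
    raise ValueError("no PEM block found")

print(pem_key_encoding("-----BEGIN RSA PRIVATE KEY-----\nMIIB...\n-----END RSA PRIVATE KEY-----"))  # -> PKCS1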
@@ -5,7 +5,7 @@ from mitmproxy import exceptions from mitmproxy.net import tls as net_tls from mitmproxy.proxy.protocol import base -# taken from https://testssl.sh/openssl-rfc.mappping.html +# taken from https://testssl.sh/openssl-rfc.mapping.html CIPHER_ID_NAME_MAP = { 0x00: 'NULL-MD5', 0x01: 'NULL-MD5',
1
from typing import Optional # noqa from typing import Union from mitmproxy import exceptions from mitmproxy.net import tls as net_tls from mitmproxy.proxy.protocol import base # taken from https://testssl.sh/openssl-rfc.mappping.html CIPHER_ID_NAME_MAP = { 0x00: 'NULL-MD5', 0x01: 'NULL-MD5', 0x02: 'NULL-SHA', 0x03: 'EXP-RC4-MD5', 0x04: 'RC4-MD5', 0x05: 'RC4-SHA', 0x06: 'EXP-RC2-CBC-MD5', 0x07: 'IDEA-CBC-SHA', 0x08: 'EXP-DES-CBC-SHA', 0x09: 'DES-CBC-SHA', 0x0a: 'DES-CBC3-SHA', 0x0b: 'EXP-DH-DSS-DES-CBC-SHA', 0x0c: 'DH-DSS-DES-CBC-SHA', 0x0d: 'DH-DSS-DES-CBC3-SHA', 0x0e: 'EXP-DH-RSA-DES-CBC-SHA', 0x0f: 'DH-RSA-DES-CBC-SHA', 0x10: 'DH-RSA-DES-CBC3-SHA', 0x11: 'EXP-EDH-DSS-DES-CBC-SHA', 0x12: 'EDH-DSS-DES-CBC-SHA', 0x13: 'EDH-DSS-DES-CBC3-SHA', 0x14: 'EXP-EDH-RSA-DES-CBC-SHA', 0x15: 'EDH-RSA-DES-CBC-SHA', 0x16: 'EDH-RSA-DES-CBC3-SHA', 0x17: 'EXP-ADH-RC4-MD5', 0x18: 'ADH-RC4-MD5', 0x19: 'EXP-ADH-DES-CBC-SHA', 0x1a: 'ADH-DES-CBC-SHA', 0x1b: 'ADH-DES-CBC3-SHA', # 0x1c: , # 0x1d: , 0x1e: 'KRB5-DES-CBC-SHA', 0x1f: 'KRB5-DES-CBC3-SHA', 0x20: 'KRB5-RC4-SHA', 0x21: 'KRB5-IDEA-CBC-SHA', 0x22: 'KRB5-DES-CBC-MD5', 0x23: 'KRB5-DES-CBC3-MD5', 0x24: 'KRB5-RC4-MD5', 0x25: 'KRB5-IDEA-CBC-MD5', 0x26: 'EXP-KRB5-DES-CBC-SHA', 0x27: 'EXP-KRB5-RC2-CBC-SHA', 0x28: 'EXP-KRB5-RC4-SHA', 0x29: 'EXP-KRB5-DES-CBC-MD5', 0x2a: 'EXP-KRB5-RC2-CBC-MD5', 0x2b: 'EXP-KRB5-RC4-MD5', 0x2f: 'AES128-SHA', 0x30: 'DH-DSS-AES128-SHA', 0x31: 'DH-RSA-AES128-SHA', 0x32: 'DHE-DSS-AES128-SHA', 0x33: 'DHE-RSA-AES128-SHA', 0x34: 'ADH-AES128-SHA', 0x35: 'AES256-SHA', 0x36: 'DH-DSS-AES256-SHA', 0x37: 'DH-RSA-AES256-SHA', 0x38: 'DHE-DSS-AES256-SHA', 0x39: 'DHE-RSA-AES256-SHA', 0x3a: 'ADH-AES256-SHA', 0x3b: 'NULL-SHA256', 0x3c: 'AES128-SHA256', 0x3d: 'AES256-SHA256', 0x3e: 'DH-DSS-AES128-SHA256', 0x3f: 'DH-RSA-AES128-SHA256', 0x40: 'DHE-DSS-AES128-SHA256', 0x41: 'CAMELLIA128-SHA', 0x42: 'DH-DSS-CAMELLIA128-SHA', 0x43: 'DH-RSA-CAMELLIA128-SHA', 0x44: 'DHE-DSS-CAMELLIA128-SHA', 0x45: 'DHE-RSA-CAMELLIA128-SHA', 0x46: 'ADH-CAMELLIA128-SHA', 0x62: 'EXP1024-DES-CBC-SHA', 0x63: 'EXP1024-DHE-DSS-DES-CBC-SHA', 0x64: 'EXP1024-RC4-SHA', 0x65: 'EXP1024-DHE-DSS-RC4-SHA', 0x66: 'DHE-DSS-RC4-SHA', 0x67: 'DHE-RSA-AES128-SHA256', 0x68: 'DH-DSS-AES256-SHA256', 0x69: 'DH-RSA-AES256-SHA256', 0x6a: 'DHE-DSS-AES256-SHA256', 0x6b: 'DHE-RSA-AES256-SHA256', 0x6c: 'ADH-AES128-SHA256', 0x6d: 'ADH-AES256-SHA256', 0x80: 'GOST94-GOST89-GOST89', 0x81: 'GOST2001-GOST89-GOST89', 0x82: 'GOST94-NULL-GOST94', 0x83: 'GOST2001-GOST89-GOST89', 0x84: 'CAMELLIA256-SHA', 0x85: 'DH-DSS-CAMELLIA256-SHA', 0x86: 'DH-RSA-CAMELLIA256-SHA', 0x87: 'DHE-DSS-CAMELLIA256-SHA', 0x88: 'DHE-RSA-CAMELLIA256-SHA', 0x89: 'ADH-CAMELLIA256-SHA', 0x8a: 'PSK-RC4-SHA', 0x8b: 'PSK-3DES-EDE-CBC-SHA', 0x8c: 'PSK-AES128-CBC-SHA', 0x8d: 'PSK-AES256-CBC-SHA', # 0x8e: , # 0x8f: , # 0x90: , # 0x91: , # 0x92: , # 0x93: , # 0x94: , # 0x95: , 0x96: 'SEED-SHA', 0x97: 'DH-DSS-SEED-SHA', 0x98: 'DH-RSA-SEED-SHA', 0x99: 'DHE-DSS-SEED-SHA', 0x9a: 'DHE-RSA-SEED-SHA', 0x9b: 'ADH-SEED-SHA', 0x9c: 'AES128-GCM-SHA256', 0x9d: 'AES256-GCM-SHA384', 0x9e: 'DHE-RSA-AES128-GCM-SHA256', 0x9f: 'DHE-RSA-AES256-GCM-SHA384', 0xa0: 'DH-RSA-AES128-GCM-SHA256', 0xa1: 'DH-RSA-AES256-GCM-SHA384', 0xa2: 'DHE-DSS-AES128-GCM-SHA256', 0xa3: 'DHE-DSS-AES256-GCM-SHA384', 0xa4: 'DH-DSS-AES128-GCM-SHA256', 0xa5: 'DH-DSS-AES256-GCM-SHA384', 0xa6: 'ADH-AES128-GCM-SHA256', 0xa7: 'ADH-AES256-GCM-SHA384', 0x5600: 'TLS_FALLBACK_SCSV', 0xc001: 'ECDH-ECDSA-NULL-SHA', 0xc002: 'ECDH-ECDSA-RC4-SHA', 0xc003: 'ECDH-ECDSA-DES-CBC3-SHA', 0xc004: 
'ECDH-ECDSA-AES128-SHA', 0xc005: 'ECDH-ECDSA-AES256-SHA', 0xc006: 'ECDHE-ECDSA-NULL-SHA', 0xc007: 'ECDHE-ECDSA-RC4-SHA', 0xc008: 'ECDHE-ECDSA-DES-CBC3-SHA', 0xc009: 'ECDHE-ECDSA-AES128-SHA', 0xc00a: 'ECDHE-ECDSA-AES256-SHA', 0xc00b: 'ECDH-RSA-NULL-SHA', 0xc00c: 'ECDH-RSA-RC4-SHA', 0xc00d: 'ECDH-RSA-DES-CBC3-SHA', 0xc00e: 'ECDH-RSA-AES128-SHA', 0xc00f: 'ECDH-RSA-AES256-SHA', 0xc010: 'ECDHE-RSA-NULL-SHA', 0xc011: 'ECDHE-RSA-RC4-SHA', 0xc012: 'ECDHE-RSA-DES-CBC3-SHA', 0xc013: 'ECDHE-RSA-AES128-SHA', 0xc014: 'ECDHE-RSA-AES256-SHA', 0xc015: 'AECDH-NULL-SHA', 0xc016: 'AECDH-RC4-SHA', 0xc017: 'AECDH-DES-CBC3-SHA', 0xc018: 'AECDH-AES128-SHA', 0xc019: 'AECDH-AES256-SHA', 0xc01a: 'SRP-3DES-EDE-CBC-SHA', 0xc01b: 'SRP-RSA-3DES-EDE-CBC-SHA', 0xc01c: 'SRP-DSS-3DES-EDE-CBC-SHA', 0xc01d: 'SRP-AES-128-CBC-SHA', 0xc01e: 'SRP-RSA-AES-128-CBC-SHA', 0xc01f: 'SRP-DSS-AES-128-CBC-SHA', 0xc020: 'SRP-AES-256-CBC-SHA', 0xc021: 'SRP-RSA-AES-256-CBC-SHA', 0xc022: 'SRP-DSS-AES-256-CBC-SHA', 0xc023: 'ECDHE-ECDSA-AES128-SHA256', 0xc024: 'ECDHE-ECDSA-AES256-SHA384', 0xc025: 'ECDH-ECDSA-AES128-SHA256', 0xc026: 'ECDH-ECDSA-AES256-SHA384', 0xc027: 'ECDHE-RSA-AES128-SHA256', 0xc028: 'ECDHE-RSA-AES256-SHA384', 0xc029: 'ECDH-RSA-AES128-SHA256', 0xc02a: 'ECDH-RSA-AES256-SHA384', 0xc02b: 'ECDHE-ECDSA-AES128-GCM-SHA256', 0xc02c: 'ECDHE-ECDSA-AES256-GCM-SHA384', 0xc02d: 'ECDH-ECDSA-AES128-GCM-SHA256', 0xc02e: 'ECDH-ECDSA-AES256-GCM-SHA384', 0xc02f: 'ECDHE-RSA-AES128-GCM-SHA256', 0xc030: 'ECDHE-RSA-AES256-GCM-SHA384', 0xc031: 'ECDH-RSA-AES128-GCM-SHA256', 0xc032: 'ECDH-RSA-AES256-GCM-SHA384', 0xcc13: 'ECDHE-RSA-CHACHA20-POLY1305', 0xcc14: 'ECDHE-ECDSA-CHACHA20-POLY1305', 0xcc15: 'DHE-RSA-CHACHA20-POLY1305', 0xff00: 'GOST-MD5', 0xff01: 'GOST-GOST94', 0xff02: 'GOST-GOST89MAC', 0xff03: 'GOST-GOST89STREAM', 0x010080: 'RC4-MD5', 0x020080: 'EXP-RC4-MD5', 0x030080: 'RC2-CBC-MD5', 0x040080: 'EXP-RC2-CBC-MD5', 0x050080: 'IDEA-CBC-MD5', 0x060040: 'DES-CBC-MD5', 0x0700c0: 'DES-CBC3-MD5', 0x080080: 'RC4-64-MD5', } # We manually need to specify this, otherwise OpenSSL may select a non-HTTP2 cipher by default. # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=apache-2.2.15&openssl=1.0.2&hsts=yes&profile=old DEFAULT_CLIENT_CIPHERS = ( "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:" "ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:" "ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:" "ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:" "DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:" "DHE-RSA-AES256-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:" "AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:DES-CBC3-SHA:" "HIGH:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:" "!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA" ) class TlsLayer(base.Layer): """ The TLS layer implements transparent TLS connections. It exposes the following API to child layers: - :py:meth:`set_server_tls` to modify TLS settings for the server connection. - :py:attr:`server_tls`, :py:attr:`server_sni` as read-only attributes describing the current TLS settings for the server connection. 
""" def __init__(self, ctx, client_tls, server_tls, custom_server_sni=None): super().__init__(ctx) self._client_tls = client_tls self._server_tls = server_tls self._custom_server_sni = custom_server_sni self._client_hello = None # type: Optional[net_tls.ClientHello] def __call__(self): """ The strategy for establishing TLS is as follows: First, we determine whether we need the server cert to establish ssl with the client. If so, we first connect to the server and then to the client. If not, we only connect to the client and do the server handshake lazily. An additional complexity is that we need to mirror SNI and ALPN from the client when connecting to the server. We manually peek into the connection and parse the ClientHello message to obtain these values. """ if self._client_tls: # Peek into the connection, read the initial client hello and parse it to obtain SNI and ALPN values. try: self._client_hello = net_tls.ClientHello.from_file(self.client_conn.rfile) except exceptions.TlsProtocolException as e: self.log("Cannot parse Client Hello: %s" % repr(e), "error") # Do we need to do a server handshake now? # There are two reasons why we would want to establish TLS with the server now: # 1. If we already have an existing server connection and server_tls is True, # we need to establish TLS now because .connect() will not be called anymore. # 2. We may need information from the server connection for the client handshake. # # A couple of factors influence (2): # 2.1 There actually is (or will be) a TLS-enabled upstream connection # 2.2 An upstream connection is not wanted by the user if --no-upstream-cert is passed. # 2.3 An upstream connection is implied by add_upstream_certs_to_client_chain # 2.4 The client wants to negotiate an alternative protocol in its handshake, we need to find out # what is supported by the server # 2.5 The client did not sent a SNI value, we don't know the certificate subject. client_tls_requires_server_connection = ( self._server_tls and self.config.options.upstream_cert and ( self.config.options.add_upstream_certs_to_client_chain or self._client_tls and ( self._client_hello.alpn_protocols or not self._client_hello.sni ) ) ) establish_server_tls_now = ( (self.server_conn.connected() and self._server_tls) or client_tls_requires_server_connection ) if self._client_tls and establish_server_tls_now: self._establish_tls_with_client_and_server() elif self._client_tls: self._establish_tls_with_client() elif establish_server_tls_now: self._establish_tls_with_server() layer = self.ctx.next_layer(self) layer() def __repr__(self): # pragma: no cover if self._client_tls and self._server_tls: return "TlsLayer(client and server)" elif self._client_tls: return "TlsLayer(client)" elif self._server_tls: return "TlsLayer(server)" else: return "TlsLayer(inactive)" def connect(self): if not self.server_conn.connected(): self.ctx.connect() if self._server_tls and not self.server_conn.tls_established: self._establish_tls_with_server() def set_server_tls(self, server_tls: bool, sni: Union[str, None, bool] = None) -> None: """ Set the TLS settings for the next server connection that will be established. This function will not alter an existing connection. Args: server_tls: Shall we establish TLS with the server? sni: ``str`` for a custom SNI value, ``None`` for the client SNI value, ``False`` if no SNI value should be sent. 
""" self._server_tls = server_tls self._custom_server_sni = sni @property def server_tls(self): """ ``True``, if the next server connection that will be established should be upgraded to TLS. """ return self._server_tls @property def server_sni(self): """ The Server Name Indication we want to send with the next server TLS handshake. """ if self._custom_server_sni is False: return None else: return self._custom_server_sni or self._client_hello and self._client_hello.sni @property def alpn_for_client_connection(self): return self.server_conn.get_alpn_proto_negotiated() def __alpn_select_callback(self, conn_, options): # This gets triggered if we haven't established an upstream connection yet. default_alpn = b'http/1.1' if self.alpn_for_client_connection in options: choice = bytes(self.alpn_for_client_connection) elif default_alpn in options: choice = bytes(default_alpn) else: choice = options[0] self.log("ALPN for client: %s" % choice, "debug") return choice def _establish_tls_with_client_and_server(self): try: self.ctx.connect() self._establish_tls_with_server() except Exception: # If establishing TLS with the server fails, we try to establish TLS with the client nonetheless # to send an error message over TLS. try: self._establish_tls_with_client() except: pass raise self._establish_tls_with_client() def _establish_tls_with_client(self): self.log("Establish TLS with client", "debug") cert, key, chain_file = self._find_cert() if self.config.options.add_upstream_certs_to_client_chain: extra_certs = self.server_conn.server_certs else: extra_certs = None try: self.client_conn.convert_to_tls( cert, key, method=self.config.openssl_method_client, options=self.config.openssl_options_client, cipher_list=self.config.options.ciphers_client or DEFAULT_CLIENT_CIPHERS, dhparams=self.config.certstore.dhparams, chain_file=chain_file, alpn_select_callback=self.__alpn_select_callback, extra_chain_certs=extra_certs, ) # Some TLS clients will not fail the handshake, # but will immediately throw an "unexpected eof" error on the first read. # The reason for this might be difficult to find, so we try to peek here to see if it # raises ann error. self.client_conn.rfile.peek(1) except exceptions.TlsException as e: raise exceptions.ClientHandshakeException( "Cannot establish TLS with client (sni: {sni}): {e}".format( sni=self._client_hello.sni, e=repr(e) ), self._client_hello.sni or repr(self.server_conn.address) ) def _establish_tls_with_server(self): self.log("Establish TLS with server", "debug") try: alpn = None if self._client_tls: if self._client_hello.alpn_protocols: # We only support http/1.1 and h2. # If the server only supports spdy (next to http/1.1), it may select that # and mitmproxy would enter TCP passthrough mode, which we want to avoid. alpn = [ x for x in self._client_hello.alpn_protocols if not (x.startswith(b"h2-") or x.startswith(b"spdy")) ] if alpn and b"h2" in alpn and not self.config.options.http2: alpn.remove(b"h2") if self.client_conn.tls_established and self.client_conn.get_alpn_proto_negotiated(): # If the client has already negotiated an ALP, then force the # server to use the same. This can only happen if the host gets # changed after the initial connection was established. E.g.: # * the client offers http/1.1 and h2, # * the initial host is only capable of http/1.1, # * then the first server connection negotiates http/1.1, # * but after the server_conn change, the new host offers h2 # * which results in garbage because the layers don' match. 
alpn = [self.client_conn.get_alpn_proto_negotiated()] # We pass through the list of ciphers send by the client, because some HTTP/2 servers # will select a non-HTTP/2 compatible cipher from our default list and then hang up # because it's incompatible with h2. :-) ciphers_server = self.config.options.ciphers_server if not ciphers_server and self._client_tls: ciphers_server = [] for id in self._client_hello.cipher_suites: if id in CIPHER_ID_NAME_MAP.keys(): ciphers_server.append(CIPHER_ID_NAME_MAP[id]) ciphers_server = ':'.join(ciphers_server) args = net_tls.client_arguments_from_options(self.config.options) args["cipher_list"] = ciphers_server self.server_conn.establish_tls( sni=self.server_sni, alpn_protos=alpn, **args ) tls_cert_err = self.server_conn.ssl_verification_error if tls_cert_err is not None: self.log(str(tls_cert_err), "warn") self.log("Ignoring server verification error, continuing with connection", "warn") except exceptions.InvalidCertificateException as e: raise exceptions.InvalidServerCertificate(str(e)) except exceptions.TlsException as e: raise exceptions.TlsProtocolException( "Cannot establish TLS with {host}:{port} (sni: {sni}): {e}".format( host=self.server_conn.address[0], port=self.server_conn.address[1], sni=self.server_sni, e=repr(e) ) ) proto = self.alpn_for_client_connection.decode() if self.alpn_for_client_connection else '-' self.log("ALPN selected by server: {}".format(proto), "debug") def _find_cert(self): """ This function determines the Common Name (CN) and Subject Alternative Names (SANs) our certificate should have and then fetches a matching cert from the certstore. """ host = None sans = set() # In normal operation, the server address should always be known at this point. # However, we may just want to establish TLS so that we can send an error message to the client, # in which case the address can be None. if self.server_conn.address: host = self.server_conn.address[0].encode("idna") # Should we incorporate information from the server certificate? use_upstream_cert = ( self.server_conn and self.server_conn.tls_established and self.config.options.upstream_cert ) if use_upstream_cert: upstream_cert = self.server_conn.cert sans.update(upstream_cert.altnames) if upstream_cert.cn: sans.add(host) host = upstream_cert.cn.decode("utf8").encode("idna") # Also add SNI values. if self._client_hello.sni: sans.add(self._client_hello.sni.encode("idna")) if self._custom_server_sni: sans.add(self._custom_server_sni.encode("idna")) # RFC 2818: If a subjectAltName extension of type dNSName is present, that MUST be used as the identity. # In other words, the Common Name is irrelevant then. if host: sans.add(host) return self.config.certstore.get_cert(host, list(sans))
1
13,953
Both URLs work. I corrected it anyway.
mitmproxy-mitmproxy
py
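The TLS layer in the record above forwards the client's cipher preferences to the server by mapping numeric ClientHello cipher suite IDs to OpenSSL names via CIPHER_ID_NAME_MAP and joining them with ':'. A stripped-down Python sketch of that translation step; the tiny map below is illustrative only, not the full table from the module:

# Illustrative subset of the cipher ID to OpenSSL name mapping.
CIPHER_ID_NAME_MAP = {
    0x2f: "AES128-SHA",
    0x35: "AES256-SHA",
    0x9c: "AES128-GCM-SHA256",
    0xc02f: "ECDHE-RSA-AES128-GCM-SHA256",
}

def openssl_cipher_string(client_cipher_ids):
    # Build an OpenSSL cipher string from ClientHello cipher suite IDs,
    # silently skipping IDs with no known OpenSSL name, as the
    # server-side cipher passthrough above does.
    names = [CIPHER_ID_NAME_MAP[i] for i in client_cipher_ids if i in CIPHER_ID_NAME_MAP]
    return ":".join(names)

print(openssl_cipher_string([0xc02f, 0x9c, 0x1337]))
# -> ECDHE-RSA-AES128-GCM-SHA256:AES128-GCM-SHA256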
@@ -43,12 +43,14 @@ import java.util.Collection; * @since 5.0 */ public class PrePostAdviceReactiveMethodInterceptor implements MethodInterceptor { - private Authentication anonymous = new AnonymousAuthenticationToken("key", "anonymous", - AuthorityUtils.createAuthorityList("ROLE_ANONYMOUS")); + private final Authentication anonymous = new AnonymousAuthenticationToken("key", "anonymous", + AuthorityUtils.createAuthorityList("ROLE_ANONYMOUS")); private final MethodSecurityMetadataSource attributeSource; - private final PreInvocationAuthorizationAdvice preInvocationAdvice; + private PreInvocationAuthorizationAdvice preInvocationAdvice; + + private PreInvocationAuthorizationReactiveAdvice preInvocationReactiveAdvice; private final PostInvocationAuthorizationAdvice postAdvice;
1
/* * Copyright 2002-2018 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.security.access.prepost; import org.aopalliance.intercept.MethodInterceptor; import org.aopalliance.intercept.MethodInvocation; import org.reactivestreams.Publisher; import org.springframework.security.access.AccessDeniedException; import org.springframework.security.access.ConfigAttribute; import org.springframework.security.access.method.MethodSecurityMetadataSource; import org.springframework.security.authentication.AnonymousAuthenticationToken; import org.springframework.security.core.Authentication; import org.springframework.security.core.authority.AuthorityUtils; import org.springframework.security.core.context.ReactiveSecurityContextHolder; import org.springframework.security.core.context.SecurityContext; import org.springframework.util.Assert; import reactor.core.Exceptions; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.lang.reflect.Method; import java.util.Collection; /** * A {@link MethodInterceptor} that supports {@link PreAuthorize} and {@link PostAuthorize} for methods that return * {@link Mono} or {@link Flux} * * @author Rob Winch * @since 5.0 */ public class PrePostAdviceReactiveMethodInterceptor implements MethodInterceptor { private Authentication anonymous = new AnonymousAuthenticationToken("key", "anonymous", AuthorityUtils.createAuthorityList("ROLE_ANONYMOUS")); private final MethodSecurityMetadataSource attributeSource; private final PreInvocationAuthorizationAdvice preInvocationAdvice; private final PostInvocationAuthorizationAdvice postAdvice; /** * Creates a new instance * @param attributeSource the {@link MethodSecurityMetadataSource} to use * @param preInvocationAdvice the {@link PreInvocationAuthorizationAdvice} to use * @param postInvocationAdvice the {@link PostInvocationAuthorizationAdvice} to use */ public PrePostAdviceReactiveMethodInterceptor(MethodSecurityMetadataSource attributeSource, PreInvocationAuthorizationAdvice preInvocationAdvice, PostInvocationAuthorizationAdvice postInvocationAdvice) { Assert.notNull(attributeSource, "attributeSource cannot be null"); Assert.notNull(preInvocationAdvice, "preInvocationAdvice cannot be null"); Assert.notNull(postInvocationAdvice, "postInvocationAdvice cannot be null"); this.attributeSource = attributeSource; this.preInvocationAdvice = preInvocationAdvice; this.postAdvice = postInvocationAdvice; } @Override public Object invoke(final MethodInvocation invocation) { Method method = invocation.getMethod(); Class<?> returnType = method.getReturnType(); if (!Publisher.class.isAssignableFrom(returnType)) { throw new IllegalStateException("The returnType " + returnType + " on " + method + " must return an instance of org.reactivestreams.Publisher (i.e. 
Mono / Flux) in order to support Reactor Context"); } Class<?> targetClass = invocation.getThis().getClass(); Collection<ConfigAttribute> attributes = this.attributeSource .getAttributes(method, targetClass); PreInvocationAttribute preAttr = findPreInvocationAttribute(attributes); Mono<Authentication> toInvoke = ReactiveSecurityContextHolder.getContext() .map(SecurityContext::getAuthentication) .defaultIfEmpty(this.anonymous) .filter( auth -> this.preInvocationAdvice.before(auth, invocation, preAttr)) .switchIfEmpty(Mono.defer(() -> Mono.error(new AccessDeniedException("Denied")))); PostInvocationAttribute attr = findPostInvocationAttribute(attributes); if (Mono.class.isAssignableFrom(returnType)) { return toInvoke .flatMap( auth -> this.<Mono<?>>proceed(invocation) .map( r -> attr == null ? r : this.postAdvice.after(auth, invocation, attr, r)) ); } if (Flux.class.isAssignableFrom(returnType)) { return toInvoke .flatMapMany( auth -> this.<Flux<?>>proceed(invocation) .map( r -> attr == null ? r : this.postAdvice.after(auth, invocation, attr, r)) ); } return toInvoke .flatMapMany( auth -> Flux.from(this.<Publisher<?>>proceed(invocation)) .map( r -> attr == null ? r : this.postAdvice.after(auth, invocation, attr, r)) ); } private static <T extends Publisher<?>> T proceed(final MethodInvocation invocation) { try { return (T) invocation.proceed(); } catch(Throwable throwable) { throw Exceptions.propagate(throwable); } } private static PostInvocationAttribute findPostInvocationAttribute( Collection<ConfigAttribute> config) { for (ConfigAttribute attribute : config) { if (attribute instanceof PostInvocationAttribute) { return (PostInvocationAttribute) attribute; } } return null; } private static PreInvocationAttribute findPreInvocationAttribute( Collection<ConfigAttribute> config) { for (ConfigAttribute attribute : config) { if (attribute instanceof PreInvocationAttribute) { return (PreInvocationAttribute) attribute; } } return null; } }
1
15,633
Rather than have another member and an if/else statement, if the user passes in `PreInvocationAuthorizationAdvice` it could be adapted to match `PreInvocationAuthorizationReactiveAdvice`.
spring-projects-spring-security
java
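The review comment above suggests the adapter approach: rather than holding both a blocking and a reactive pre-invocation advice and branching between them, wrap the blocking one so it satisfies the reactive contract and keep a single field. A language-neutral Python sketch of that shape, with hypothetical class names that echo but do not reproduce the Spring interfaces:

class BlockingPreAdvice:
    # Stands in for a blocking pre-invocation advice: a plain boolean check.
    def before(self, auth, invocation, attribute) -> bool:
        return auth is not None

class ReactivePreAdvice:
    # Stands in for a reactive contract: the decision is awaitable.
    async def before(self, auth, invocation, attribute) -> bool:
        raise NotImplementedError

class BlockingToReactiveAdapter(ReactivePreAdvice):
    # Adapts a blocking advice to the reactive interface, so the interceptor
    # only ever stores one field of the reactive type and needs no if/else
    # at call time.
    def __init__(self, delegate: BlockingPreAdvice):
        self._delegate = delegate

    async def before(self, auth, invocation, attribute) -> bool:
        # The blocking check is assumed cheap here; a real adapter might
        # offload it to a worker thread instead.
        return self._delegate.before(auth, invocation, attribute)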
@@ -210,8 +210,7 @@ class Sync { /** * A callback passed to `Realm.App.Sync.setLogger` when instrumenting the Realm Sync client with a custom logger. * @callback Realm.App.Sync~logCallback - * @param {number} level The level of the log entry between 0 and 8 inclusively. - * Use this as an index into `['all', 'trace', 'debug', 'detail', 'info', 'warn', 'error', 'fatal', 'off']` to get the name of the level. + * @param {string} level The level of the log which can be ['all', 'trace', 'debug', 'detail', 'info', 'warn', 'error', 'fatal', 'off']. * @param {string} message The message of the log entry. */
1
//////////////////////////////////////////////////////////////////////////// // // Copyright 2016 Realm Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////// /* eslint getter-return: "off", no-dupe-class-members: "off" */ /** * This describes the options used to create a {@link Realm.App} instance. * @typedef {Object} Realm.App~AppConfiguration * @property {string} id - The id of the MongoDB Realm app. * @property {string} url - The URL of the MongoDB Realm end-point. * @property {number} timeout - General timeout (in millisecs) for requests. * @property {Realm.App~LocalAppConfiguration} app - local app configuration */ /** * This describes the options used for local app configuration. * @typedef {Object} Realm.App~LocalAppConfiguration * @property {string} name - The name of the app. * @property {string} version - The version of the app. */ /** * This describes the different options used to create a {@link Realm} instance with Realm Cloud synchronization. * @typedef {Object} Realm.App.Sync~SyncConfiguration * @property {Realm.User} user - A {@link Realm.User} object obtained by calling `Realm.App.logIn`. * @property {string|number|BSON.ObjectId|null} partitionValue - The value of the partition key. * @property {function} [error] - A callback function which is called in error situations. * The `error` callback can take up to five optional arguments: `name`, `message`, `isFatal`, * `category`, and `code`. * * @property {Object} [customHttpHeaders] - A map (string, string) of custom HTTP headers. * @property {Realm.App.Sync~OpenRealmBehaviorConfiguration} [newRealmFileBehavior] - Whether to create a new file and sync in background or wait for the file to be synced. If not set, the Realm will be downloaded before opened. * @property {Realm.App.Sync~OpenRealmBehaviorConfiguration} [existingRealmFileBehavior] - Whether to open existing file and sync in background or wait for the sync of the * file to complete and then open. If not set, the Realm will be downloaded before opened. */ /** * Specify how to open a synced Realm. * * @typedef {Object} Realm.App.Sync~OpenRealmBehaviorConfiguration * @property {string} type - how to open a Realm - 'downloadBeforeOpen' to wait for download to complete or 'openImmediately' to open the local Realm * @property {number} [timeOut] - how long to wait for a download (in ms). Default: infinity * @property {string} [timeOutBehavior] - what to do when download times out - 'openLocalRealm' to open the local Realm or 'throwException' to throw an exception. * @see {@link Realm.App.Sync~openLocalRealmBehavior} * @see {@link Realm.App.Sync~downloadBeforeOpenBehavior} */ /** * The default behavior settings if you want to open a synchronized Realm immediately and start working on it. * If this is the first time you open the Realm, it will be empty while the server data is being downloaded * in the background. 
* * @typedef {Realm.App.Sync~OpenRealmBehaviorConfiguration} Realm.App.Sync~openLocalRealmBehavior */ /** * The default behavior settings if you want to wait for downloading a synchronized Realm to complete before opening it. * * @typedef {Realm.App.Sync~OpenRealmBehaviorConfiguration} Realm.App.Sync~downloadBeforeOpenBehavior */ /** * The class represents a MongoDB Realm App. * * ```js * let app = new Realm.App(config); * ``` * * @memberof Realm */ class App { /** * Creates a new app and connects to a MongoDB Realm instance. * * @param {(Realm.App~AppConfiguration|string)} configOrId - The configuration of the app or a string app id. * @throws If no app id is provided. */ constructor(configOrId) { } /** * Logs in a user. * * @param {Realm.Credentials} credentials - Valid Credentials for the user. * @returns {Promise<Realm.User>} */ logIn(credentials) { } /** * Returns the current user if any. * * @returns {Realm.User} The current user, `null` is no current user. */ get currentUser() { } /** * Returns a dictionary of alll users. Users' identity is used as key. * * @returns {Array} */ get allUsers() { } /** * Switches the current user. * * @param {Realm.User} user - The user to switch to. * @throws If user is not logged in. */ switchUser(user) { } /** * Removes the user from MongoDB Realm. * * @param {Realm.User} user - The user to remove. * @returns {Promise<void>} */ removeUser(user) { } /** * Client for the email/password authentication provider. * * @example * { * // Creating a new user, by registering via email & password * const app = new Realm.App(config); * await app.emailPasswordAuth.registerUser('[email protected]', 'some-secure-password'); * } * * @type {Realm.Auth.EmailPasswordAuth} */ get emailPasswordAuth() { } /** * Returns an instance of an app. If an app with the specified id * hasn't been created, a new app instance will be created. * * @param {string} appId * @returns {Realm.App} * @since v10.0.0 */ getApp(appId) { } } /** * * Class for interacting with Realm Sync. * * @memberof Realm */ class Sync { /** * Calling this method will force Realm to attempt to reconnect the Realm App to the server immediately. * * Realm will reconnect automatically, but by using exponential backoff. This means that if the device is offline for * a long time, restoring the connection after it comes back online can take longer than expected. In situations * where it is possible to detect the network condition (e.g. Airplane mode). Manually calling this method can * provide a smoother user experience. * * @param {Realm.App} app - The Realm app. */ static reconnect(app) { } /** * Set the sync log level. You can only set the log level once, and you must do it after creating an App instance * but before opening any Realms. * * @param {Realm.App} app - The Realm app. * @param {Realm.Sync~LogLevel} level - The new log level * @example * { * const app = new Realm.App(getAppConfig()); * Realm.App.Sync.setLogLevel("all"); * const user = await app.logIn(credentials); * const realm = await Realm.open(getRealmConfig(user)); * } * @param {Realm.App.Sync~LogLevel} level - The log level. */ static setLogLevel(app, level) { } /** * Enable multiplexing multiple sync sessions over a single connection for a Realm app. * When having a lot of synchronized realms open the system might run out of file * descriptors because of all the open sockets to the server. Session multiplexing * is designed to alleviate that, but it might not work with a server configured with * fail-over. 
Only use if you're seeing errors about reaching the file descriptor limit * and you know you are using many sync sessions. * @param {Realm.App} app - The Realm app. */ static enableSessionMultiplexing(app) { } /** * A callback passed to `Realm.App.Sync.setLogger` when instrumenting the Realm Sync client with a custom logger. * @callback Realm.App.Sync~logCallback * @param {number} level The level of the log entry between 0 and 8 inclusively. * Use this as an index into `['all', 'trace', 'debug', 'detail', 'info', 'warn', 'error', 'fatal', 'off']` to get the name of the level. * @param {string} message The message of the log entry. */ /** * Capture the sync client's log. You can only set the log level once, and you must do it after creating an App instance * but before opening any Realms. * * @param {Realm.App} app - the Realm app. * @param {Realm.Sync~logCallback} logger - The log callback. * @example * { * const app = new Realm.App(getAppConfig()); * Realm.App.Sync.setLogger((level, message) => console.log(`[${level}] ${message}`); * const user = await app.logIn(credentials); * const realm = await Realm.open(getRealmConfig(user)); * } * @see {Realm.App.Sync~setLogLevel} */ static setLogger(app, logger) { } /** * Set the application part of the User-Agent string that will be sent to the Realm Object Server when a session * is created. * * This method can only be called up to the point where the first Realm is opened. After that, the User-Agent * can no longer be changed. * @param {Realm.App} the Realm app * @param {string} the user agent description */ static setUserAgent(app, userAgent) { } /** * Initiate a client reset. The Realm must be closed prior to the reset. * * @param {Realm.App} [app] - The app where the Realm was opened. * @param {string} [path] - The path to the Realm to reset. * Throws error if reset is not possible. * @example * { * const config = { sync: { user, partitionValue } }; * config.sync.error = (sender, error) => { * if (error.name === 'ClientReset') { * Realm.Sync.initiateClientReset(app, original_path); * // copy required objects from Realm at error.config.path * } * } * } */ static initiateClientReset(app, path) { } /** * Returns `true` if Realm still has a reference to any sync sessions regardless of their state. * If `false` is returned it means that no sessions currently exist. * @param {Realm.App} [app] - The app where the Realm was opened. */ static _hasExistingSessions(app) { } /** * Returns all sync sessions for a user. * * @param {Realm.User} user - the user. * @returns {Array<Realm.App.Sync.Session>} an array of sessions * @since 10.0.0 */ static getAllSyncSessions(user) { } /** * Returns the session associated with a user and partition value. * * @param {Realm.User} user * @param {string|number|ObjectId|null} partitionValue * @returns {Realm.App.Sync.Session} the session * @since 10.0.0 */ static getSyncSession(user, partitionValue) { } } /** * @typedef Realm.App.Sync~LogLevel * @type {("all"|"trace"|"debug"|"detail"|"info"|"warn"|"error"|"fatal"|"off")} */ /** * Class that describes authentication errors in the Realm Object Server * @memberof Realm.App.Sync */ class AuthError extends Error { /** * The numerical code for this error. * @type {number} */ get code() { } /** * The unique help URI that describes this error. * @type {string} */ get type() { } } /** * Describes an error when an incompatible synced Realm is opened. 
The old version of the Realm can be accessed in readonly mode using the configuration() member * @memberof Realm.App.Sync */ class IncompatibleSyncedRealmError { /** * The name of the error is 'IncompatibleSyncedRealmError' */ get name() { } /** * The {Realm~Configuration} of the backed up Realm. * @type {Realm~Configuration} */ get configuration() { } } /** * Class for creating user credentials * @memberof Realm */ class Credentials { /** * Creates credentials based on a login with an email address and a password. * @param {string} username The username of the user. * @param {string} password The user's password. * @return {Credentials} An instance of `Credentials` that can be used in {@linkcode Realm.App.logIn}. */ static emailPassword(email, password) { } /** * Creates credentials based on a Facebook login. * @param {string} token A Facebook authentication token, obtained by logging into Facebook. * @return {Credentials} An instance of `Credentials` that can be used in {@linkcode Realm.App.logIn}. */ static facebook(token) { } /** * Creates credentials based on a Google login. * @param {string} authCode A Google authentication code, obtained by logging into Google. * @return {Credentials} An instance of `Credentials` that can be used in {@linkcode Realm.App.logIn}. */ static google(authCode) { } /** * Creates credentials for an anonymous user. These can only be used once - using them a second * time will result in a different user being logged in. If you need to get a user that has already logged * in with the Anonymous credentials, use {@linkcode Realm.App.currentUser} or {@linkcode Realm.App.allUsers} * @return {Credentials} An instance of `Credentials` that can be used in {@linkcode Realm.App.logIn}. */ static anonymous() { } /** * Creates credentials with a JSON Web Token (JWT) provider and user identifier. * @param {string} token A string identifying the user. Usually an identity token or a username. * @return {Credentials} An instance of `Credentials` that can be used in {@linkcode Realm.App.logIn}. */ static jwt(token) { } /** * Creates credentials with a MongoDB Realm function and user identifier. * @param {string} payload A string identifying the user. Usually an identity token or a username. * @return {Promise<Credentials>} An instance of `Credentials` that can be used in {@linkcode Realm.App.logIn}. */ static function(payload) { } /** * Creates credentials from a user API key. * @param {string} key A string identifying the user by API key. * @return {Credentials} An instance of `Credentials` that can be used in {@linkcode Realm.App.logIn}. */ static userApiKey(token) { } /** * Creates credentials from a server API key. * @param {string} key A string identifying the user by API key. * @return {Credentials} An instance of `Credentials` that can be used in {@linkcode Realm.App.logIn}. */ static serverApiKey(token) { } /** * Creates credentials based on an Apple login. * @param {string} token An Apple authentication token, obtained by logging into Apple. * @return {Credentials} An instance of `Credentials` that can be used in {@linkcode Realm.App.logIn}. */ static apple(token) { } /** * Gets the identity provider for the credentials. * @returns {string} The identity provider, such as Google, Facebook, etc. 
*/ get provider() { } } /** * A namespace for auth providers * @see Realm.Auth.EmailPasswordAuth * @see Realm.Auth.ApiKeyAuth * @memberof Realm */ class Auth { } /** * Class for managing email/password for users * @memberof Realm.Auth */ class EmailPasswordAuth { /** * Registers a new email identity with the email/password provider, * and sends a confirmation email to the provided address. * * @param {string} email - The email address of the user to register. * @param {string} password - The password that the user created for the new username/password identity. * @returns {Promise<void>} */ registerUser(email, password) { } /** * Confirms an email identity with the email/password provider. * * @param {string} token - The confirmation token that was emailed to the user. * @param {string} id - The confirmation token id that was emailed to the user. * @returns {Promise<void>} */ confirmUser(token, id) { } /** * Re-sends a confirmation email to a user that has registered but * not yet confirmed their email address. * * @param {string} email - The email address of the user to re-send a confirmation for. * @returns {Promise<void>} */ resendConfirmationEmail(email) { } /** * Sends an email to the user for resetting the password. * @param {string} email - The email address of the user to re-send a confirmation for. * @returns {Promise<void>} */ sendResetPasswordEmail(email) { } /** * Resets the password of an email identity using the password reset token emailed to a user. * @param {string} password - The desired new password. * @param {string} token - The password reset token that was emailed to the user. * @param {string} id - The password reset token id that was emailed to the user. * @returns {Promise<void>} */ resetPassword(password, token, id) { } /** * Resets the password of an email identity using the * password reset function set up in the application. * * @param {string} email - The email address of the user. * @param {string} password - The desired new password. * @param {Array<BSON>} args - Arguments passed onto the function. * @return {Promose<void>} */ callResetPasswordFunction(email, password, ...args) { } } /** * A client for the user API key authentication provider which * can be used to create and modify user API keys. This * client should only be used by an authenticated user. * @memberof Realm.Auth */ class ApiKeyAuth { /** * Creates a user API key that can be used to authenticate as the current user. * * @param {string} name - The name of the API key to be created. * @returns {Promise<void>} */ create(name) { } /** * Fetches a user API key associated with the current user. * * @param {string} id - The id of the API key to fetch. * @returns {Promise<Object>} */ fetch(id) { } /** * Fetches the user API keys associated with the current user. * * @returns {Promise<Array>} */ fetchAll() { } /** * Deletes a user API key associated with the current user. * * @param {string} id - The id of the API key to delete. * @returns {Promise<void>} */ delete(id) { } /** * Enables a user API key associated with the current user. * * @param {string} id - The id of the API key to enable. * @returns {Promise<void>} */ enable(id) { } /** * Disables a user API key associated with the current user. * * @param {string} id - The id of the API key to disable. * @returns {Promise<void>} */ disable(id) { } } /** * Class for managing users. * @memberof Realm */ class User { /** * Gets the identity of this user on MongoDB Realm Cloud. 
* The identity is guaranteed to be unique among all users on MongoDB Realm Cloud. * @type {string} */ get id() { } /** * Gets an array of identities for this user on MongoDB Realm Cloud. * Each element in the array is an object with properties userId and providerType. * @type {Array<Object>} */ get identities() { } /** * Gets the provider type for the identity. * @type {string} */ get providerType() { } /** * Gets the device id. `null` if no device id. * @type {string} */ get deviceId() { } /** * Gets this user's access token. This is the user's credential for accessing the MongoDB * Realm Cloud and should be treated as sensitive data. * @type {string} */ get accessToken() { } /** * Gets this user's refresh token. This is the user's credential for accessing the MongoDB * Realm Cloud and should be treated as sensitive data. * @type {string} */ get refreshToken() { } /** * Gets this user's associated custom data. This is application-specific data provided by the server. * @type {object?} */ get customData() { } /** * Is true if the user is logged in. False otherwise. * @type {boolean} */ get isLoggedIn() { } /** * Gets the user's state which can be one of the following: * - `LoggedOut` - the user is logged out * - `LoggedIn` - the user is logged in * - `Removed` - the user has been removed * @type {string} */ get state() { } /** * Gets the user's profile (name, email address, etc.). * @type {object} */ get profile() { } /** * Logs out the user. * @returns {Promise<void>} - resolves when the user has been logged out */ logOut() { } /** * Links the user to another set of credentials. This is useful when linking * different accounts together. * @param {Realm.Credentials} credentials * @returns {Promise<void>} - resolves when the user has been linked with the other credentials. */ linkCredentials(credentials) { } /** * Refreshes the user's custom data. * @returns {Promise<Object>} * @see {Realm.User.customData} */ refreshCustomData() { } /** * Returns a provider to interact with API keys. * @return {Realm.Auth.ApiKeyAuth} - the provider */ apiKeys() { } /** * Calls the named server function as this user. * @param {string} name - name of the function to call * @param {any[]} args - list of arguments to pass */ callFunction(name, args) { } /** * Convenience wrapper around `callFunction(name, args)` * * @example * // These are all equivalent: * await user.callFunction("do_thing", [a1, a2, a3]); * await user.functions.do_thing(a1, a2, a3); * await user.functions["do_thing"](a1, a2, a3); * * @example * // It is legal to store the functions as first-class values: * const do_thing = user.functions.do_thing; * await do_thing(a1); * await do_thing(a2); */ get functions() { } /** * Returns a connection to the MongoDB service. * * @example * let blueWidgets = user.mongoClient('myClusterName') * .db('myDb') * .collection('widgets') * .find({color: 'blue'}); * * @param {string} serviceName * @returns {Realm~MongoDB} */ mongoClient(serviceName) { } /** * @class Realm.User~Push Access to the operations of the push service. */ /** * Registers the provided token with this User's device. * * @function Realm.User~Push#register * @param {string} token * @returns {Promise<void>} completed when the user is registered, or the operation fails. */ /** * Deregisters this User's device. * * @function Realm.User~Push#deregister * @returns {Promise<void>} completed when the user is deregistered, or the operation fails. */ /** * Access the operations of the push service.
* * @param {string} serviceName * @returns {Realm.User~Push} */ push(serviceName) { } } /** * An object encapsulating a MongoDB Realm Cloud session. Sessions represent the communication between the * client (and a local Realm file on disk), and the server (and a remote Realm at a given URL stored on a Realm Object Server). * Sessions are always created by the SDK and vended out through various APIs. The lifespans of sessions * associated with Realms are managed automatically. * @memberof Realm.App.Sync */ class Session { /** * Gets the Sync-part of the configuration that the corresponding Realm was * constructed with. * @type {object} */ get config() { } /** * Gets the User that this session was created with. * @type {User} */ get user() { } /** * Gets the URL of the Realm Object Server that this session is connected to. * @type {string} */ get url() { } /** * Gets the current state of the session. * Can be either: * - "active": The session is connected to the Realm Object Server and is actively transferring data. * - "inactive": The session is not currently communicating with the Realm Object Server. * - "invalid": A non-recoverable error has occurred, and this session is semantically invalid. A new session should be created. * @type {string} */ get state() { } /** * Registers a progress notification callback on a session object. * @param {string} direction - The progress direction to register for. * Can be either: * - `download` - report download progress * - `upload` - report upload progress * @param {string} mode - The progress notification mode to use for the registration. * Can be either: * - `reportIndefinitely` - the registration will stay active until the callback is unregistered * - `forCurrentlyOutstandingWork` - the registration will be active until only the currently transferable bytes are synced * @param {callback(transferred, transferable)} callback - called with the following arguments: * - `transferred` - the current number of bytes already transferred * - `transferable` - the total number of transferable bytes (the number of bytes already transferred plus the number of bytes pending transfer) */ addProgressNotification(direction, mode, progressCallback) { } /** Unregisters a progress notification callback that was previously registered with addProgressNotification. * Calling the function multiple times with the same callback is ignored. * @param {callback(transferred, transferable)} callback - a previously registered progress callback */ removeProgressNotification(progressCallback) { } /** * Registers a connection notification on the session object. This will be notified about changes to the * underlying connection to the Realm Object Server. * * @param {callback(newState, oldState)} callback - called with the following arguments: * - `newState` - the new state of the connection * - `oldState` - the state the connection transitioned from. */ addConnectionNotification(connectionCallback) { } /** * Unregisters a connection notification callback that was previously registered with addConnectionNotification. * Calling the function multiple times with the same callback is ignored. * * @param {callback(newState, oldState)} callback - a previously registered connection callback. */ removeConnectionNotification(connectionCallback) { } /** * Gets the current state of the connection to the server. Multiple sessions might share the same underlying * connection. In that case, any connection change is sent to all sessions.
* * Can be either: * - Realm.App.Sync.ConnectionState.Disconnected: No connection to the server is available. * - Realm.App.Sync.ConnectionState.Connecting: An attempt to connect to the server is in progress. * - Realm.App.Sync.ConnectionState.Connected: The connection to the server is active and data can be synchronized. * * Data will only be synchronized with the Realm Object Server if this method returns `Connected` and `state()` * returns `Active` or `Dying`. * * @type {string} */ connectionState() { } /** * Returns `true` if the session is currently active and connected to the server, `false` if not. * * @type {boolean} */ isConnected() { } /** * Resumes a sync session that has been paused. * * This method is asynchronous so in order to know when the session has started you will need * to add a connection notification with `addConnectionNotification`. * * This method is idempotent so it will be a no-op if the session is already started. */ resume() { } /** * Pauses a sync session. * * This method is asynchronous so in order to know when the session has been paused you will need * to add a connection notification with `addConnectionNotification`. * * This method is idempotent so it will be a no-op if the session is already paused. */ pause() { } /** * This method returns a promise that does not resolve successfully until all known local changes have been uploaded * to the server or the specified timeout is hit, in which case it will be rejected. If the method times out, the upload * will still continue in the background. * * This method cannot be called before the Realm has been opened. * * @param timeoutMs maximum amount of time to wait in milliseconds before the promise is rejected. If no timeout * is specified the method will wait forever. */ uploadAllLocalChanges(timeoutMs) { } /** * This method returns a promise that does not resolve successfully until all known remote changes have been * downloaded and applied to the Realm or the specified timeout is hit, in which case it will be rejected. If the method * times out, the download will still continue in the background. * * This method cannot be called before the Realm has been opened. * * @param timeoutMs maximum amount of time to wait in milliseconds before the promise will be rejected. If no timeout * is specified the method will wait forever. */ downloadAllServerChanges(timeoutMs) { } } /** * A Realm Worker can be used to process Sync events in multiple automatically-managed child processes. * * Similar to Web Workers, a Worker is initialized by passing it the name of a module which should be loaded in the new process. * The module should export a function for each event type it wishes to handle, which will be called when that event is emitted. * * Currently supported events: * * * `'available'`: Emitted whenever there is a new Realm which has a virtual * path matching the filter regex, either due to the Realm being newly created * or the listener being added. The virtual path (i.e. the portion of the * URL after the protocol and hostname) is passed as an argument. * * `'change'`: Emitted whenever the data within a Realm matching the filter * regex has changed. A [ChangeEvent]{@link Realm.App.Sync.ChangeEvent} argument * is passed containing information about which Realm changed and what * objects within the Realm changed. * * `'delete'`: Emitted whenever a Realm matching the filter regex has been * deleted from the server. The virtual path of the Realm being deleted is * passed as an argument.
* * Worker automatically spawns child processes as needed to handle events in * parallel (up to the limit specified in the `options` parameter). Events for * each specific Realm will be processed in serial in the order in which the * events occurred, but may not all be processed in the same child. * * @example * // my-worker.js * function onavailable(path) { * console.log(`Realm available at ${path}`); * } * * function onchange(change) { * console.log(`Realm at ${change.path} changed`); * } * * function ondelete(path) { * console.log(`Realm at ${path} deleted`); * } * * module.exports = {onchange, onavailable, ondelete}; * * // server script * Realm.App.Sync.addListener(realmServerURL, adminUser, '.*', new Realm.Worker('my-worker')); * * @memberof Realm */ class Worker { /** * Create a new Worker which executes the given module. * * @param {string} moduleName - The module to load in the worker process. * @param {object} [options] - An object containing option properties to configure the worker. * Available properties are as follows: * * * `maxWorkers`: The maximum number of child processes to spawn. Defaults to `os.cpus().length`. * * `env`: An object containing environment variables to set for the child process. * * `execArgv`: Command-line arguments to pass to the `node` worker processes. */ constructor(moduleName, options = {}) { } }
* @param {object} [options.sort] The order in which to return matching documents. * @returns {Promise<object>} The document or null if nothing matched. */ async findOne(filter, options) { } /** * Finds a document which matches the provided query and performs the desired update to individual fields. * * @param {object} filter A filter applied to narrow down the results. * @param {object} update The new values for the document. * @param {object} [options] Additional options to apply. * @param {object} [options.projection] Limits the fields to return for all matching documents. * See [Tutorial: Project Fields to Return from Query](https://docs.mongodb.com/manual/tutorial/project-fields-from-query-results/). * @param {object} [options.sort] The order in which to return matching documents. * @param {boolean} [options.upsert=false] if true, indicates that MongoDB should insert a new document that matches the * query filter when the query does not match any existing documents in the collection. * @param {boolean} [options.returnNewDocument=false] if true, indicates that the action should return * the document in its updated form instead of its original, pre-update form. * @returns {Promise<?object>} The document (before or after modification) or null if nothing matched. */ async findOneAndUpdate(filter, update, options) { } /** * Finds a document which matches the provided filter and replaces it with a new document. * * @param {object} filter A filter applied to narrow down the results. * @param {object} replacement The new values for the document. * @param {object} [options] Additional options to apply. * @param {object} [options.projection] Limits the fields to return for all matching documents. * See [Tutorial: Project Fields to Return from Query](https://docs.mongodb.com/manual/tutorial/project-fields-from-query-results/). * @param {object} [options.sort] The order in which to return matching documents. * @param {boolean} [options.upsert=false] if true, indicates that MongoDB should insert a new document that matches the * query filter when the query does not match any existing documents in the collection. * @param {boolean} [options.returnNewDocument=false] if true, indicates that the action should return * the document in its updated form instead of its original, pre-update form. * @returns {Promise<?object>} The document (before or after modification) or null if nothing matched. */ async findOneAndReplace(filter, replacement, options) { } /** * Finds a document which matches the provided filter and deletes it * * @param {object} filter A filter applied to narrow down the results. * @param {object} [options] Additional options to apply. * @param {object} [options.projection] Limits the fields to return for all matching documents. * See [Tutorial: Project Fields to Return from Query](https://docs.mongodb.com/manual/tutorial/project-fields-from-query-results/). * @param {object} [options.sort] The order in which to return matching documents. * @returns {Promise<object>} The document or null if nothing matched. */ async findOneAndDelete(filter, options) { } /** * Runs an aggregation framework pipeline against this collection. * * @param {object[]} pipeline An array of aggregation pipeline stages. * @returns {Promise<object[]>} The result. */ async aggregate(pipeline) { } /** * Counts the number of documents in this collection matching the provided filter. * * @param {object} [filter] An optional filter applied to narrow down the results. * @param {object} [options] Additional options to apply. 
* @param {number} [options.limit] The maximum number of documents to return. * @returns {Promise<number>} */ async count(filter, options) { } /** * @typedef Realm.MongoDBCollection~InsertOneResult Result of inserting a document * @property insertedId The id of the inserted document */ /** * Inserts a single document into the collection. * Note: If the document is missing an _id, one will be generated for it by the server. * * @param {object} document The document. * @returns {Promise<Realm.MongoDBCollection~InsertOneResult>} The _id of the inserted document. */ async insertOne(document) { } /** * @typedef Realm.MongoDBCollection~InsertManyResult Result of inserting many documents * @property {Array} insertedIds The ids of the inserted documents */ /** * Inserts an array of documents into the collection. * If any values are missing identifiers, they will be generated by the server. * * @param {object[]} documents The array of documents. * @returns {Promise<Realm.MongoDBCollection~InsertManyResult>} The _ids of the inserted documents. */ async insertMany(documents) { } /** * @typedef {object} Realm.MongoDBCollection~DeleteResult Result of deleting documents * @property {number} deletedCount The number of documents that were deleted. */ /** * Deletes a single matching document from the collection. * * @param {object} filter A filter applied to narrow down the result. * @returns {Promise<Realm.MongoDBCollection~DeleteResult>} */ async deleteOne(filter) { } /** * Deletes multiple documents. * * @param {object} filter A filter applied to narrow down the result. * @returns {Promise<Realm.MongoDBCollection~DeleteResult>} */ async deleteMany(filter) { } /** * @typedef {object} Realm.MongoDBCollection~UpdateResult Result of updating documents * @property {number} matchedCount The number of documents that matched the filter. * @property {number} modifiedCount The number of documents that were modified. * @property [upsertedId] The identifier of the inserted document if an upsert took place. */ /** * Updates a single document matching the provided filter in this collection. * * @param {object} filter A filter applied to narrow down the results. * @param {object} update The new values for the document. * @param {object} [options] Additional options to apply. * @param {boolean} [options.upsert=false] if true, indicates that MongoDB should insert a new document that matches the * query filter when the query does not match any existing documents in the collection. * @returns {Promise<Realm.MongoDBCollection~UpdateResult>} */ async updateOne(filter, update, options) { } /** * Updates multiple documents matching the provided filter in this collection. * * @param {object} filter A filter applied to narrow down the results. * @param {object} update The new values for the document. * @param {object} [options] Additional options to apply. * @param {boolean} [options.upsert=false] if true, indicates that MongoDB should insert a new document that matches the * query filter when the query does not match any existing documents in the collection. * @returns {Promise<Realm.MongoDBCollection~UpdateResult>} */ async updateMany(filter, update, options) { } /** * @typedef {object} Realm.MongoDBCollection~ChangeEvent An event in a change stream. * * Note that which properties are present will depend on both the * `operationType` field, which is itself always present, and the MongoDB * server version. * * @see https://docs.mongodb.com/manual/reference/change-events/ * @property _id The opaque resume token for this event.
* @property {string} operationType What kind of operation was this? One of: * `"insert"`, `"delete"`, `"replace"`, `"update"`, `"drop"`, `"rename"`, `"dropDatabase"`, or `"invalidate"`. * @property {object} fullDocument A full copy of the document that was touched by this operation. * See the mongodb reference manual for details about which version of the document will be returned. * @property {object} ns Namespace of the collection affected by this event. * @property {string} ns.db Database name * @property {string} ns.coll Collection name * @property {object} to Destination namespace for `"rename"` events. * @property {string} to.db Database name * @property {string} to.coll Collection name * @property {object} documentKey The `_id` and shard key of the modified document. `_id` is not duplicated * if it is part of the shard key. * @property {object} updateDescription * @property {object} updateDescription.updatedFields An object mapping from modified field names to their new values. * @property {string[]} updateDescription.removedFields A list of field names that were removed. * @property {Timestamp} clusterTime The timestamp from the oplog entry associated with the event. * @property {Long} txnNumber The transaction number. Only present if part of a multi-document transaction. * @property {object} lsid The logical session id of the transaction. Only present if part of a multi-document transaction. */ /** * Creates an asynchronous change stream to monitor this collection for changes. * * By default, yields all change events for this collection. You may specify at most one of * the `filter` or `ids` options. * * @param {object} [options={}] * @param {object} [options.filter] A filter for which change events you are interested in. * @param {any[]} [options.ids] A list of ids that you are interested in watching * * @yields {Realm.MongoDBCollection~ChangeEvent} a change event */ async* watch(options) {} }
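The JSDoc above documents how these pieces compose: User#mongoClient(serviceName) returns a MongoDB handle, db()/collection() drill down to a remote collection, and the collection exposes find/insertOne/watch and friends. As a rough, non-authoritative sketch of that flow, the snippet below strings the documented calls together; the app id, the "mongodb-atlas" service name, the database and collection names, and the anonymous-login scaffolding are assumptions for illustration and are not taken from this excerpt.

// Minimal usage sketch of the documented collection API; names marked ASSUMED are illustrative only.
const Realm = require("realm");

async function main() {
  const app = new Realm.App({ id: "my-realm-app-id" });        // ASSUMED app id
  const user = await app.logIn(Realm.Credentials.anonymous()); // ASSUMED auth provider

  // Documented above: mongoClient(serviceName) -> db(databaseName) -> collection(name)
  const widgets = user
    .mongoClient("mongodb-atlas") // ASSUMED service name
    .db("myDb")                   // ASSUMED database name
    .collection("widgets");       // ASSUMED collection name

  // insertOne(document): the server generates an _id if the document has none.
  const { insertedId } = await widgets.insertOne({ color: "blue", qty: 3 });
  console.log("inserted", insertedId);

  // find(filter, options): projection / sort / limit options as documented.
  const blueWidgets = await widgets.find({ color: "blue" }, { limit: 10 });
  console.log("found", blueWidgets.length, "blue widgets");

  // watch(options) is declared as an async generator, so change events can be consumed with for-await.
  for await (const change of widgets.watch({ filter: { operationType: "insert" } })) {
    console.log("change:", change.operationType, change.documentKey);
    break; // stop after the first event in this sketch
  }
}

main().catch(console.error);

Because watch() yields Realm.MongoDBCollection~ChangeEvent objects, the loop above sees the same operationType / fullDocument / documentKey fields described in the typedef.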
1
19,568
The callback gets the log level as a number (see the sketch after this row).
realm-realm-js
js
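The review message in this row points out that the log callback receives the level as a number rather than a name. Below is a minimal sketch of wiring up such a callback; the Realm.App.Sync.setLogger / setLogLevel hooks and the numeric-to-name mapping are assumptions for illustration and are not confirmed by the excerpt above.

// Sketch: a sync log callback whose first argument is a numeric level (per the review note above).
// Realm.App.Sync.setLogger / setLogLevel and the LEVEL_NAMES mapping are ASSUMPTIONS, not taken from this excerpt.
const Realm = require("realm");

const app = new Realm.App({ id: "my-realm-app-id" }); // ASSUMED app id

// ASSUMED ordering of numeric levels, used only to print something readable.
const LEVEL_NAMES = ["all", "trace", "debug", "detail", "info", "warn", "error", "fatal", "off"];

Realm.App.Sync.setLogLevel(app, "debug"); // ASSUMED helper for raising verbosity
Realm.App.Sync.setLogger(app, (level, message) => {
  // `level` arrives as a number; map it to a name before logging.
  const name = LEVEL_NAMES[level] || String(level);
  console.log(`[sync:${name}] ${message}`);
});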
@@ -71,6 +71,7 @@ setup( "hyperframe>=5.0, <6", "jsbeautifier>=1.6.3, <1.7", "kaitaistruct>=0.7, <0.8", + "ldap3>=2.2.0, <2.2.1", "passlib>=1.6.5, <1.8", "pyasn1>=0.1.9, <0.3", "pyOpenSSL>=16.0, <17.1",
1
import os import runpy from codecs import open from setuptools import setup, find_packages # Based on https://github.com/pypa/sampleproject/blob/master/setup.py # and https://python-packaging-user-guide.readthedocs.org/ here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() VERSION = runpy.run_path(os.path.join(here, "mitmproxy", "version.py"))["VERSION"] setup( name="mitmproxy", version=VERSION, description="An interactive, SSL-capable, man-in-the-middle HTTP proxy for penetration testers and software developers.", long_description=long_description, url="http://mitmproxy.org", author="Aldo Cortesi", author_email="[email protected]", license="MIT", classifiers=[ "License :: OSI Approved :: MIT License", "Development Status :: 5 - Production/Stable", "Environment :: Console", "Environment :: Console :: Curses", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Security", "Topic :: Internet", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: Proxy Servers", "Topic :: Software Development :: Testing" ], packages=find_packages(include=[ "mitmproxy", "mitmproxy.*", "pathod", "pathod.*", ]), include_package_data=True, entry_points={ 'console_scripts': [ "mitmproxy = mitmproxy.tools.main:mitmproxy", "mitmdump = mitmproxy.tools.main:mitmdump", "mitmweb = mitmproxy.tools.main:mitmweb", "pathod = pathod.pathod_cmdline:go_pathod", "pathoc = pathod.pathoc_cmdline:go_pathoc" ] }, # https://packaging.python.org/en/latest/requirements/#install-requires # It is not considered best practice to use install_requires to pin dependencies to specific versions. install_requires=[ "blinker>=1.4, <1.5", "click>=6.2, <7", "certifi>=2015.11.20.1", # no semver here - this should always be on the last release! "construct>=2.8, <2.9", "cryptography>=1.4, <1.9", "cssutils>=1.0.1, <1.1", "h2>=3.0, <4", "html2text>=2016.1.8, <=2016.9.19", "hyperframe>=5.0, <6", "jsbeautifier>=1.6.3, <1.7", "kaitaistruct>=0.7, <0.8", "passlib>=1.6.5, <1.8", "pyasn1>=0.1.9, <0.3", "pyOpenSSL>=16.0, <17.1", "pyparsing>=2.1.3, <2.3", "pyperclip>=1.5.22, <1.6", "requests>=2.9.1, <3", "ruamel.yaml>=0.13.2, <0.15", "tornado>=4.3, <4.6", "urwid>=1.3.1, <1.4", "brotlipy>=0.5.1, <0.7", "sortedcontainers>=1.5.4, <1.6", # transitive from cryptography, we just blacklist here. # https://github.com/pypa/setuptools/issues/861 "setuptools>=11.3, !=29.0.0", ], extras_require={ ':sys_platform == "win32"': [ "pydivert>=2.0.3, <2.1", ], ':sys_platform != "win32"': [ ], 'dev': [ "Flask>=0.10.1, <0.13", "flake8>=3.2.1, <3.4", "mypy>=0.501, <0.502", "rstcheck>=2.2, <4.0", "tox>=2.3, <3", "pytest>=3, <3.1", "pytest-cov>=2.2.1, <3", "pytest-timeout>=1.0.0, <2", "pytest-xdist>=1.14, <2", "pytest-faulthandler>=1.3.0, <2", "sphinx>=1.3.5, <1.6", "sphinx-autobuild>=0.5.2, <0.7", "sphinxcontrib-documentedlist>=0.5.0, <0.7", "sphinx_rtd_theme>=0.1.9, <0.3", ], 'contentviews': [ ], 'examples': [ "beautifulsoup4>=4.4.1, <4.6", "Pillow>=3.2, <4.2", ] } )
1
13,162
Is there any issue with 2.2.3? If not, this should be `<2.3`.
mitmproxy-mitmproxy
py
@@ -1595,7 +1595,3 @@ class SeriesTest(ReusedSQLTestCase, SQLTestUtils): # Only support for MultiIndex kser = ks.Series([10, -2, 4, 7]) self.assertRaises(ValueError, lambda: kser.unstack()) - - def test_item(self): - kser = ks.Series([10, 20]) - self.assertRaises(ValueError, lambda: kser.item())
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import base64 from collections import defaultdict from distutils.version import LooseVersion import inspect from io import BytesIO from itertools import product from datetime import datetime, timedelta import matplotlib matplotlib.use("agg") from matplotlib import pyplot as plt import numpy as np import pandas as pd import pyspark from pyspark.ml.linalg import SparseVector from databricks import koalas as ks from databricks.koalas import Series from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils from databricks.koalas.exceptions import PandasNotImplementedError from databricks.koalas.missing.series import MissingPandasLikeSeries class SeriesTest(ReusedSQLTestCase, SQLTestUtils): @property def pser(self): return pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") @property def kser(self): return ks.from_pandas(self.pser) def test_series(self): kser = self.kser self.assertTrue(isinstance(kser, Series)) self.assert_eq(kser + 1, self.pser + 1) def test_series_tuple_name(self): pser = self.pser pser.name = ("x", "a") kser = ks.from_pandas(pser) self.assert_eq(kser, pser) self.assert_eq(kser.name, pser.name) pser.name = ("y", "z") kser.name = ("y", "z") self.assert_eq(kser, pser) self.assert_eq(kser.name, pser.name) def test_repr_cache_invalidation(self): # If there is any cache, inplace operations should invalidate it. 
s = ks.range(10)["id"] s.__repr__() s.rename("a", inplace=True) self.assertEqual(s.__repr__(), s.rename("a").__repr__()) def test_empty_series(self): a = pd.Series([], dtype="i1") b = pd.Series([], dtype="str") self.assert_eq(ks.from_pandas(a), a) self.assertRaises(ValueError, lambda: ks.from_pandas(b)) with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self.assert_eq(ks.from_pandas(a), a) self.assertRaises(ValueError, lambda: ks.from_pandas(b)) def test_all_null_series(self): a = pd.Series([None, None, None], dtype="float64") b = pd.Series([None, None, None], dtype="str") self.assert_eq(ks.from_pandas(a).dtype, a.dtype) self.assertTrue(ks.from_pandas(a).toPandas().isnull().all()) self.assertRaises(ValueError, lambda: ks.from_pandas(b)) with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self.assert_eq(ks.from_pandas(a).dtype, a.dtype) self.assertTrue(ks.from_pandas(a).toPandas().isnull().all()) self.assertRaises(ValueError, lambda: ks.from_pandas(b)) def test_head_tail(self): kser = self.kser pser = self.pser self.assert_eq(kser.head(3), pser.head(3)) self.assert_eq(kser.head(0), pser.head(0)) self.assert_eq(kser.head(-3), pser.head(-3)) self.assert_eq(kser.head(-10), pser.head(-10)) # TODO: self.assert_eq(kser.tail(3), pser.tail(3)) def test_rename(self): pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") kser = ks.from_pandas(pser) pser.name = "renamed" kser.name = "renamed" self.assertEqual(kser.name, "renamed") self.assert_eq(kser, pser) pser.name = None kser.name = None self.assertEqual(kser.name, None) self.assert_eq(kser, pser) pidx = pser.index kidx = kser.index pidx.name = "renamed" kidx.name = "renamed" self.assertEqual(kidx.name, "renamed") self.assert_eq(kidx, pidx) def test_rename_method(self): # Series name pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") kser = ks.from_pandas(pser) self.assert_eq(kser.rename("y"), pser.rename("y")) self.assertEqual(kser.name, "x") # no mutation self.assert_eq(kser.rename(), pser.rename()) kser.rename("z", inplace=True) pser.rename("z", inplace=True) self.assertEqual(kser.name, "z") self.assert_eq(kser, pser) # Series index # pser = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x') # kser = ks.from_pandas(s) # TODO: index # res = kser.rename(lambda x: x ** 2) # self.assert_eq(res, pser.rename(lambda x: x ** 2)) # res = kser.rename(pser) # self.assert_eq(res, pser.rename(pser)) # res = kser.rename(kser) # self.assert_eq(res, pser.rename(pser)) # res = kser.rename(lambda x: x**2, inplace=True) # self.assertis(res, kser) # s.rename(lambda x: x**2, inplace=True) # self.assert_eq(kser, pser) def test_or(self): pdf = pd.DataFrame( { "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan], "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan], } ) kdf = ks.from_pandas(pdf) self.assert_eq(pdf["left"] | pdf["right"], kdf["left"] | kdf["right"]) def test_and(self): pdf = pd.DataFrame( { "left": [True, False, True, False, np.nan, np.nan, True, False, np.nan], "right": [True, False, False, True, True, False, np.nan, np.nan, np.nan], } ) kdf = ks.from_pandas(pdf) self.assert_eq(pdf["left"] & pdf["right"], kdf["left"] & kdf["right"]) def test_to_numpy(self): pser = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x") kser = ks.from_pandas(pser) np.testing.assert_equal(kser.to_numpy(), pser.values) def test_isin(self): pser = pd.Series(["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal") kser = ks.from_pandas(pser) self.assert_eq(kser.isin(["cow", "lama"]), pser.isin(["cow", 
"lama"])) self.assert_eq(kser.isin({"cow"}), pser.isin({"cow"})) msg = "only list-like objects are allowed to be passed to isin()" with self.assertRaisesRegex(TypeError, msg): kser.isin(1) def test_fillna(self): pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x") kser = ks.from_pandas(pser) self.assert_eq(kser.fillna(0), pser.fillna(0)) kser.fillna(0, inplace=True) pser.fillna(0, inplace=True) self.assert_eq(kser, pser) # test considering series does not have NA/NaN values kser.fillna(0, inplace=True) pser.fillna(0, inplace=True) self.assert_eq(kser, pser) def test_dropna(self): pser = pd.Series([np.nan, 2, 3, 4, np.nan, 6], name="x") kser = ks.from_pandas(pser) self.assert_eq(kser.dropna(), pser.dropna()) kser.dropna(inplace=True) self.assert_eq(kser, pser.dropna()) def test_nunique(self): pser = pd.Series([1, 2, 1, np.nan]) kser = ks.from_pandas(pser) # Assert NaNs are dropped by default nunique_result = kser.nunique() self.assertEqual(nunique_result, 2) self.assert_eq(nunique_result, pser.nunique()) # Assert including NaN values nunique_result = kser.nunique(dropna=False) self.assertEqual(nunique_result, 3) self.assert_eq(nunique_result, pser.nunique(dropna=False)) # Assert approximate counts self.assertEqual(ks.Series(range(100)).nunique(approx=True), 103) self.assertEqual(ks.Series(range(100)).nunique(approx=True, rsd=0.01), 100) def _test_value_counts(self): # this is also containing test for Index & MultiIndex pser = pd.Series([1, 2, 1, 3, 3, np.nan, 1, 4], name="x") kser = ks.from_pandas(pser) exp = pser.value_counts() res = kser.value_counts() self.assertEqual(res.name, exp.name) self.assert_eq(res, exp, almost=True) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) with self.assertRaisesRegex( NotImplementedError, "value_counts currently does not support bins" ): kser.value_counts(bins=3) pser.name = "index" kser.name = "index" self.assert_eq(kser.value_counts(), pser.value_counts(), almost=True) # Series from DataFrame pdf = pd.DataFrame({"a": [1, 2, 3], "b": [None, 1, None]}) kdf = ks.from_pandas(pdf) self.assert_eq( kdf.a.value_counts(normalize=True), pdf.a.value_counts(normalize=True), almost=True ) self.assert_eq( kdf.a.value_counts(ascending=True), pdf.a.value_counts(ascending=True), almost=True ) self.assert_eq( kdf.a.value_counts(normalize=True, dropna=False), pdf.a.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kdf.a.value_counts(ascending=True, dropna=False), pdf.a.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) 
self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with NaN index pser = pd.Series([1, 2, 3], index=[2, None, 5]) kser = ks.from_pandas(pser) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with MultiIndex pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c")]) kser = ks.from_pandas(pser) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with MultiIndex some of index has NaN pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", None), ("y", "c")]) kser = ks.from_pandas(pser) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( 
kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) # Series with MultiIndex some of index is NaN. # This test only available for pandas >= 0.24. if LooseVersion(pd.__version__) >= LooseVersion("0.24"): pser.index = pd.MultiIndex.from_tuples([("x", "a"), None, ("y", "c")]) kser = ks.from_pandas(pser) self.assert_eq( kser.value_counts(normalize=True), pser.value_counts(normalize=True), almost=True ) self.assert_eq( kser.value_counts(ascending=True), pser.value_counts(ascending=True), almost=True ) self.assert_eq( kser.value_counts(normalize=True, dropna=False), pser.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.value_counts(ascending=True, dropna=False), pser.value_counts(ascending=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True), pser.index.value_counts(normalize=True), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True), pser.index.value_counts(ascending=True), almost=True, ) self.assert_eq( kser.index.value_counts(normalize=True, dropna=False), pser.index.value_counts(normalize=True, dropna=False), almost=True, ) self.assert_eq( kser.index.value_counts(ascending=True, dropna=False), pser.index.value_counts(ascending=True, dropna=False), almost=True, ) def test_value_counts(self): if LooseVersion(pyspark.__version__) < LooseVersion("2.4"): with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): self._test_value_counts() self.assertRaises( RuntimeError, lambda: ks.MultiIndex.from_tuples([("x", "a"), ("x", "b")]).value_counts(), ) else: self._test_value_counts() def test_nsmallest(self): sample_lst = [1, 2, 3, 4, np.nan, 6] pser = pd.Series(sample_lst, name="x") kser = ks.Series(sample_lst, name="x") self.assert_eq(kser.nsmallest(n=3), pser.nsmallest(n=3)) self.assert_eq(kser.nsmallest(), pser.nsmallest()) self.assert_eq((kser + 1).nsmallest(), (pser + 1).nsmallest()) def test_nlargest(self): sample_lst = [1, 2, 3, 4, np.nan, 6] pser = pd.Series(sample_lst, name="x") kser = ks.Series(sample_lst, name="x") self.assert_eq(kser.nlargest(n=3), pser.nlargest(n=3)) self.assert_eq(kser.nlargest(), pser.nlargest()) self.assert_eq((kser + 1).nlargest(), (pser + 1).nlargest()) def test_isnull(self): pser = pd.Series([1, 2, 3, 4, np.nan, 6], name="x") kser = ks.from_pandas(pser) self.assert_eq(kser.notnull(), pser.notnull()) self.assert_eq(kser.isnull(), pser.isnull()) pser = self.pser kser = self.kser self.assert_eq(kser.notnull(), pser.notnull()) self.assert_eq(kser.isnull(), pser.isnull()) def test_all(self): for pser in [ pd.Series([True, True], name="x"), pd.Series([True, False], name="x"), pd.Series([0, 1], name="x"), pd.Series([1, 2, 3], name="x"), pd.Series([True, True, None], name="x"), pd.Series([True, False, None], name="x"), pd.Series([], name="x"), pd.Series([np.nan], name="x"), ]: kser = ks.from_pandas(pser) self.assert_eq(kser.all(), pser.all()) pser = pd.Series([1, 2, 3, 4], name="x") kser = ks.from_pandas(pser) self.assert_eq((kser % 2 == 0).all(), (pser % 2 == 0).all()) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' 
): kser.all(axis=1) def test_any(self): for pser in [ pd.Series([False, False], name="x"), pd.Series([True, False], name="x"), pd.Series([0, 1], name="x"), pd.Series([1, 2, 3], name="x"), pd.Series([True, True, None], name="x"), pd.Series([True, False, None], name="x"), pd.Series([], name="x"), pd.Series([np.nan], name="x"), ]: kser = ks.from_pandas(pser) self.assert_eq(kser.any(), pser.any()) pser = pd.Series([1, 2, 3, 4], name="x") kser = ks.from_pandas(pser) self.assert_eq((kser % 2 == 0).any(), (pser % 2 == 0).any()) with self.assertRaisesRegex( NotImplementedError, 'axis should be either 0 or "index" currently.' ): kser.any(axis=1) def test_reset_index_with_default_index_types(self): pser = pd.Series([1, 2, 3], name="0", index=np.random.rand(3)) kser = ks.from_pandas(pser) with ks.option_context("compute.default_index_type", "sequence"): self.assert_eq(kser.reset_index(), pser.reset_index()) with ks.option_context("compute.default_index_type", "distributed-sequence"): # the order might be changed. self.assert_eq(kser.reset_index().sort_index(), pser.reset_index()) with ks.option_context("compute.default_index_type", "distributed"): # the index is different. self.assert_eq( kser.reset_index().to_pandas().reset_index(drop=True), pser.reset_index() ) def test_sort_values(self): pser = pd.Series([1, 2, 3, 4, 5, None, 7], name="0") kser = ks.from_pandas(pser) self.assert_eq(repr(kser.sort_values()), repr(pser.sort_values())) self.assert_eq( repr(kser.sort_values(ascending=False)), repr(pser.sort_values(ascending=False)) ) self.assert_eq( repr(kser.sort_values(na_position="first")), repr(pser.sort_values(na_position="first")) ) self.assertRaises(ValueError, lambda: kser.sort_values(na_position="invalid")) self.assert_eq(kser.sort_values(inplace=True), pser.sort_values(inplace=True)) self.assert_eq(repr(kser), repr(pser)) def test_sort_index(self): pser = pd.Series([2, 1, np.nan], index=["b", "a", np.nan], name="0") kser = ks.from_pandas(pser) # Assert invalid parameters self.assertRaises(NotImplementedError, lambda: kser.sort_index(axis=1)) self.assertRaises(NotImplementedError, lambda: kser.sort_index(kind="mergesort")) self.assertRaises(ValueError, lambda: kser.sort_index(na_position="invalid")) # Assert default behavior without parameters self.assert_eq(kser.sort_index(), pser.sort_index(), almost=True) # Assert sorting descending self.assert_eq( kser.sort_index(ascending=False), pser.sort_index(ascending=False), almost=True ) # Assert sorting NA indices first self.assert_eq( kser.sort_index(na_position="first"), pser.sort_index(na_position="first"), almost=True ) # Assert sorting inplace self.assertEqual(kser.sort_index(inplace=True), pser.sort_index(inplace=True)) self.assert_eq(kser, pser, almost=True) # Assert multi-indices pser = pd.Series(range(4), index=[["b", "b", "a", "a"], [1, 0, 1, 0]], name="0") kser = ks.from_pandas(pser) self.assert_eq(kser.sort_index(), pser.sort_index(), almost=True) self.assert_eq(kser.sort_index(level=[1, 0]), pser.sort_index(level=[1, 0]), almost=True) self.assert_eq(kser.reset_index().sort_index(), pser.reset_index().sort_index()) def test_to_datetime(self): pser = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100) kser = ks.from_pandas(pser) self.assert_eq( pd.to_datetime(pser, infer_datetime_format=True), ks.to_datetime(kser, infer_datetime_format=True), ) def test_missing(self): kser = self.kser missing_functions = inspect.getmembers(MissingPandasLikeSeries, inspect.isfunction) unsupported_functions = [ name for (name, type_) in 
missing_functions if type_.__name__ == "unsupported_function" ] for name in unsupported_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(kser, name)() deprecated_functions = [ name for (name, type_) in missing_functions if type_.__name__ == "deprecated_function" ] for name in deprecated_functions: with self.assertRaisesRegex( PandasNotImplementedError, "method.*Series.*{}.*is deprecated".format(name) ): getattr(kser, name)() missing_properties = inspect.getmembers( MissingPandasLikeSeries, lambda o: isinstance(o, property) ) unsupported_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "unsupported_property" ] for name in unsupported_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Series.*{}.*not implemented( yet\\.|\\. .+)".format(name), ): getattr(kser, name) deprecated_properties = [ name for (name, type_) in missing_properties if type_.fget.__name__ == "deprecated_property" ] for name in deprecated_properties: with self.assertRaisesRegex( PandasNotImplementedError, "property.*Series.*{}.*is deprecated".format(name) ): getattr(kser, name) def test_clip(self): pser = pd.Series([0, 2, 4], index=np.random.rand(3)) kser = ks.from_pandas(pser) # Assert list-like values are not accepted for 'lower' and 'upper' msg = "List-like value are not supported for 'lower' and 'upper' at the moment" with self.assertRaises(ValueError, msg=msg): kser.clip(lower=[1]) with self.assertRaises(ValueError, msg=msg): kser.clip(upper=[1]) # Assert no lower or upper self.assert_eq(kser.clip(), pser.clip()) # Assert lower only self.assert_eq(kser.clip(1), pser.clip(1)) # Assert upper only self.assert_eq(kser.clip(upper=3), pser.clip(upper=3)) # Assert lower and upper self.assert_eq(kser.clip(1, 3), pser.clip(1, 3)) # Assert behavior on string values str_kser = ks.Series(["a", "b", "c"]) self.assert_eq(str_kser.clip(1, 3), str_kser) def test_is_unique(self): # We can't use pandas' is_unique for comparison. 
pandas 0.23 ignores None pser = pd.Series([1, 2, 2, None, None]) kser = ks.from_pandas(pser) self.assertEqual(False, kser.is_unique) self.assertEqual(False, (kser + 1).is_unique) pser = pd.Series([1, None, None]) kser = ks.from_pandas(pser) self.assertEqual(False, kser.is_unique) self.assertEqual(False, (kser + 1).is_unique) pser = pd.Series([1]) kser = ks.from_pandas(pser) self.assertEqual(pser.is_unique, kser.is_unique) self.assertEqual((pser + 1).is_unique, (kser + 1).is_unique) pser = pd.Series([1, 1, 1]) kser = ks.from_pandas(pser) self.assertEqual(pser.is_unique, kser.is_unique) self.assertEqual((pser + 1).is_unique, (kser + 1).is_unique) def test_to_list(self): if LooseVersion(pd.__version__) >= LooseVersion("0.24.0"): self.assertEqual(self.kser.to_list(), self.pser.to_list()) def test_append(self): pser1 = pd.Series([1, 2, 3], name="0") pser2 = pd.Series([4, 5, 6], name="0") pser3 = pd.Series([4, 5, 6], index=[3, 4, 5], name="0") kser1 = ks.from_pandas(pser1) kser2 = ks.from_pandas(pser2) kser3 = ks.from_pandas(pser3) self.assert_eq(kser1.append(kser2), pser1.append(pser2)) self.assert_eq(kser1.append(kser3), pser1.append(pser3)) self.assert_eq( kser1.append(kser2, ignore_index=True), pser1.append(pser2, ignore_index=True) ) kser1.append(kser3, verify_integrity=True) msg = "Indices have overlapping values" with self.assertRaises(ValueError, msg=msg): kser1.append(kser2, verify_integrity=True) def test_map(self): pser = pd.Series(["cat", "dog", None, "rabbit"]) kser = ks.from_pandas(pser) # Currently Koalas doesn't return NaN as Pandas does. self.assertEqual( repr(kser.map({})), repr(pser.map({}).replace({pd.np.nan: None}).rename(0)) ) d = defaultdict(lambda: "abc") self.assertTrue("abc" in repr(kser.map(d))) self.assertEqual(repr(kser.map(d)), repr(pser.map(d).rename(0))) def tomorrow(date) -> datetime: return date + timedelta(days=1) pser = pd.Series([datetime(2019, 10, 24)]) kser = ks.from_pandas(pser) self.assertEqual(repr(kser.map(tomorrow)), repr(pser.map(tomorrow).rename(0))) def test_add_prefix(self): pser = pd.Series([1, 2, 3, 4], name="0") kser = ks.from_pandas(pser) self.assert_eq(pser.add_prefix("item_"), kser.add_prefix("item_")) pser = pd.Series( [1, 2, 3], name="0", index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]), ) kser = ks.from_pandas(pser) self.assert_eq(pser.add_prefix("item_"), kser.add_prefix("item_")) def test_add_suffix(self): pser = pd.Series([1, 2, 3, 4], name="0") kser = ks.from_pandas(pser) self.assert_eq(pser.add_suffix("_item"), kser.add_suffix("_item")) pser = pd.Series( [1, 2, 3], name="0", index=pd.MultiIndex.from_tuples([("A", "X"), ("A", "Y"), ("B", "X")]), ) kser = ks.from_pandas(pser) self.assert_eq(pser.add_suffix("_item"), kser.add_suffix("_item")) def test_pandas_wraps(self): # This test checks the return column name of `isna()`. Previously it returned the column # name as its internal expression which contains, for instance, '`f(x)`' in the middle of # column name which currently cannot be recognized in PySpark. 
@ks.pandas_wraps def f(x) -> ks.Series[int]: return 2 * x df = ks.DataFrame({"x": [1, None]}) self.assert_eq(f(df["x"]).isna(), pd.Series([False, True]).rename("f(x)")) def test_hist(self): pdf = pd.DataFrame( {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10] ) kdf = ks.from_pandas(pdf) def plot_to_base64(ax): bytes_data = BytesIO() ax.figure.savefig(bytes_data, format="png") bytes_data.seek(0) b64_data = base64.b64encode(bytes_data.read()) plt.close(ax.figure) return b64_data _, ax1 = plt.subplots(1, 1) # Using plot.hist() because pandas changes ticks props when called hist() ax1 = pdf["a"].plot.hist() _, ax2 = plt.subplots(1, 1) ax2 = kdf["a"].hist() self.assert_eq(plot_to_base64(ax1), plot_to_base64(ax2)) def test_cummin(self): pser = pd.Series([1.0, None, 0.0, 4.0, 9.0]).rename("a") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cummin()), repr(kser.cummin())) self.assertEqual(repr(pser.cummin(skipna=False)), repr(kser.cummin(skipna=False))) # with reversed index pser.index = [4, 3, 2, 1, 0] kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cummin()), repr(kser.cummin())) self.assertEqual(repr(pser.cummin(skipna=False)), repr(kser.cummin(skipna=False))) def test_cummax(self): pser = pd.Series([1.0, None, 0.0, 4.0, 9.0]).rename("a") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cummax()), repr(kser.cummax())) self.assertEqual(repr(pser.cummax(skipna=False)), repr(kser.cummax(skipna=False))) # with reversed index pser.index = [4, 3, 2, 1, 0] kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cummax()), repr(kser.cummax())) self.assertEqual(repr(pser.cummax(skipna=False)), repr(kser.cummax(skipna=False))) def test_cumsum(self): pser = pd.Series([1.0, None, 0.0, 4.0, 9.0]).rename("a") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cumsum()), repr(kser.cumsum())) self.assertEqual(repr(pser.cumsum(skipna=False)), repr(kser.cumsum(skipna=False))) # with reversed index pser.index = [4, 3, 2, 1, 0] kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cumsum()), repr(kser.cumsum())) self.assertEqual(repr(pser.cumsum(skipna=False)), repr(kser.cumsum(skipna=False))) def test_cumprod(self): pser = pd.Series([1.0, None, 1.0, 4.0, 9.0]).rename("a") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cumprod()), repr(kser.cumprod())) self.assertEqual(repr(pser.cumprod(skipna=False)), repr(kser.cumprod(skipna=False))) # with reversed index pser.index = [4, 3, 2, 1, 0] kser = ks.from_pandas(pser) self.assertEqual(repr(pser.cumprod()), repr(kser.cumprod())) self.assertEqual(repr(pser.cumprod(skipna=False)), repr(kser.cumprod(skipna=False))) with self.assertRaisesRegex(Exception, "values should be bigger than 0"): repr(ks.Series([0, 1]).cumprod()) def test_median(self): with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"): ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).median(accuracy="a") def test_rank(self): pser = pd.Series([1, 2, 3, 1], name="x") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.rank()), repr(kser.rank().sort_index())) self.assertEqual(repr(pser.rank()), repr(kser.rank().sort_index())) self.assertEqual( repr(pser.rank(ascending=False)), repr(kser.rank(ascending=False).sort_index()) ) self.assertEqual(repr(pser.rank(method="min")), repr(kser.rank(method="min").sort_index())) self.assertEqual(repr(pser.rank(method="max")), repr(kser.rank(method="max").sort_index())) self.assertEqual( repr(pser.rank(method="first")), repr(kser.rank(method="first").sort_index()) ) self.assertEqual( 
repr(pser.rank(method="dense")), repr(kser.rank(method="dense").sort_index()) ) msg = "method must be one of 'average', 'min', 'max', 'first', 'dense'" with self.assertRaisesRegex(ValueError, msg): kser.rank(method="nothing") def test_round(self): pser = pd.Series([0.028208, 0.038683, 0.877076], name="x") kser = ks.from_pandas(pser) self.assertEqual(repr(pser.round(2)), repr(kser.round(2))) msg = "decimals must be an integer" with self.assertRaisesRegex(ValueError, msg): kser.round(1.5) def test_quantile(self): with self.assertRaisesRegex(ValueError, "accuracy must be an integer; however"): ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(accuracy="a") with self.assertRaisesRegex(ValueError, "q must be a float of an array of floats;"): ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q="a") with self.assertRaisesRegex(ValueError, "q must be a float of an array of floats;"): ks.Series([24.0, 21.0, 25.0, 33.0, 26.0]).quantile(q=["a"]) def test_idxmax(self): pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"]) kser = ks.Series(pser) self.assertEqual(kser.idxmax(), pser.idxmax()) self.assertEqual(kser.idxmax(skipna=False), pser.idxmax(skipna=False)) index = pd.MultiIndex.from_arrays( [["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second") ) pser = pd.Series(data=[1, 2, 4, 5], index=index) kser = ks.Series(pser) self.assertEqual(kser.idxmax(), pser.idxmax()) self.assertEqual(kser.idxmax(skipna=False), pser.idxmax(skipna=False)) kser = ks.Series([]) with self.assertRaisesRegex(ValueError, "an empty sequence"): kser.idxmax() pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8]) kser = ks.Series(pser) self.assertEqual(kser.idxmax(), pser.idxmax()) self.assertEqual(repr(kser.idxmax(skipna=False)), repr(pser.idxmax(skipna=False))) def test_idxmin(self): pser = pd.Series(data=[1, 4, 5], index=["A", "B", "C"]) kser = ks.Series(pser) self.assertEqual(kser.idxmin(), pser.idxmin()) self.assertEqual(kser.idxmin(skipna=False), pser.idxmin(skipna=False)) index = pd.MultiIndex.from_arrays( [["a", "a", "b", "b"], ["c", "d", "e", "f"]], names=("first", "second") ) pser = pd.Series(data=[1, 2, 4, 5], index=index) kser = ks.Series(pser) self.assertEqual(kser.idxmin(), pser.idxmin()) self.assertEqual(kser.idxmin(skipna=False), pser.idxmin(skipna=False)) kser = ks.Series([]) with self.assertRaisesRegex(ValueError, "an empty sequence"): kser.idxmin() pser = pd.Series([1, 100, None, 100, 1, 100], index=[10, 3, 5, 2, 1, 8]) kser = ks.Series(pser) self.assertEqual(kser.idxmin(), pser.idxmin()) self.assertEqual(repr(kser.idxmin(skipna=False)), repr(pser.idxmin(skipna=False))) def test_shift(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) if LooseVersion(pd.__version__) < LooseVersion("0.24.2"): self.assertEqual(repr(kser.shift(periods=2)), repr(pser.shift(periods=2))) else: self.assertEqual( repr(kser.shift(periods=2, fill_value=0)), repr(pser.shift(periods=2, fill_value=0)) ) with self.assertRaisesRegex(ValueError, "periods should be an int; however"): kser.shift(periods=1.5) def test_astype(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) self.assert_eq(kser.astype(int), pser.astype(int)) self.assert_eq(kser.astype(bool), pser.astype(bool)) pser = pd.Series([10, 20, 15, 30, 45, None, np.nan], name="x") kser = ks.Series(pser) self.assert_eq(kser.astype(bool), pser.astype(bool)) pser = pd.Series(["hi", "hi ", " ", " \t", "", None], name="x") kser = ks.Series(pser) self.assert_eq(kser.astype(bool), pser.astype(bool)) 
self.assert_eq(kser.str.strip().astype(bool), pser.str.strip().astype(bool)) pser = pd.Series([True, False, None], name="x") kser = ks.Series(pser) self.assert_eq(kser.astype(bool), pser.astype(bool)) with self.assertRaisesRegex(ValueError, "Type int63 not understood"): kser.astype("int63") def test_aggregate(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) msg = "func must be a string or list of strings" with self.assertRaisesRegex(ValueError, msg): kser.aggregate({"x": ["min", "max"]}) msg = ( "If the given function is a list, it " "should only contains function names as strings." ) with self.assertRaisesRegex(ValueError, msg): kser.aggregate(["min", max]) def test_drop(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) msg = "Need to specify at least one of 'labels' or 'index'" with self.assertRaisesRegex(ValueError, msg): kser.drop() # For MultiIndex midx = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) msg = "'level' should be less than the number of indexes" with self.assertRaisesRegex(ValueError, msg): kser.drop(labels="weight", level=2) msg = ( "If the given index is a list, it " "should only contains names as strings, " "or a list of tuples that contain " "index names as strings" ) with self.assertRaisesRegex(ValueError, msg): kser.drop(["lama", ["cow", "falcon"]]) msg = "'index' type should be one of str, list, tuple" with self.assertRaisesRegex(ValueError, msg): kser.drop({"lama": "speed"}) msg = "Cannot specify both 'labels' and 'index'" with self.assertRaisesRegex(ValueError, msg): kser.drop("lama", index="cow") msg = r"'Key length \(2\) exceeds index depth \(3\)'" with self.assertRaisesRegex(KeyError, msg): kser.drop(("lama", "speed", "x")) self.assert_eq(kser.drop(("lama", "speed", "x"), level=1), kser) def test_pop(self): midx = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_eq(kser.pop(("lama", "speed")), pser.pop(("lama", "speed"))) msg = "'key' should be string or tuple that contains strings" with self.assertRaisesRegex(ValueError, msg): kser.pop(0) msg = ( "'key' should have index names as only strings " "or a tuple that contain index names as only strings" ) with self.assertRaisesRegex(ValueError, msg): kser.pop(("lama", 0)) msg = r"'Key length \(3\) exceeds index depth \(2\)'" with self.assertRaisesRegex(KeyError, msg): kser.pop(("lama", "speed", "x")) def test_replace(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) self.assert_eq(kser.replace(), pser.replace()) self.assert_eq(kser.replace({}), pser.replace({})) msg = "'to_replace' should be one of str, list, dict, int, float" with self.assertRaisesRegex(ValueError, msg): kser.replace(ks.range(5)) msg = "Replacement lists must match in length. 
Expecting 3 got 2" with self.assertRaisesRegex(ValueError, msg): kser.replace([10, 20, 30], [1, 2]) msg = "replace currently not support for regex" with self.assertRaisesRegex(NotImplementedError, msg): kser.replace(r"^1.$", regex=True) def test_xs(self): midx = pd.MultiIndex( [["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_eq(kser.xs(("a", "lama", "speed")), pser.xs(("a", "lama", "speed"))) def test_duplicates(self): psers = { "test on texts": pd.Series( ["lama", "cow", "lama", "beetle", "lama", "hippo"], name="animal" ), "test on numbers": pd.Series([1, 1, 2, 4, 3]), } keeps = ["first", "last", False] for (msg, pser), keep in product(psers.items(), keeps): with self.subTest(msg, keep=keep): kser = ks.Series(pser) self.assert_eq( pser.drop_duplicates(keep=keep).sort_values(), kser.drop_duplicates(keep=keep).sort_values(), ) def test_update(self): pser = pd.Series([10, 20, 15, 30, 45], name="x") kser = ks.Series(pser) msg = "'other' must be a Series" with self.assertRaisesRegex(ValueError, msg): kser.update(10) def test_where(self): pser1 = pd.Series([0, 1, 2, 3, 4], name=0) kser1 = ks.from_pandas(pser1) self.assert_eq(repr(pser1.where(pser1 > 3)), repr(kser1.where(kser1 > 3).sort_index())) def test_mask(self): pser1 = pd.Series([0, 1, 2, 3, 4], name=0) kser1 = ks.from_pandas(pser1) self.assert_eq(repr(pser1.mask(pser1 > 3)), repr(kser1.mask(kser1 > 3).sort_index())) def test_truncate(self): pser1 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7]) kser1 = ks.Series(pser1) pser2 = pd.Series([10, 20, 30, 40, 50, 60, 70], index=[7, 6, 5, 4, 3, 2, 1]) kser2 = ks.Series(pser2) self.assert_eq(kser1.truncate(), pser1.truncate()) self.assert_eq(kser1.truncate(before=2), pser1.truncate(before=2)) self.assert_eq(kser1.truncate(after=5), pser1.truncate(after=5)) self.assert_eq(kser1.truncate(copy=False), pser1.truncate(copy=False)) self.assert_eq(kser1.truncate(2, 5, copy=False), pser1.truncate(2, 5, copy=False)) self.assert_eq(kser2.truncate(4, 6), pser2.truncate(4, 6)) self.assert_eq(kser2.truncate(4, 6, copy=False), pser2.truncate(4, 6, copy=False)) kser = ks.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 3, 2, 1]) msg = "truncate requires a sorted index" with self.assertRaisesRegex(ValueError, msg): kser.truncate() kser = ks.Series([10, 20, 30, 40, 50, 60, 70], index=[1, 2, 3, 4, 5, 6, 7]) msg = "Truncate: 2 must be after 5" with self.assertRaisesRegex(ValueError, msg): kser.truncate(5, 2) def test_getitem(self): pser = pd.Series([10, 20, 15, 30, 45], ["A", "A", "B", "C", "D"]) kser = ks.Series(pser) self.assert_eq(kser["A"], pser["A"]) self.assert_eq(kser["B"], pser["B"]) self.assert_eq(kser[kser > 15], pser[pser > 15]) # for MultiIndex midx = pd.MultiIndex( [["a", "b", "c"], ["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 0, 0, 0, 1, 2, 0, 1, 2]], ) pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], name="0", index=midx) kser = ks.Series(pser) self.assert_eq(kser["a"], pser["a"]) self.assert_eq(kser["a", "lama"], pser["a", "lama"]) self.assert_eq(kser[kser > 1.5], pser[pser > 1.5]) msg = r"'Key length \(4\) exceeds index depth \(3\)'" with self.assertRaisesRegex(KeyError, msg): kser[("a", "lama", "speed", "x")] def test_keys(self): midx = pd.MultiIndex( 
[["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_eq(kser.keys(), pser.keys()) def test_index(self): # to check setting name of Index properly. idx = pd.Index([1, 2, 3, 4, 5, 6, 7, 8, 9]) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=idx) pser = kser.to_pandas() kser.name = "koalas" pser.name = "koalas" self.assert_eq(kser.index.name, pser.index.name) # for check setting names of MultiIndex properly. kser.names = ["hello", "koalas"] pser.names = ["hello", "koalas"] self.assert_eq(kser.index.names, pser.index.names) def test_pct_change(self): kser = ks.Series([90, 91, 85], index=[2, 4, 1]) pser = kser.to_pandas() self.assert_eq(kser.pct_change(periods=-1), pser.pct_change(periods=-1), almost=True) self.assert_eq( kser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000), almost=True ) self.assert_eq( kser.pct_change(periods=100000000), pser.pct_change(periods=100000000), almost=True ) # for MultiIndex midx = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_eq(kser.pct_change(), pser.pct_change(), almost=True) self.assert_eq(kser.pct_change(periods=2), pser.pct_change(periods=2), almost=True) self.assert_eq(kser.pct_change(periods=-1), pser.pct_change(periods=-1), almost=True) self.assert_eq( kser.pct_change(periods=-100000000), pser.pct_change(periods=-100000000), almost=True ) self.assert_eq( kser.pct_change(periods=100000000), pser.pct_change(periods=100000000), almost=True ) def test_axes(self): kser = ks.Series([90, 91, 85], index=[2, 4, 1]) pser = kser.to_pandas() self.assert_list_eq(kser.axes, pser.axes) # for MultiIndex midx = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx) pser = kser.to_pandas() self.assert_list_eq(kser.axes, pser.axes) def test_combine_first(self): kser1 = ks.Series({"falcon": 330.0, "eagle": 160.0}) kser2 = ks.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0}) pser1 = kser1.to_pandas() pser2 = kser2.to_pandas() self.assert_eq( repr(kser1.combine_first(kser2).sort_index()), repr(pser1.combine_first(pser2).sort_index()), ) with self.assertRaisesRegex( ValueError, "`combine_first` only allows `Series` for parameter `other`" ): kser1.combine_first(50) # MultiIndex midx1 = pd.MultiIndex( [["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]], [[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]], ) midx2 = pd.MultiIndex( [["lama", "cow", "falcon"], ["speed", "weight", "length"]], [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]], ) kser1 = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1) kser2 = ks.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2) pser1 = kser1.to_pandas() pser2 = kser2.to_pandas() self.assert_eq( repr(kser1.combine_first(kser2).sort_index()), repr(pser1.combine_first(pser2).sort_index()), ) # Series come from same DataFrame kdf = ks.DataFrame( { "A": {"falcon": 330.0, "eagle": 160.0}, "B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0}, } ) kser1 = kdf.A kser2 = kdf.B pser1 = kser1.to_pandas() pser2 = kser2.to_pandas() 
self.assert_eq( repr(kser1.combine_first(kser2).sort_index()), repr(pser1.combine_first(pser2).sort_index()), ) def test_udt(self): sparse_values = {0: 0.1, 1: 1.1} sparse_vector = SparseVector(len(sparse_values), sparse_values) pser = pd.Series([sparse_vector]) if LooseVersion(pyspark.__version__) < LooseVersion("2.4"): with self.sql_conf({"spark.sql.execution.arrow.enabled": False}): kser = ks.from_pandas(pser) self.assert_eq(kser, pser) else: kser = ks.from_pandas(pser) self.assert_eq(kser, pser) def test_repeat(self): pser = pd.Series(["a", "b", "c"], name="0", index=np.random.rand(3)) kser = ks.from_pandas(pser) self.assert_eq(kser.repeat(3).sort_index(), pser.repeat(3).sort_index()) self.assert_eq(kser.repeat(0).sort_index(), pser.repeat(0).sort_index()) self.assertRaises(ValueError, lambda: kser.repeat(-1)) self.assertRaises(ValueError, lambda: kser.repeat("abc")) def test_take(self): pser = pd.Series([100, 200, 300, 400, 500], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(kser.take([0, 2, 4]).sort_values(), pser.take([0, 2, 4]).sort_values()) self.assert_eq( kser.take(range(0, 5, 2)).sort_values(), pser.take(range(0, 5, 2)).sort_values() ) self.assert_eq(kser.take([-4, -2, 0]).sort_values(), pser.take([-4, -2, 0]).sort_values()) self.assert_eq( kser.take(range(-2, 1, 2)).sort_values(), pser.take(range(-2, 1, 2)).sort_values() ) # Checking the type of indices. self.assertRaises(ValueError, lambda: kser.take(1)) self.assertRaises(ValueError, lambda: kser.take("1")) self.assertRaises(ValueError, lambda: kser.take({1, 2})) self.assertRaises(ValueError, lambda: kser.take({1: None, 2: None})) def test_divmod(self): pser = pd.Series([100, None, 300, None, 500], name="Koalas") kser = ks.from_pandas(pser) if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"): self.assert_eq(repr(kser.divmod(-100)), repr(pser.divmod(-100))) self.assert_eq(repr(kser.divmod(100)), repr(pser.divmod(100))) elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"): expected_result = repr((pser.floordiv(-100), pser.mod(-100))) self.assert_eq(repr(kser.divmod(-100)), expected_result) expected_result = repr((pser.floordiv(100), pser.mod(100))) self.assert_eq(repr(kser.divmod(100)), expected_result) def test_rdivmod(self): pser = pd.Series([100, None, 300, None, 500], name="Koalas") kser = ks.from_pandas(pser) if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"): self.assert_eq(repr(kser.rdivmod(-100)), repr(pser.rdivmod(-100))) self.assert_eq(repr(kser.rdivmod(100)), repr(pser.rdivmod(100))) elif LooseVersion(pd.__version__) < LooseVersion("1.0.0"): expected_result = repr((pser.rfloordiv(-100), pser.rmod(-100))) self.assert_eq(repr(kser.rdivmod(-100)), expected_result) expected_result = repr((pser.rfloordiv(100), pser.rmod(100))) self.assert_eq(repr(kser.rdivmod(100)), expected_result) def test_mod(self): pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(repr(kser.mod(-150)), repr(pser.mod(-150))) self.assert_eq(repr(kser.mod(0)), repr(pser.mod(0))) self.assert_eq(repr(kser.mod(150)), repr(pser.mod(150))) pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6}) kdf = ks.from_pandas(pdf) self.assert_eq(repr(kdf.a.mod(kdf.b)), repr(pdf.a.mod(pdf.b).rename("a"))) def test_rmod(self): pser = pd.Series([100, None, -300, None, 500, -700], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(repr(kser.rmod(-150)), repr(pser.rmod(-150))) self.assert_eq(repr(kser.rmod(0)), repr(pser.rmod(0))) 
self.assert_eq(repr(kser.rmod(150)), repr(pser.rmod(150))) pdf = pd.DataFrame({"a": [100, None, -300, None, 500, -700], "b": [150] * 6}) kdf = ks.from_pandas(pdf) self.assert_eq(repr(kdf.a.rmod(kdf.b)), repr(pdf.a.rmod(pdf.b).rename("a"))) def test_asof(self): pser = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(repr(kser.asof(20)), repr(pser.asof(20))) self.assert_eq(repr(kser.asof([5, 20]).sort_index()), repr(pser.asof([5, 20]).sort_index())) self.assert_eq(repr(kser.asof(100)), repr(pser.asof(100))) self.assert_eq(repr(kser.asof(-100)), repr(pser.asof(-100))) self.assert_eq(repr(kser.asof(-100)), repr(pser.asof(-100))) self.assert_eq( repr(kser.asof([-100, 100]).sort_index()), repr(pser.asof([-100, 100]).sort_index()) ) # where cannot be an Index, Series or a DataFrame self.assertRaises(ValueError, lambda: kser.asof(ks.Index([-100, 100]))) self.assertRaises(ValueError, lambda: kser.asof(ks.Series([-100, 100]))) self.assertRaises(ValueError, lambda: kser.asof(ks.DataFrame({"A": [1, 2, 3]}))) # asof is not supported for a MultiIndex pser.index = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("y", "d")]) kser = ks.from_pandas(pser) self.assertRaises(ValueError, lambda: kser.asof(20)) # asof requires a sorted index (More precisely, should be a monotonic increasing) kser = ks.Series([1, 2, np.nan, 4], index=[10, 30, 20, 40], name="Koalas") self.assertRaises(ValueError, lambda: kser.asof(20)) kser = ks.Series([1, 2, np.nan, 4], index=[40, 30, 20, 10], name="Koalas") self.assertRaises(ValueError, lambda: kser.asof(20)) def test_squeeze(self): # Single value kser = ks.Series([90]) pser = kser.to_pandas() self.assert_eq(kser.squeeze(), pser.squeeze()) # Single value with MultiIndex midx = pd.MultiIndex.from_tuples([("a", "b", "c")]) kser = ks.Series([90], index=midx) pser = kser.to_pandas() self.assert_eq(kser.squeeze(), pser.squeeze()) # Multiple values kser = ks.Series([90, 91, 85]) pser = kser.to_pandas() self.assert_eq(kser.squeeze(), pser.squeeze()) # Multiple values with MultiIndex midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) kser = ks.Series([90, 91, 85], index=midx) pser = kser.to_pandas() self.assert_eq(kser.squeeze(), pser.squeeze()) def test_div_zero_and_nan(self): pser = pd.Series([100, None, -300, None, 500, -700, np.inf, -np.inf], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(repr(pser.div(0)), repr(kser.div(0))) self.assert_eq(repr(pser.truediv(0)), repr(kser.truediv(0))) self.assert_eq(repr(pser / 0), repr(kser / 0)) self.assert_eq(repr(pser.div(np.nan)), repr(kser.div(np.nan))) self.assert_eq(repr(pser.truediv(np.nan)), repr(kser.truediv(np.nan))) self.assert_eq(repr(pser / np.nan), repr(kser / np.nan)) # floordiv has different behavior in pandas > 1.0.0 when divide by 0 if LooseVersion(pd.__version__) >= LooseVersion("1.0.0"): self.assert_eq(repr(pser.floordiv(0)), repr(kser.floordiv(0))) self.assert_eq(repr(pser // 0), repr(kser // 0)) else: result = pd.Series( [np.inf, np.nan, -np.inf, np.nan, np.inf, -np.inf, np.inf, -np.inf], name="Koalas" ) self.assert_eq(repr(kser.floordiv(0)), repr(result)) self.assert_eq(repr(kser // 0), repr(result)) self.assert_eq(repr(pser.floordiv(np.nan)), repr(kser.floordiv(np.nan))) def test_mad(self): pser = pd.Series([1, 2, 3, 4], name="Koalas") kser = ks.from_pandas(pser) self.assert_eq(pser.mad(), kser.mad()) pser = pd.Series([None, -2, 5, 10, 50, np.nan, -20], name="Koalas") kser = ks.from_pandas(pser) 
self.assert_eq(pser.mad(), kser.mad()) pmidx = pd.MultiIndex.from_tuples( [("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")] ) pser = pd.Series([1, 2, 3, 4, 5], name="Koalas") pser.index = pmidx kser = ks.from_pandas(pser) self.assert_eq(pser.mad(), kser.mad()) pmidx = pd.MultiIndex.from_tuples( [("a", "1"), ("a", "2"), ("b", "1"), ("b", "2"), ("c", "1")] ) pser = pd.Series([None, -2, 5, 50, np.nan], name="Koalas") pser.index = pmidx kser = ks.from_pandas(pser) self.assert_eq(pser.mad(), kser.mad()) def test_to_frame(self): kser = ks.Series(["a", "b", "c"]) pser = kser.to_pandas() self.assert_eq(pser.to_frame(name="a"), kser.to_frame(name="a")) # for MultiIndex midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) kser = ks.Series(["a", "b", "c"], index=midx) pser = kser.to_pandas() self.assert_eq(pser.to_frame(name="a"), kser.to_frame(name="a")) def test_shape(self): kser = ks.Series(["a", "b", "c"]) pser = kser.to_pandas() self.assert_eq(pser.shape, kser.shape) # for MultiIndex midx = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y"), ("c", "z")]) kser = ks.Series(["a", "b", "c"], index=midx) pser = kser.to_pandas() self.assert_eq(pser.shape, kser.shape) def test_unstack(self): pser = pd.Series( [10, -2, 4, 7], index=pd.MultiIndex.from_tuples( [("one", "a", "z"), ("one", "b", "x"), ("two", "a", "c"), ("two", "b", "v")] ), ) kser = ks.from_pandas(pser) levels = [-3, -2, -1, 0, 1, 2] for level in levels: self.assert_eq(pser.unstack(level=level), kser.unstack(level=level).sort_index()) # Exceeding the range of level self.assertRaises(IndexError, lambda: kser.unstack(level=3)) self.assertRaises(IndexError, lambda: kser.unstack(level=-4)) # Only support for MultiIndex kser = ks.Series([10, -2, 4, 7]) self.assertRaises(ValueError, lambda: kser.unstack()) def test_item(self): kser = ks.Series([10, 20]) self.assertRaises(ValueError, lambda: kser.item())
1
15237
Shall we keep this test?
databricks-koalas
py
@@ -102,12 +102,15 @@ class Tx_Solr_Backend_IndexingConfigurationSelectorField { /** * Renders a field to select which indexing configurations to initialize. * - * Uses TCEforms. + * Uses \TYPO3\CMS\Backend\Form\FormEngine. * * @return string Markup for the select field */ public function render() { - $tceForm = t3lib_div::makeInstance('t3lib_TCEforms'); + $selectFieldRenderer = $formEngine = \TYPO3\CMS\Core\Utility\GeneralUtility::makeInstance('TYPO3\\CMS\\Backend\\Form\\FormEngine'); + if (class_exists('TYPO3\\CMS\Backend\\Form\\Element\\SelectElement')) { + $selectFieldRenderer = \TYPO3\CMS\Core\Utility\GeneralUtility::makeInstance('TYPO3\\CMS\Backend\\Form\\Element\\SelectElement', $formEngine); + } // transform selected values into the format used by TCEforms $selectedValues = array();
1
<?php /*************************************************************** * Copyright notice * * (c) 2013 Ingo Renner <[email protected]> * All rights reserved * * This script is part of the TYPO3 project. The TYPO3 project is * free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The GNU General Public License can be found at * http://www.gnu.org/copyleft/gpl.html. * * This script is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * This copyright notice MUST APPEAR in all copies of the script! ***************************************************************/ /** * Index Queue indexing configuration selector form field. * * @author Ingo Renner <[email protected]> * @package TYPO3 * @subpackage solr */ class Tx_Solr_Backend_IndexingConfigurationSelectorField { /** * Site used to determine indexing configurations * * @var Tx_Solr_Site */ protected $site; /** * Form element name * * @var string */ protected $formElementName = 'tx_solr-index-queue-indexing-configuration-selector'; /** * Selected values * * @var array */ protected $selectedValues = array(); /** * Constructor * * @param Tx_Solr_Site $site The site to use to determine indexing configurations */ public function __construct(Tx_Solr_Site $site = NULL) { $this->site = $site; } /** * Sets the form element name. * * @param string $formElementName Form element name */ public function setFormElementName($formElementName) { $this->formElementName = $formElementName; } /** * Gets the form element name. * * @return string form element name */ public function getFormElementName() { return $this->formElementName; } /** * Sets the selected values. * * @param array $selectedValues */ public function setSelectedValues(array $selectedValues) { $this->selectedValues = $selectedValues; } /** * Gets the selected values. * * @return array */ public function getSelectedValues() { return $this->selectedValues; } /** * Renders a field to select which indexing configurations to initialize. * * Uses TCEforms. * * @return string Markup for the select field */ public function render() { $tceForm = t3lib_div::makeInstance('t3lib_TCEforms'); // transform selected values into the format used by TCEforms $selectedValues = array(); foreach ($this->selectedValues as $selectedValue) { $selectedValues[] = $selectedValue . '|1'; } $selectedValues = implode(',', $selectedValues); $PA = array( 'fieldChangeFunc' => array(), 'itemFormElName' => $this->formElementName, 'itemFormElValue' => $selectedValues ); $tablesToIndex = $this->getIndexQueueConfigurationTableMap(); $formField = $tceForm->getSingleField_typeSelect_checkbox( '', // table '', // field '', // row $PA, // array with additional configuration options array(), // config, $this->buildSelectorItems($tablesToIndex), // items '' // Label for no-matching-value ); // need to wrap the field in a TCEforms table to make the CSS apply $form = ' <table class="typo3-TCEforms tx_solr-TCEforms"> <tr> <td>' . "\n" . $formField . "\n" . '</td> </tr> </table> '; return $form; } /** * Builds a map of indexing configuration names to tables to to index. 
* * @return array Indexing configuration to database table map */ protected function getIndexQueueConfigurationTableMap() { $indexingTableMap = array(); $solrConfiguration = Tx_Solr_Util::getSolrConfigurationFromPageId($this->site->getRootPageId()); foreach ($solrConfiguration['index.']['queue.'] as $name => $configuration) { if (is_array($configuration)) { $name = substr($name, 0, -1); if ($solrConfiguration['index.']['queue.'][$name]) { $table = $name; if ($solrConfiguration['index.']['queue.'][$name . '.']['table']) { $table = $solrConfiguration['index.']['queue.'][$name . '.']['table']; } $indexingTableMap[$name] = $table; } } } return $indexingTableMap; } /** * Builds the items to render in the TCEforms select field. * * @param array $tablesToIndex A map of indexing configuration to database tables * @return array Selectable items for the TCEforms select field */ protected function buildSelectorItems(array $tablesToIndex) { $selectorItems = array(); foreach ($tablesToIndex as $configurationName => $tableName) { $icon = 'tcarecords-' . $tableName . '-default'; if ($tableName == 'pages') { $icon = 'apps-pagetree-page-default'; } $labelTableName = ''; if ($configurationName != $tableName) { $labelTableName = ' (' . $tableName . ')'; } $selectorItems[] = array( $configurationName . $labelTableName, $configurationName, $icon ); } return $selectorItems; } } if (defined('TYPO3_MODE') && $GLOBALS['TYPO3_CONF_VARS'][TYPO3_MODE]['XCLASS']['ext/solr/Classes/backend/IndexingConfigurationSelectorField.php']) { include_once($GLOBALS['TYPO3_CONF_VARS'][TYPO3_MODE]['XCLASS']['ext/solr/Classes/backend/IndexingConfigurationSelectorField.php']); } ?>
1
5454
When would this be the case? / When would SelectElement not be available? Is this change compatible with 6.2? (I'd like to roll a release that works with 6.2+ first, before moving on to 7.x)
TYPO3-Solr-ext-solr
php
@@ -257,6 +257,10 @@ public abstract class DataType implements Serializable { + ((long) (readBuffer[7] & 255) << 56)); } + public boolean isSameCatalog(DataType other) { + return false; + } + // all data should be read in little endian. public TiChunkColumnVector decodeChunkColumn(CodecDataInput cdi) { int numRows = readIntLittleEndian(cdi);
1
/* * Copyright 2017 PingCAP, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * See the License for the specific language governing permissions and * limitations under the License. */ package com.pingcap.tikv.types; import static com.pingcap.tikv.codec.Codec.isNullFlag; import static java.nio.ByteOrder.LITTLE_ENDIAN; import static java.util.Objects.requireNonNull; import com.google.common.collect.ImmutableList; import com.pingcap.tidb.tipb.ExprType; import com.pingcap.tikv.codec.Codec; import com.pingcap.tikv.codec.CodecDataInput; import com.pingcap.tikv.codec.CodecDataOutput; import com.pingcap.tikv.columnar.TiChunkColumnVector; import com.pingcap.tikv.exception.ConvertNotSupportException; import com.pingcap.tikv.exception.ConvertOverflowException; import com.pingcap.tikv.exception.TypeException; import com.pingcap.tikv.meta.Collation; import com.pingcap.tikv.meta.TiColumnInfo; import com.pingcap.tikv.meta.TiColumnInfo.InternalTypeHolder; import java.io.Serializable; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; /** Base Type for encoding and decoding TiDB row information. */ public abstract class DataType implements Serializable { private static final Long MaxInt8 = (1L << 7) - 1; private static final Long MinInt8 = -1L << 7; private static final Long MaxInt16 = (1L << 15) - 1; private static final Long MinInt16 = -1L << 15; private static final Long MaxInt24 = (1L << 23) - 1; private static final Long MinInt24 = -1L << 23; private static final Long MaxInt32 = (1L << 31) - 1; private static final Long MinInt32 = -1L << 31; private static final Long MaxInt64 = (1L << 63) - 1; private static final Long MinInt64 = -1L << 63; private static final Long MaxUint8 = (1L << 8) - 1; private static final Long MaxUint16 = (1L << 16) - 1; private static final Long MaxUint24 = (1L << 24) - 1; private static final Long MaxUint32 = (1L << 32) - 1; private static final Long MaxUint64 = -1L; // Flag Information for strict mysql type public static final int NotNullFlag = 1; /* Field can't be NULL */ public static final int PriKeyFlag = 2; /* Field is part of a primary key */ public static final int UniqueKeyFlag = 4; /* Field is part of a unique key */ public static final int MultipleKeyFlag = 8; /* Field is part of a key */ public static final int BlobFlag = 16; /* Field is a blob */ public static final int UnsignedFlag = 32; /* Field is unsigned */ public static final int ZerofillFlag = 64; /* Field is zerofill */ public static final int BinaryFlag = 128; /* Field is binary */ public static final int EnumFlag = 256; /* Field is an enum */ public static final int AutoIncrementFlag = 512; /* Field is an auto increment field */ public static final int TimestampFlag = 1024; /* Field is a timestamp */ public static final int SetFlag = 2048; /* Field is a set */ public static final int NoDefaultValueFlag = 4096; /* Field doesn't have a default value */ public static final int OnUpdateNowFlag = 8192; /* Field is set to NOW on UPDATE */ public static final int NumFlag = 32768; /* Field is a num (for clients) */ public static final long COLUMN_VERSION_FLAG = 1; public DataType(MySQLType tp, int prec, int scale) { this.tp = tp; this.flag = 0; this.elems = 
ImmutableList.of(); this.length = prec; this.decimal = scale; this.charset = ""; this.collation = Collation.DEF_COLLATION_CODE; } public enum EncodeType { KEY, VALUE, PROTO } public static final int UNSPECIFIED_LEN = -1; // MySQL type protected final MySQLType tp; // Not Encode/Decode flag, this is used to strict mysql type // such as not null, timestamp protected final int flag; protected final int decimal; private final String charset; protected final int collation; protected final long length; private final List<String> elems; protected DataType(TiColumnInfo.InternalTypeHolder holder) { this.tp = MySQLType.fromTypeCode(holder.getTp()); this.flag = holder.getFlag(); this.length = holder.getFlen(); this.decimal = holder.getDecimal(); this.charset = holder.getCharset(); this.collation = Collation.translate(holder.getCollate()); this.elems = holder.getElems() == null ? ImmutableList.of() : holder.getElems(); } protected DataType(MySQLType type) { this.tp = type; this.flag = 0; this.elems = ImmutableList.of(); this.length = UNSPECIFIED_LEN; this.decimal = UNSPECIFIED_LEN; this.charset = ""; this.collation = Collation.DEF_COLLATION_CODE; } protected DataType( MySQLType type, int flag, int len, int decimal, String charset, int collation) { this.tp = type; this.flag = flag; this.elems = ImmutableList.of(); this.length = len; this.decimal = decimal; this.charset = charset; this.collation = collation; } public Long signedLowerBound() throws TypeException { switch (this.getType()) { case TypeTiny: return MinInt8; case TypeShort: return MinInt16; case TypeInt24: return MinInt24; case TypeLong: return MinInt32; case TypeLonglong: return MinInt64; default: throw new TypeException("Signed Lower Bound: Input Type is not a mysql SIGNED type"); } } public Long signedUpperBound() throws TypeException { switch (this.getType()) { case TypeTiny: return MaxInt8; case TypeShort: return MaxInt16; case TypeInt24: return MaxInt24; case TypeLong: return MaxInt32; case TypeLonglong: return MaxInt64; default: throw new TypeException("Signed Upper Bound: Input Type is not a mysql SIGNED type"); } } public Long unsignedUpperBound() throws TypeException { switch (this.getType()) { case TypeTiny: return MaxUint8; case TypeShort: return MaxUint16; case TypeInt24: return MaxUint24; case TypeLong: return MaxUint32; case TypeLonglong: case TypeBit: case TypeEnum: case TypeSet: return MaxUint64; default: throw new TypeException("Unsigned Upper Bound: Input Type is not a mysql UNSIGNED type"); } } protected abstract Object decodeNotNull(int flag, CodecDataInput cdi); private int getFixLen() { switch (this.getType()) { case TypeFloat: return 4; case TypeTiny: case TypeShort: case TypeInt24: case TypeLong: case TypeLonglong: case TypeDouble: case TypeYear: case TypeDuration: case TypeTimestamp: case TypeDate: case TypeDatetime: return 8; case TypeDecimal: throw new UnsupportedOperationException( "this should not get involved in calculation process"); case TypeNewDecimal: return 40; default: return -1; } } private byte[] setAllNotNull(int numNullBitMapBytes) { byte[] nullBitMaps = new byte[numNullBitMapBytes]; for (int i = 0; i < numNullBitMapBytes; ) { // allNotNullBitNMap's actual length int numAppendBytes = Math.min(numNullBitMapBytes - i, 128); if (numAppendBytes >= 0) System.arraycopy(allNotNullBitMap, 0, nullBitMaps, i, numAppendBytes); i += numAppendBytes; } return nullBitMaps; } private final byte[] allNotNullBitMap = initAllNotNullBitMap(); private byte[] initAllNotNullBitMap() { byte[] allNotNullBitMap = new byte[128]; 
Arrays.fill(allNotNullBitMap, (byte) 0xFF); return allNotNullBitMap; } private int readIntLittleEndian(CodecDataInput cdi) { int ch1 = cdi.readUnsignedByte(); int ch2 = cdi.readUnsignedByte(); int ch3 = cdi.readUnsignedByte(); int ch4 = cdi.readUnsignedByte(); return ((ch1) + (ch2 << 8) + (ch3 << 16) + (ch4 << 24)); } private final byte[] readBuffer = new byte[8]; private long readLongLittleEndian(CodecDataInput cdi) { cdi.readFully(readBuffer, 0, 8); return ((readBuffer[0] & 255) + ((readBuffer[1] & 255) << 8) + ((readBuffer[2] & 255) << 16) + ((readBuffer[3] & 255) << 24) + ((long) (readBuffer[4] & 255) << 32) + ((long) (readBuffer[5] & 255) << 40) + ((long) (readBuffer[6] & 255) << 48) + ((long) (readBuffer[7] & 255) << 56)); } // all data should be read in little endian. public TiChunkColumnVector decodeChunkColumn(CodecDataInput cdi) { int numRows = readIntLittleEndian(cdi); int numNulls = readIntLittleEndian(cdi); assert (numRows >= 0) && (numNulls >= 0); int numNullBitmapBytes = (numRows + 7) / 8; byte[] nullBitMaps = new byte[numNullBitmapBytes]; if (numNulls > 0) { cdi.readFully(nullBitMaps); } else { nullBitMaps = setAllNotNull(numNullBitmapBytes); } int numFixedBytes = getFixLen(); int numDataBytes = numFixedBytes * numRows; int numOffsets; long[] offsets = null; // handle var element if (numFixedBytes == -1) { numOffsets = numRows + 1; // read numOffsets * 8 bytes array // and convert bytes to int64 offsets = new long[numOffsets]; for (int i = 0; i < numOffsets; i++) { offsets[i] = readLongLittleEndian(cdi); } numDataBytes = (int) offsets[numRows]; } // TODO this costs a lot, we need to find a way to avoid. byte[] dataBuffer = new byte[numDataBytes]; cdi.readFully(dataBuffer); ByteBuffer buffer = ByteBuffer.wrap(dataBuffer); buffer.order(LITTLE_ENDIAN); return new TiChunkColumnVector( this, numFixedBytes, numRows, numNulls, nullBitMaps, offsets, buffer); } /** * decode value from row which is nothing. * * @param cdi source of data. */ public Object decode(CodecDataInput cdi) { int flag = cdi.readUnsignedByte(); if (isNullFlag(flag)) { return null; } return decodeNotNull(flag, cdi); } public boolean isNextNull(CodecDataInput cdi) { return isNullFlag(cdi.peekByte()); } public static void encodeMaxValue(CodecDataOutput cdo) { cdo.writeByte(Codec.MAX_FLAG); } public static void encodeNull(CodecDataOutput cdo) { cdo.writeByte(Codec.NULL_FLAG); } public static void encodeIndex(CodecDataOutput cdo) { cdo.writeByte(Codec.BYTES_FLAG); } /** * encode a Row to CodecDataOutput * * @param cdo destination of data. * @param encodeType Key or Value. * @param value value to be encoded. */ public void encode(CodecDataOutput cdo, EncodeType encodeType, Object value) { requireNonNull(cdo, "cdo is null"); if (value == null) { if (encodeType != EncodeType.PROTO) { encodeNull(cdo); } } else { switch (encodeType) { case KEY: encodeKey(cdo, value); return; case VALUE: encodeValue(cdo, value); return; case PROTO: encodeProto(cdo, value); return; default: throw new TypeException("Unknown encoding type " + encodeType); } } } /** * Convert from Spark SQL Supported Java Type to TiDB Type * * <p>1. data convert, e.g. Integer -> SHORT * * <p>2. check overflow, e.g. write 1000 to short * * <p>Spark SQL only support following types: * * <p>1. BooleanType -> java.lang.Boolean 2. ByteType -> java.lang.Byte 3. ShortType -> * java.lang.Short 4. IntegerType -> java.lang.Integer 5. LongType -> java.lang.Long 6. FloatType * -> java.lang.Float 7. DoubleType -> java.lang.Double 8. StringType -> String 9. 
DecimalType -> * java.math.BigDecimal 10. DateType -> java.sql.Date 11. TimestampType -> java.sql.Timestamp 12. * BinaryType -> byte array 13. ArrayType -> scala.collection.Seq (use getList for java.util.List) * 14. MapType -> scala.collection.Map (use getJavaMap for java.util.Map) 15. StructType -> * org.apache.spark.sql.Row * * @param value * @return * @throws ConvertNotSupportException * @throws ConvertOverflowException */ public Object convertToTiDBType(Object value) throws ConvertNotSupportException, ConvertOverflowException { if (value == null) { return null; } else { return doConvertToTiDBType(value); } } protected abstract Object doConvertToTiDBType(Object value) throws ConvertNotSupportException, ConvertOverflowException; protected abstract void encodeKey(CodecDataOutput cdo, Object value); protected abstract void encodeValue(CodecDataOutput cdo, Object value); protected abstract void encodeProto(CodecDataOutput cdo, Object value); public abstract String getName(); /** * encode a Key's prefix to CodecDataOutput * * @param cdo destination of data. * @param value value to be encoded. * @param prefixLength specifies prefix length of value to be encoded. When prefixLength is * DataType.UNSPECIFIED_LEN, encode full length of value. */ public void encodeKey(CodecDataOutput cdo, Object value, int prefixLength) { requireNonNull(cdo, "cdo is null"); if (value == null) { encodeNull(cdo); } else if (DataType.isLengthUnSpecified(prefixLength)) { encodeKey(cdo, value); } else if (isPrefixIndexSupported()) { byte[] bytes; // When charset is utf8/utf8mb4, prefix length should be the number of utf8 characters // rather than length of its encoded byte value. if (getCharset().equalsIgnoreCase("utf8") || getCharset().equalsIgnoreCase("utf8mb4")) { bytes = Converter.convertUtf8ToBytes(value, prefixLength); } else { bytes = Converter.convertToBytes(value, prefixLength); } Codec.BytesCodec.writeBytesFully(cdo, bytes); } else { throw new TypeException("Data type can not encode with prefix"); } } /** * Indicates whether a data type supports prefix index * * @return returns true iff the type is BytesType */ protected boolean isPrefixIndexSupported() { return false; } public abstract ExprType getProtoExprType(); /** * get origin default value * * @param value a int value represents in string * @return a int object */ public abstract Object getOriginDefaultValueNonNull(String value, long version); /** @return true if this type can be pushed down to TiKV or TiFLASH */ public boolean isPushDownSupported() { return true; } public Object getOriginDefaultValue(String value, long version) { if (value == null) return null; return getOriginDefaultValueNonNull(value, version); } public int getCollationCode() { return collation; } public long getLength() { return length; } long getDefaultDataSize() { return tp.getDefaultSize(); } long getPrefixSize() { return tp.getPrefixSize(); } public int getDefaultLength() { return tp.getDefaultLength(); } /** * Size of data type * * @return size */ public long getSize() { // TiDB types are prepended with a type flag. 
return getPrefixSize() + getDefaultDataSize(); } public boolean isLengthUnSpecified() { return DataType.isLengthUnSpecified(length); } public boolean isDecimalUnSpecified() { return DataType.isLengthUnSpecified(decimal); } public int getDecimal() { return decimal; } public int getFlag() { return flag; } public List<String> getElems() { return this.elems; } public int getTypeCode() { return tp.getTypeCode(); } public MySQLType getType() { return tp; } public String getCharset() { return charset; } public boolean isPrimaryKey() { return (flag & PriKeyFlag) > 0; } public boolean isNotNull() { return (flag & NotNullFlag) > 0; } public boolean isNoDefault() { return (flag & NoDefaultValueFlag) > 0; } public boolean isAutoIncrement() { return (flag & AutoIncrementFlag) > 0; } public boolean isZeroFill() { return (flag & ZerofillFlag) > 0; } public boolean isBinary() { return (flag & BinaryFlag) > 0; } public boolean isUniqueKey() { return (flag & UniqueKeyFlag) > 0; } public boolean isMultiKey() { return (flag & MultipleKeyFlag) > 0; } public boolean isTimestamp() { return (flag & TimestampFlag) > 0; } public boolean isOnUpdateNow() { return (flag & OnUpdateNowFlag) > 0; } public boolean isBlob() { return (flag & BlobFlag) > 0; } public boolean isEnum() { return (flag & EnumFlag) > 0; } public boolean isSet() { return (flag & SetFlag) > 0; } public boolean isNum() { return (flag & NumFlag) > 0; } public boolean isUnsigned() { return (flag & UnsignedFlag) > 0; } public static boolean isLengthUnSpecified(long length) { return length == UNSPECIFIED_LEN; } @Override public String toString() { return String.format("%s:%s", this.getClass().getSimpleName(), getType()); } @Override public boolean equals(Object other) { if (other instanceof DataType) { DataType otherType = (DataType) other; // tp implies Class is the same // and that might not always hold // TODO: reconsider design here return tp == otherType.tp && flag == otherType.flag && decimal == otherType.decimal && (charset != null && charset.equals(otherType.charset)) && collation == otherType.collation && length == otherType.length && elems.equals(otherType.elems); } return false; } @Override public int hashCode() { return (int) (31 * (tp.getTypeCode() == 0 ? 1 : tp.getTypeCode()) * (flag == 0 ? 1 : flag) * (decimal == 0 ? 1 : decimal) * (charset == null ? 1 : charset.hashCode()) * (collation == 0 ? 1 : collation) * (length == 0 ? 1 : length) * (elems.hashCode())); } public InternalTypeHolder toTypeHolder() { return new InternalTypeHolder( getTypeCode(), flag, length, decimal, charset, Collation.translate(collation), elems); } }
1
12373
how about rename to shouldNarrowDataTypeTo?
pingcap-tispark
java
@@ -33,6 +33,9 @@ from .util import bokeh_version Store.renderers['bokeh'] = BokehRenderer.instance() +if len(Store.renderers) == 1: + Store.current_backend = 'bokeh' + associations = {Overlay: OverlayPlot, NdOverlay: OverlayPlot, GridSpace: GridPlot,
1
import numpy as np from ...core import (Store, Overlay, NdOverlay, Layout, AdjointLayout, GridSpace, GridMatrix, NdLayout) from ...element import (Curve, Points, Scatter, Image, Raster, Path, RGB, Histogram, Spread, HeatMap, Contours, Bars, Box, Bounds, Ellipse, Polygons, BoxWhisker, ErrorBars, Text, HLine, VLine, Spline, Spikes, Table, ItemTable, Area, HSV, QuadMesh, VectorField) from ...core.options import Options, Cycle try: from ...interface import DFrame except: DFrame = None from ..plot import PlotSelector from .annotation import TextPlot, LineAnnotationPlot, SplinePlot from .callbacks import Callback # noqa (API import) from .element import OverlayPlot from .chart import (PointPlot, CurvePlot, SpreadPlot, ErrorPlot, HistogramPlot, SideHistogramPlot, BoxPlot, BarPlot, SpikesPlot, SideSpikesPlot, AreaPlot, VectorFieldPlot) from .path import PathPlot, PolygonPlot from .plot import GridPlot, LayoutPlot, AdjointLayoutPlot from .raster import (RasterPlot, RGBPlot, HeatmapPlot, HSVPlot, QuadMeshPlot) from .renderer import BokehRenderer from .tabular import TablePlot from .util import bokeh_version Store.renderers['bokeh'] = BokehRenderer.instance() associations = {Overlay: OverlayPlot, NdOverlay: OverlayPlot, GridSpace: GridPlot, GridMatrix: GridPlot, AdjointLayout: AdjointLayoutPlot, Layout: LayoutPlot, NdLayout: LayoutPlot, # Charts Curve: CurvePlot, Points: PointPlot, Scatter: PointPlot, ErrorBars: ErrorPlot, Spread: SpreadPlot, Spikes: SpikesPlot, Area: AreaPlot, VectorField: VectorFieldPlot, # Rasters Image: RasterPlot, RGB: RGBPlot, HSV: HSVPlot, Raster: RasterPlot, HeatMap: HeatmapPlot, Histogram: HistogramPlot, QuadMesh: QuadMeshPlot, # Paths Path: PathPlot, Contours: PathPlot, Path: PathPlot, Box: PathPlot, Bounds: PathPlot, Ellipse: PathPlot, Polygons: PolygonPlot, # Annotations HLine: LineAnnotationPlot, VLine: LineAnnotationPlot, Text: TextPlot, Spline: SplinePlot, # Tabular Table: TablePlot, ItemTable: TablePlot} if DFrame is not None: associations[DFrame] = TablePlot Store.register(associations, 'bokeh') AdjointLayoutPlot.registry[Histogram] = SideHistogramPlot AdjointLayoutPlot.registry[Spikes] = SideSpikesPlot try: import pandas # noqa (Conditional import) Store.register({BoxWhisker: BoxPlot, Bars: BarPlot}, 'bokeh') except ImportError: pass point_size = np.sqrt(6) # Matches matplotlib default Cycle.default_cycles['default_colors'] = ['#30a2da', '#fc4f30', '#e5ae38', '#6d904f', '#8b8b8b'] options = Store.options(backend='bokeh') # Charts options.Curve = Options('style', color=Cycle(), line_width=2) options.Scatter = Options('style', color=Cycle(), size=point_size, cmap='hot') options.Points = Options('style', color=Cycle(), size=point_size, cmap='hot') options.Histogram = Options('style', line_color='black', fill_color=Cycle()) options.ErrorBars = Options('style', color='black') options.Spread = Options('style', color=Cycle(), alpha=0.6, line_color='black') options.Spikes = Options('style', color='black') options.Area = Options('style', color=Cycle(), line_color='black') options.VectorField = Options('style', color='black') # Paths options.Contours = Options('style', color=Cycle()) options.Path = Options('style', color=Cycle()) options.Box = Options('style', color='black') options.Bounds = Options('style', color='black') options.Ellipse = Options('style', color='black') options.Polygons = Options('style', color=Cycle(), line_color='black') # Rasters options.Image = Options('style', cmap='hot') options.GridImage = Options('style', cmap='hot') options.Raster = 
Options('style', cmap='hot') options.QuadMesh = Options('style', cmap='hot', line_alpha=0) options.HeatMap = Options('style', cmap='RdYlBu_r', line_alpha=0) # Annotations options.HLine = Options('style', color=Cycle(), line_width=3, alpha=1) options.VLine = Options('style', color=Cycle(), line_width=3, alpha=1) # Define composite defaults options.GridMatrix = Options('plot', shared_xaxis=True, shared_yaxis=True, xaxis=None, yaxis=None) if bokeh_version >= '0.12.5': options.Overlay = Options('style', click_policy='mute') options.NdOverlay = Options('style', click_policy='mute') options.Curve = Options('style', muted_alpha=0.2) options.Path = Options('style', muted_alpha=0.2) options.Scatter = Options('style', muted_alpha=0.2) options.Points = Options('style', muted_alpha=0.2) options.Polygons = Options('style', muted_alpha=0.2)
1
17175
Why not check that the one available renderer is 'bokeh'?
holoviz-holoviews
py
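A minimal sketch of the stricter guard the reviewer asks about on the holoviz-holoviews patch above, assuming Store.renderers behaves like the dict of registered renderers the surrounding module treats it as; this is a hypothetical variant written for illustration, not the change the project actually adopted.

# Store comes from the module's own import (from ...core import Store, ...).
# Hypothetical variant of the added guard: switch the current backend only when
# the single registered renderer really is 'bokeh', rather than merely checking
# that exactly one renderer has been registered.
if list(Store.renderers) == ['bokeh']:
    Store.current_backend = 'bokeh'

Comparing the registered keys instead of only their count keeps the behaviour unchanged if some other backend happens to register first, which appears to be the concern behind the reviewer's question.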